[PATCH] powerpc: Make sure we have an RTC before trying to adjust it
arch/powerpc/kernel/prom.c
1/*
2 * Procedures for creating, accessing and interpreting the device tree.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG
17
18#include <stdarg.h>
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/stringify.h>
28#include <linux/delay.h>
29#include <linux/initrd.h>
30#include <linux/bitops.h>
31#include <linux/module.h>
32
33#include <asm/prom.h>
34#include <asm/rtas.h>
35#include <asm/lmb.h>
36#include <asm/page.h>
37#include <asm/processor.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/smp.h>
41#include <asm/system.h>
42#include <asm/mmu.h>
43#include <asm/pgtable.h>
44#include <asm/pci.h>
45#include <asm/iommu.h>
46#include <asm/btext.h>
47#include <asm/sections.h>
48#include <asm/machdep.h>
49#include <asm/pSeries_reconfig.h>
50#include <asm/pci-bridge.h>
51#ifdef CONFIG_PPC64
52#include <asm/systemcfg.h>
53#endif
54
55#ifdef DEBUG
56#define DBG(fmt...) printk(KERN_ERR fmt)
57#else
58#define DBG(fmt...)
59#endif
60
61struct pci_reg_property {
62 struct pci_address addr;
63 u32 size_hi;
64 u32 size_lo;
65};
66
67struct isa_reg_property {
68 u32 space;
69 u32 address;
70 u32 size;
71};
72
73
74typedef int interpret_func(struct device_node *, unsigned long *,
75 int, int, int);
76
77extern struct rtas_t rtas;
78extern struct lmb lmb;
79extern unsigned long klimit;
80
81static unsigned long memory_limit;
82
83static int __initdata dt_root_addr_cells;
84static int __initdata dt_root_size_cells;
85
86#ifdef CONFIG_PPC64
87static int __initdata iommu_is_off;
88int __initdata iommu_force_on;
89extern unsigned long tce_alloc_start, tce_alloc_end;
90#endif
91
92typedef u32 cell_t;
93
94#if 0
95static struct boot_param_header *initial_boot_params __initdata;
96#else
97struct boot_param_header *initial_boot_params;
98#endif
99
100static struct device_node *allnodes = NULL;
101
102/* use when traversing tree through the allnext, child, sibling,
103 * or parent members of struct device_node.
104 */
105static DEFINE_RWLOCK(devtree_lock);
106
107/* export that to outside world */
108struct device_node *of_chosen;
109
110struct device_node *dflt_interrupt_controller;
111int num_interrupt_controllers;
112
113u32 rtas_data;
114u32 rtas_entry;
115
116/*
117 * Wrapper for allocating memory for various data that needs to be
118 * attached to device nodes as they are processed at boot or when
119 * added to the device tree later (e.g. DLPAR). At boot there is
120 * already a region reserved so we just increment *mem_start by size;
121 * otherwise we call kmalloc.
122 */
123static void * prom_alloc(unsigned long size, unsigned long *mem_start)
124{
125 unsigned long tmp;
126
127 if (!mem_start)
128 return kmalloc(size, GFP_KERNEL);
129
130 tmp = *mem_start;
131 *mem_start += size;
132 return (void *)tmp;
133}
134
135/*
136 * Find the device_node with a given phandle.
137 */
138static struct device_node * find_phandle(phandle ph)
139{
140 struct device_node *np;
141
142 for (np = allnodes; np != 0; np = np->allnext)
143 if (np->linux_phandle == ph)
144 return np;
145 return NULL;
146}
147
148/*
149 * Find the interrupt parent of a node.
150 */
151static struct device_node * __devinit intr_parent(struct device_node *p)
152{
153 phandle *parp;
154
155 parp = (phandle *) get_property(p, "interrupt-parent", NULL);
156 if (parp == NULL)
157 return p->parent;
158 p = find_phandle(*parp);
159 if (p != NULL)
160 return p;
161 /*
162 * On a powermac booted with BootX, we don't get to know the
163 * phandles for any nodes, so find_phandle will return NULL.
164 * Fortunately these machines only have one interrupt controller
165 * so there isn't in fact any ambiguity. -- paulus
166 */
167 if (num_interrupt_controllers == 1)
168 p = dflt_interrupt_controller;
169 return p;
170}
171
172/*
173 * Find out the size of each entry of the interrupts property
174 * for a node.
175 */
176int __devinit prom_n_intr_cells(struct device_node *np)
177{
178 struct device_node *p;
179 unsigned int *icp;
180
181 for (p = np; (p = intr_parent(p)) != NULL; ) {
182 icp = (unsigned int *)
183 get_property(p, "#interrupt-cells", NULL);
184 if (icp != NULL)
185 return *icp;
186 if (get_property(p, "interrupt-controller", NULL) != NULL
187 || get_property(p, "interrupt-map", NULL) != NULL) {
188 printk("oops, node %s doesn't have #interrupt-cells\n",
189 p->full_name);
190 return 1;
191 }
192 }
193#ifdef DEBUG_IRQ
194 printk("prom_n_intr_cells failed for %s\n", np->full_name);
195#endif
196 return 1;
197}
198
199/*
200 * Map an interrupt from a device up to the platform interrupt
201 * descriptor.
202 */
203static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
204 struct device_node *np, unsigned int *ints,
205 int nintrc)
206{
207 struct device_node *p, *ipar;
208 unsigned int *imap, *imask, *ip;
209 int i, imaplen, match;
210 int newintrc = 0, newaddrc = 0;
211 unsigned int *reg;
212 int naddrc;
213
214 reg = (unsigned int *) get_property(np, "reg", NULL);
215 naddrc = prom_n_addr_cells(np);
216 p = intr_parent(np);
217 while (p != NULL) {
218 if (get_property(p, "interrupt-controller", NULL) != NULL)
219 /* this node is an interrupt controller, stop here */
220 break;
221 imap = (unsigned int *)
222 get_property(p, "interrupt-map", &imaplen);
223 if (imap == NULL) {
224 p = intr_parent(p);
225 continue;
226 }
227 imask = (unsigned int *)
228 get_property(p, "interrupt-map-mask", NULL);
229 if (imask == NULL) {
230 printk("oops, %s has interrupt-map but no mask\n",
231 p->full_name);
232 return 0;
233 }
234 imaplen /= sizeof(unsigned int);
235 match = 0;
236 ipar = NULL;
237 while (imaplen > 0 && !match) {
238 /* check the child-interrupt field */
239 match = 1;
240 for (i = 0; i < naddrc && match; ++i)
241 match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
242 for (; i < naddrc + nintrc && match; ++i)
243 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
244 imap += naddrc + nintrc;
245 imaplen -= naddrc + nintrc;
246 /* grab the interrupt parent */
247 ipar = find_phandle((phandle) *imap++);
248 --imaplen;
249 if (ipar == NULL && num_interrupt_controllers == 1)
250 /* cope with BootX not giving us phandles */
251 ipar = dflt_interrupt_controller;
252 if (ipar == NULL) {
253 printk("oops, no int parent %x in map of %s\n",
254 imap[-1], p->full_name);
255 return 0;
256 }
257 /* find the parent's # addr and intr cells */
258 ip = (unsigned int *)
259 get_property(ipar, "#interrupt-cells", NULL);
260 if (ip == NULL) {
261 printk("oops, no #interrupt-cells on %s\n",
262 ipar->full_name);
263 return 0;
264 }
265 newintrc = *ip;
266 ip = (unsigned int *)
267 get_property(ipar, "#address-cells", NULL);
268 newaddrc = (ip == NULL)? 0: *ip;
269 imap += newaddrc + newintrc;
270 imaplen -= newaddrc + newintrc;
271 }
272 if (imaplen < 0) {
273 printk("oops, error decoding int-map on %s, len=%d\n",
274 p->full_name, imaplen);
275 return 0;
276 }
277 if (!match) {
278#ifdef DEBUG_IRQ
279 printk("oops, no match in %s int-map for %s\n",
280 p->full_name, np->full_name);
281#endif
282 return 0;
283 }
284 p = ipar;
285 naddrc = newaddrc;
286 nintrc = newintrc;
287 ints = imap - nintrc;
288 reg = ints - naddrc;
289 }
290 if (p == NULL) {
291#ifdef DEBUG_IRQ
292 printk("hmmm, int tree for %s doesn't have ctrler\n",
293 np->full_name);
294#endif
295 return 0;
296 }
297 *irq = ints;
298 *ictrler = p;
299 return nintrc;
300}
301
302static int __devinit finish_node_interrupts(struct device_node *np,
303 unsigned long *mem_start,
304 int measure_only)
305{
306 unsigned int *ints;
307 int intlen, intrcells, intrcount;
308 int i, j, n;
309 unsigned int *irq, virq;
310 struct device_node *ic;
311
312 if (num_interrupt_controllers == 0) {
313 /*
314 * Old machines just have a list of interrupt numbers
315 * and no interrupt-controller nodes.
316 */
317 ints = (unsigned int *) get_property(np, "AAPL,interrupts",
318 &intlen);
319 /* XXX old interpret_pci_props looked in parent too */
320 /* XXX old interpret_macio_props looked for interrupts
321 before AAPL,interrupts */
322 if (ints == NULL)
323 ints = (unsigned int *) get_property(np, "interrupts",
324 &intlen);
325 if (ints == NULL)
326 return 0;
327
328 np->n_intrs = intlen / sizeof(unsigned int);
329 np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
330 mem_start);
331 if (!np->intrs)
332 return -ENOMEM;
333 if (measure_only)
334 return 0;
335
336 for (i = 0; i < np->n_intrs; ++i) {
337 np->intrs[i].line = *ints++;
338 np->intrs[i].sense = 1;
339 }
340 return 0;
341 }
342
343 ints = (unsigned int *) get_property(np, "interrupts", &intlen);
344 if (ints == NULL)
345 return 0;
346 intrcells = prom_n_intr_cells(np);
347 intlen /= intrcells * sizeof(unsigned int);
348
349 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
350 if (!np->intrs)
351 return -ENOMEM;
352
353 if (measure_only)
354 return 0;
355
356 intrcount = 0;
357 for (i = 0; i < intlen; ++i, ints += intrcells) {
358 n = map_interrupt(&irq, &ic, np, ints, intrcells);
359 if (n <= 0)
360 continue;
361
362 /* don't map IRQ numbers under a cascaded 8259 controller */
363 if (ic && device_is_compatible(ic, "chrp,iic")) {
364 np->intrs[intrcount].line = irq[0];
365 } else {
366#ifdef CONFIG_PPC64
367 virq = virt_irq_create_mapping(irq[0]);
368 if (virq == NO_IRQ) {
369 printk(KERN_CRIT "Could not allocate interrupt"
370 " number for %s\n", np->full_name);
371 continue;
372 }
373 virq = irq_offset_up(virq);
374#else
375 virq = irq[0];
376#endif
377 np->intrs[intrcount].line = virq;
378 }
379
380#ifdef CONFIG_PPC64
381 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
382 if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
383 char *name = get_property(ic->parent, "name", NULL);
384 if (name && !strcmp(name, "u3"))
385 np->intrs[intrcount].line += 128;
386 else if (!(name && !strcmp(name, "mac-io")))
387 /* ignore other cascaded controllers, such as
388 the k2-sata-root */
389 break;
390 }
391#endif
392 np->intrs[intrcount].sense = 1;
393 if (n > 1)
394 np->intrs[intrcount].sense = irq[1];
395 if (n > 2) {
396 printk("hmmm, got %d intr cells for %s:", n,
397 np->full_name);
398 for (j = 0; j < n; ++j)
399 printk(" %d", irq[j]);
400 printk("\n");
401 }
402 ++intrcount;
403 }
404 np->n_intrs = intrcount;
405
406 return 0;
407}
408
409static int __devinit interpret_pci_props(struct device_node *np,
410 unsigned long *mem_start,
411 int naddrc, int nsizec,
412 int measure_only)
413{
414 struct address_range *adr;
415 struct pci_reg_property *pci_addrs;
416 int i, l, n_addrs;
417
418 pci_addrs = (struct pci_reg_property *)
419 get_property(np, "assigned-addresses", &l);
420 if (!pci_addrs)
421 return 0;
422
423 n_addrs = l / sizeof(*pci_addrs);
424
425 adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
426 if (!adr)
427 return -ENOMEM;
428
429 if (measure_only)
430 return 0;
431
432 np->addrs = adr;
433 np->n_addrs = n_addrs;
434
435 for (i = 0; i < n_addrs; i++) {
436 adr[i].space = pci_addrs[i].addr.a_hi;
437 adr[i].address = pci_addrs[i].addr.a_lo |
438 ((u64)pci_addrs[i].addr.a_mid << 32);
439 adr[i].size = pci_addrs[i].size_lo;
440 }
441
442 return 0;
443}
444
445static int __init interpret_dbdma_props(struct device_node *np,
446 unsigned long *mem_start,
447 int naddrc, int nsizec,
448 int measure_only)
449{
450 struct reg_property32 *rp;
451 struct address_range *adr;
452 unsigned long base_address;
453 int i, l;
454 struct device_node *db;
455
456 base_address = 0;
457 if (!measure_only) {
458 for (db = np->parent; db != NULL; db = db->parent) {
459 if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
460 base_address = db->addrs[0].address;
461 break;
462 }
463 }
464 }
465
466 rp = (struct reg_property32 *) get_property(np, "reg", &l);
467 if (rp != 0 && l >= sizeof(struct reg_property32)) {
468 i = 0;
469 adr = (struct address_range *) (*mem_start);
470 while ((l -= sizeof(struct reg_property32)) >= 0) {
471 if (!measure_only) {
472 adr[i].space = 2;
473 adr[i].address = rp[i].address + base_address;
474 adr[i].size = rp[i].size;
475 }
476 ++i;
477 }
478 np->addrs = adr;
479 np->n_addrs = i;
480 (*mem_start) += i * sizeof(struct address_range);
481 }
482
483 return 0;
484}
485
486static int __init interpret_macio_props(struct device_node *np,
487 unsigned long *mem_start,
488 int naddrc, int nsizec,
489 int measure_only)
490{
491 struct reg_property32 *rp;
492 struct address_range *adr;
493 unsigned long base_address;
494 int i, l;
495 struct device_node *db;
496
497 base_address = 0;
498 if (!measure_only) {
499 for (db = np->parent; db != NULL; db = db->parent) {
500 if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
501 base_address = db->addrs[0].address;
502 break;
503 }
504 }
505 }
506
507 rp = (struct reg_property32 *) get_property(np, "reg", &l);
508 if (rp != 0 && l >= sizeof(struct reg_property32)) {
509 i = 0;
510 adr = (struct address_range *) (*mem_start);
511 while ((l -= sizeof(struct reg_property32)) >= 0) {
512 if (!measure_only) {
513 adr[i].space = 2;
514 adr[i].address = rp[i].address + base_address;
515 adr[i].size = rp[i].size;
516 }
517 ++i;
518 }
519 np->addrs = adr;
520 np->n_addrs = i;
521 (*mem_start) += i * sizeof(struct address_range);
522 }
523
524 return 0;
525}
526
527static int __init interpret_isa_props(struct device_node *np,
528 unsigned long *mem_start,
529 int naddrc, int nsizec,
530 int measure_only)
531{
532 struct isa_reg_property *rp;
533 struct address_range *adr;
534 int i, l;
535
536 rp = (struct isa_reg_property *) get_property(np, "reg", &l);
537 if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
538 i = 0;
539 adr = (struct address_range *) (*mem_start);
540 while ((l -= sizeof(struct isa_reg_property)) >= 0) {
541 if (!measure_only) {
542 adr[i].space = rp[i].space;
543 adr[i].address = rp[i].address;
544 adr[i].size = rp[i].size;
545 }
546 ++i;
547 }
548 np->addrs = adr;
549 np->n_addrs = i;
550 (*mem_start) += i * sizeof(struct address_range);
551 }
552
553 return 0;
554}
555
556static int __init interpret_root_props(struct device_node *np,
557 unsigned long *mem_start,
558 int naddrc, int nsizec,
559 int measure_only)
560{
561 struct address_range *adr;
562 int i, l;
563 unsigned int *rp;
564 int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
565
566 rp = (unsigned int *) get_property(np, "reg", &l);
567 if (rp != 0 && l >= rpsize) {
568 i = 0;
569 adr = (struct address_range *) (*mem_start);
570 while ((l -= rpsize) >= 0) {
571 if (!measure_only) {
572 adr[i].space = 0;
573 adr[i].address = rp[naddrc - 1];
574 adr[i].size = rp[naddrc + nsizec - 1];
575 }
576 ++i;
577 rp += naddrc + nsizec;
578 }
579 np->addrs = adr;
580 np->n_addrs = i;
581 (*mem_start) += i * sizeof(struct address_range);
582 }
583
584 return 0;
585}
586
587static int __devinit finish_node(struct device_node *np,
588 unsigned long *mem_start,
589 interpret_func *ifunc,
590 int naddrc, int nsizec,
591 int measure_only)
592{
593 struct device_node *child;
594 int *ip, rc = 0;
595
596 /* get the device addresses and interrupts */
597 if (ifunc != NULL)
598 rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
599 if (rc)
600 goto out;
601
602 rc = finish_node_interrupts(np, mem_start, measure_only);
603 if (rc)
604 goto out;
605
606 /* Look for #address-cells and #size-cells properties. */
607 ip = (int *) get_property(np, "#address-cells", NULL);
608 if (ip != NULL)
609 naddrc = *ip;
610 ip = (int *) get_property(np, "#size-cells", NULL);
611 if (ip != NULL)
612 nsizec = *ip;
613
614 if (!strcmp(np->name, "device-tree") || np->parent == NULL)
615 ifunc = interpret_root_props;
616 else if (np->type == 0)
617 ifunc = NULL;
618 else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
619 ifunc = interpret_pci_props;
620 else if (!strcmp(np->type, "dbdma"))
621 ifunc = interpret_dbdma_props;
622 else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
623 ifunc = interpret_macio_props;
624 else if (!strcmp(np->type, "isa"))
625 ifunc = interpret_isa_props;
626 else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
627 ifunc = interpret_root_props;
628 else if (!((ifunc == interpret_dbdma_props
629 || ifunc == interpret_macio_props)
630 && (!strcmp(np->type, "escc")
631 || !strcmp(np->type, "media-bay"))))
632 ifunc = NULL;
633
634 for (child = np->child; child != NULL; child = child->sibling) {
635 rc = finish_node(child, mem_start, ifunc,
636 naddrc, nsizec, measure_only);
637 if (rc)
638 goto out;
639 }
640out:
641 return rc;
642}
643
644static void __init scan_interrupt_controllers(void)
645{
646 struct device_node *np;
647 int n = 0;
648 char *name, *ic;
649 int iclen;
650
651 for (np = allnodes; np != NULL; np = np->allnext) {
652 ic = get_property(np, "interrupt-controller", &iclen);
653 name = get_property(np, "name", NULL);
654 /* checking iclen makes sure we don't get a false
655 match on /chosen.interrupt_controller */
656 if ((name != NULL
657 && strcmp(name, "interrupt-controller") == 0)
658 || (ic != NULL && iclen == 0
659 && strcmp(name, "AppleKiwi"))) {
660 if (n == 0)
661 dflt_interrupt_controller = np;
662 ++n;
663 }
664 }
665 num_interrupt_controllers = n;
666}
667
668/**
669 * finish_device_tree is called once things are running normally
670 * (i.e. with text and data mapped to the address they were linked at).
671 * It traverses the device tree and fills in some of the additional
672 * fields in each node, such as {n_}addrs and {n_}intrs; the virtual
673 * interrupt mapping is also initialized at this point.
674 */
675void __init finish_device_tree(void)
676{
677 unsigned long start, end, size = 0;
678
679 DBG(" -> finish_device_tree\n");
680
681#ifdef CONFIG_PPC64
682 /* Initialize virtual IRQ map */
683 virt_irq_init();
684#endif
685 scan_interrupt_controllers();
686
687 /*
688 * Finish device-tree (pre-parsing some properties etc...)
689 * We do this in 2 passes. One with "measure_only" set, which
690 * will only measure the amount of memory needed, then we can
691 * allocate that memory, and call finish_node again. However,
692 * we must be careful as most routines will fail nowadays when
693 * prom_alloc() returns 0, so we must make sure our first pass
694 * doesn't start at 0. We pre-initialize size to 16 for that
695 * reason and then remove those additional 16 bytes
696 */
697 size = 16;
698 finish_node(allnodes, &size, NULL, 0, 0, 1);
699 size -= 16;
700 end = start = (unsigned long) __va(lmb_alloc(size, 128));
701 finish_node(allnodes, &end, NULL, 0, 0, 0);
702 BUG_ON(end != start + size);
703
704 DBG(" <- finish_device_tree\n");
705}
706
707static inline char *find_flat_dt_string(u32 offset)
708{
709 return ((char *)initial_boot_params) +
710 initial_boot_params->off_dt_strings + offset;
711}
712
713/**
714 * This function is used to scan the flattened device tree; it is
715 * used to extract the memory information at boot, before we can
716 * unflatten the tree.
717 */
718static int __init scan_flat_dt(int (*it)(unsigned long node,
719 const char *uname, int depth,
720 void *data),
721 void *data)
722{
723 unsigned long p = ((unsigned long)initial_boot_params) +
724 initial_boot_params->off_dt_struct;
725 int rc = 0;
726 int depth = -1;
727
728 do {
729 u32 tag = *((u32 *)p);
730 char *pathp;
731
732 p += 4;
733 if (tag == OF_DT_END_NODE) {
734 depth --;
735 continue;
736 }
737 if (tag == OF_DT_NOP)
738 continue;
739 if (tag == OF_DT_END)
740 break;
741 if (tag == OF_DT_PROP) {
742 u32 sz = *((u32 *)p);
743 p += 8;
744 if (initial_boot_params->version < 0x10)
745 p = _ALIGN(p, sz >= 8 ? 8 : 4);
746 p += sz;
747 p = _ALIGN(p, 4);
748 continue;
749 }
750 if (tag != OF_DT_BEGIN_NODE) {
751 printk(KERN_WARNING "Invalid tag %x scanning flattened"
752 " device tree !\n", tag);
753 return -EINVAL;
754 }
755 depth++;
756 pathp = (char *)p;
757 p = _ALIGN(p + strlen(pathp) + 1, 4);
758 if ((*pathp) == '/') {
759 char *lp, *np;
760 for (lp = NULL, np = pathp; *np; np++)
761 if ((*np) == '/')
762 lp = np+1;
763 if (lp != NULL)
764 pathp = lp;
765 }
766 rc = it(p, pathp, depth, data);
767 if (rc != 0)
768 break;
769 } while(1);
770
771 return rc;
772}
773
774/**
775 * This function can be used within a scan_flat_dt() callback to get
776 * access to properties.
777 */
778static void* __init get_flat_dt_prop(unsigned long node, const char *name,
779 unsigned long *size)
780{
781 unsigned long p = node;
782
783 do {
784 u32 tag = *((u32 *)p);
785 u32 sz, noff;
786 const char *nstr;
787
788 p += 4;
789 if (tag == OF_DT_NOP)
790 continue;
791 if (tag != OF_DT_PROP)
792 return NULL;
793
794 sz = *((u32 *)p);
795 noff = *((u32 *)(p + 4));
796 p += 8;
797 if (initial_boot_params->version < 0x10)
798 p = _ALIGN(p, sz >= 8 ? 8 : 4);
799
800 nstr = find_flat_dt_string(noff);
801 if (nstr == NULL) {
802 printk(KERN_WARNING "Can't find property index"
803 " name !\n");
804 return NULL;
805 }
806 if (strcmp(name, nstr) == 0) {
807 if (size)
808 *size = sz;
809 return (void *)p;
810 }
811 p += sz;
812 p = _ALIGN(p, 4);
813 } while(1);
814}
815
816static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
817 unsigned long align)
818{
819 void *res;
820
821 *mem = _ALIGN(*mem, align);
822 res = (void *)*mem;
823 *mem += size;
824
825 return res;
826}
827
828static unsigned long __init unflatten_dt_node(unsigned long mem,
829 unsigned long *p,
830 struct device_node *dad,
831 struct device_node ***allnextpp,
832 unsigned long fpsize)
833{
834 struct device_node *np;
835 struct property *pp, **prev_pp = NULL;
836 char *pathp;
837 u32 tag;
838 unsigned int l, allocl;
839 int has_name = 0;
840 int new_format = 0;
841
842 tag = *((u32 *)(*p));
843 if (tag != OF_DT_BEGIN_NODE) {
844 printk("Weird tag at start of node: %x\n", tag);
845 return mem;
846 }
847 *p += 4;
848 pathp = (char *)*p;
849 l = allocl = strlen(pathp) + 1;
850 *p = _ALIGN(*p + l, 4);
851
852 /* Version 0x10 has a more compact unit name here instead of the full
853 * path. We accumulate the full path size using "fpsize" and rebuild
854 * it later. We detect this because the first character of the name is
855 * not '/'.
856 */
857 if ((*pathp) != '/') {
858 new_format = 1;
859 if (fpsize == 0) {
860 /* root node: special case. fpsize accounts for path
861 * plus terminating zero. root node only has '/', so
862 * fpsize should be 2, but we want to avoid the first
863 * level nodes to have two '/' so we use fpsize 1 here
864 */
865 fpsize = 1;
866 allocl = 2;
867 } else {
868 /* account for '/' and path size minus terminal 0
869 * already in 'l'
870 */
871 fpsize += l;
872 allocl = fpsize;
873 }
874 }
875
876
877 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
878 __alignof__(struct device_node));
879 if (allnextpp) {
880 memset(np, 0, sizeof(*np));
881 np->full_name = ((char*)np) + sizeof(struct device_node);
882 if (new_format) {
883 char *p = np->full_name;
884 /* rebuild full path for new format */
885 if (dad && dad->parent) {
886 strcpy(p, dad->full_name);
887#ifdef DEBUG
888 if ((strlen(p) + l + 1) != allocl) {
889 DBG("%s: p: %d, l: %d, a: %d\n",
890 pathp, strlen(p), l, allocl);
891 }
892#endif
893 p += strlen(p);
894 }
895 *(p++) = '/';
896 memcpy(p, pathp, l);
897 } else
898 memcpy(np->full_name, pathp, l);
899 prev_pp = &np->properties;
900 **allnextpp = np;
901 *allnextpp = &np->allnext;
902 if (dad != NULL) {
903 np->parent = dad;
904 /* we temporarily use the next field as `last_child'*/
905 if (dad->next == 0)
906 dad->child = np;
907 else
908 dad->next->sibling = np;
909 dad->next = np;
910 }
911 kref_init(&np->kref);
912 }
913 while(1) {
914 u32 sz, noff;
915 char *pname;
916
917 tag = *((u32 *)(*p));
918 if (tag == OF_DT_NOP) {
919 *p += 4;
920 continue;
921 }
922 if (tag != OF_DT_PROP)
923 break;
924 *p += 4;
925 sz = *((u32 *)(*p));
926 noff = *((u32 *)((*p) + 4));
927 *p += 8;
928 if (initial_boot_params->version < 0x10)
929 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
930
931 pname = find_flat_dt_string(noff);
932 if (pname == NULL) {
933 printk("Can't find property name in list !\n");
934 break;
935 }
936 if (strcmp(pname, "name") == 0)
937 has_name = 1;
938 l = strlen(pname) + 1;
939 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
940 __alignof__(struct property));
941 if (allnextpp) {
942 if (strcmp(pname, "linux,phandle") == 0) {
943 np->node = *((u32 *)*p);
944 if (np->linux_phandle == 0)
945 np->linux_phandle = np->node;
946 }
947 if (strcmp(pname, "ibm,phandle") == 0)
948 np->linux_phandle = *((u32 *)*p);
949 pp->name = pname;
950 pp->length = sz;
951 pp->value = (void *)*p;
952 *prev_pp = pp;
953 prev_pp = &pp->next;
954 }
955 *p = _ALIGN((*p) + sz, 4);
956 }
957 /* With version 0x10 we may not have the name property; recreate
958 * it here from the unit name if absent.
959 */
960 if (!has_name) {
961 char *p = pathp, *ps = pathp, *pa = NULL;
962 int sz;
963
964 while (*p) {
965 if ((*p) == '@')
966 pa = p;
967 if ((*p) == '/')
968 ps = p + 1;
969 p++;
970 }
971 if (pa < ps)
972 pa = p;
973 sz = (pa - ps) + 1;
974 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
975 __alignof__(struct property));
976 if (allnextpp) {
977 pp->name = "name";
978 pp->length = sz;
979 pp->value = (unsigned char *)(pp + 1);
980 *prev_pp = pp;
981 prev_pp = &pp->next;
982 memcpy(pp->value, ps, sz - 1);
983 ((char *)pp->value)[sz - 1] = 0;
984 DBG("fixed up name for %s -> %s\n", pathp, pp->value);
985 }
986 }
987 if (allnextpp) {
988 *prev_pp = NULL;
989 np->name = get_property(np, "name", NULL);
990 np->type = get_property(np, "device_type", NULL);
991
992 if (!np->name)
993 np->name = "<NULL>";
994 if (!np->type)
995 np->type = "<NULL>";
996 }
997 while (tag == OF_DT_BEGIN_NODE) {
998 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
999 tag = *((u32 *)(*p));
1000 }
1001 if (tag != OF_DT_END_NODE) {
1002 printk("Weird tag at end of node: %x\n", tag);
1003 return mem;
1004 }
1005 *p += 4;
1006 return mem;
1007}
1008
1009
1010/**
1011 * unflattens the device-tree passed by the firmware, creating the
1012 * tree of struct device_node. It also fills the "name" and "type"
1013 * pointers of the nodes so the normal device-tree walking functions
1014 * can be used (this used to be done by finish_device_tree)
1015 */
1016void __init unflatten_device_tree(void)
1017{
1018 unsigned long start, mem, size;
1019 struct device_node **allnextp = &allnodes;
1020 char *p = NULL;
1021 int l = 0;
1022
1023 DBG(" -> unflatten_device_tree()\n");
1024
1025 /* First pass, scan for size */
1026 start = ((unsigned long)initial_boot_params) +
1027 initial_boot_params->off_dt_struct;
1028 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
1029 size = (size | 3) + 1;
1030
1031 DBG(" size is %lx, allocating...\n", size);
1032
1033 /* Allocate memory for the expanded device tree */
1034 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1035 if (!mem) {
1036 DBG("Couldn't allocate memory with lmb_alloc()!\n");
1037 panic("Couldn't allocate memory with lmb_alloc()!\n");
1038 }
1039 mem = (unsigned long) __va(mem);
1040
1041 ((u32 *)mem)[size / 4] = 0xdeadbeef;
1042
1043 DBG(" unflattening %lx...\n", mem);
1044
1045 /* Second pass, do actual unflattening */
1046 start = ((unsigned long)initial_boot_params) +
1047 initial_boot_params->off_dt_struct;
1048 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1049 if (*((u32 *)start) != OF_DT_END)
1050 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1051 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1052 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1053 ((u32 *)mem)[size / 4] );
1054 *allnextp = NULL;
1055
1056 /* Get pointer to OF "/chosen" node for use everywhere */
1057 of_chosen = of_find_node_by_path("/chosen");
1058 if (of_chosen == NULL)
1059 of_chosen = of_find_node_by_path("/chosen@0");
1060
1061 /* Retrieve the command line */
1062 if (of_chosen != NULL) {
1063 p = (char *)get_property(of_chosen, "bootargs", &l);
1064 if (p != NULL && l > 0)
1065 strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1066 }
1067#ifdef CONFIG_CMDLINE
1068 if (l == 0 || (l == 1 && (*p) == 0))
1069 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1070#endif /* CONFIG_CMDLINE */
1071
1072 DBG("Command line is: %s\n", cmd_line);
1073
1074 DBG(" <- unflatten_device_tree()\n");
1075}
1076
1077
1078static int __init early_init_dt_scan_cpus(unsigned long node,
1079 const char *uname, int depth, void *data)
1080{
1081 char *type = get_flat_dt_prop(node, "device_type", NULL);
1082 u32 *prop;
1083 unsigned long size = 0;
1084
1085 /* We are scanning "cpu" nodes only */
1086 if (type == NULL || strcmp(type, "cpu") != 0)
1087 return 0;
1088
1089#ifdef CONFIG_PPC_PSERIES
1090 /* On LPAR, look for the first ibm,pft-size property for the hash table size
1091 */
1092 if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
1093 u32 *pft_size;
1094 pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
1095 if (pft_size != NULL) {
1096 /* pft_size[0] is the NUMA CEC cookie */
1097 ppc64_pft_size = pft_size[1];
1098 }
1099 }
1100#endif
1101
1102#ifdef CONFIG_PPC64
1103 if (initial_boot_params && initial_boot_params->version >= 2) {
1104 /* version 2 of the kexec param format adds the phys cpuid
1105 * of booted proc.
1106 */
1107 boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1108 boot_cpuid = 0;
1109 } else {
1110 /* Check if it's the boot CPU; set its hw index in the paca now */
1111 if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
1112 prop = get_flat_dt_prop(node, "reg", NULL);
1113 set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop);
1114 boot_cpuid_phys = get_hard_smp_processor_id(0);
1115 }
1116 }
1117#endif
1118
1119#ifdef CONFIG_ALTIVEC
1120 /* Check if we have a VMX unit and update the CPU features if so */
1121 prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
1122 if (prop && (*prop) > 0) {
1123 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1124 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1125 }
1126
1127 /* Same goes for Apple's "altivec" property */
1128 prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
1129 if (prop) {
1130 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1131 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1132 }
1133#endif /* CONFIG_ALTIVEC */
1134
1135#ifdef CONFIG_PPC_PSERIES
1136 /*
1137 * Check for an SMT capable CPU and set the CPU feature. We do
1138 * this by looking at the size of the ibm,ppc-interrupt-server#s
1139 * property
1140 */
1141 prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1142 &size);
1143 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1144 if (prop && ((size / sizeof(u32)) > 1))
1145 cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1146#endif
1147
1148 return 0;
1149}
1150
1151static int __init early_init_dt_scan_chosen(unsigned long node,
1152 const char *uname, int depth, void *data)
1153{
1154 u32 *prop;
1155 unsigned long *lprop;
1156
1157 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1158
1159 if (depth != 1 ||
1160 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
1161 return 0;
1162
1163 /* get platform type */
1164 prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
1165 if (prop == NULL)
1166 return 0;
1167#ifdef CONFIG_PPC64
1168 systemcfg->platform = *prop;
1169#else
1170 _machine = *prop;
1171#endif
1172
1173#ifdef CONFIG_PPC64
1174 /* check if iommu is forced on or off */
1175 if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1176 iommu_is_off = 1;
1177 if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1178 iommu_force_on = 1;
1179#endif
1180
1181 lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
1182 if (lprop)
1183 memory_limit = *lprop;
1184
1185#ifdef CONFIG_PPC64
1186 lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1187 if (lprop)
1188 tce_alloc_start = *lprop;
1189 lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1190 if (lprop)
1191 tce_alloc_end = *lprop;
1192#endif
1193
1194#ifdef CONFIG_PPC_RTAS
1195 /* To help early debugging via the front panel, we retrieve a minimal
1196 * set of RTAS information now if available
1197 */
1198 {
1199 u64 *basep, *entryp;
1200
1201 basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
1202 entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1203 prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
1204 if (basep && entryp && prop) {
1205 rtas.base = *basep;
1206 rtas.entry = *entryp;
1207 rtas.size = *prop;
1208 }
1209 }
1210#endif /* CONFIG_PPC_RTAS */
1211
1212 /* break now */
1213 return 1;
1214}
1215
1216static int __init early_init_dt_scan_root(unsigned long node,
1217 const char *uname, int depth, void *data)
1218{
1219 u32 *prop;
1220
1221 if (depth != 0)
1222 return 0;
1223
1224 prop = get_flat_dt_prop(node, "#size-cells", NULL);
1225 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1226 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1227
1228 prop = get_flat_dt_prop(node, "#address-cells", NULL);
1229 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1230 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1231
1232 /* break now */
1233 return 1;
1234}
1235
1236static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1237{
1238 cell_t *p = *cellp;
1239 unsigned long r;
1240
1241 /* Ignore more than 2 cells */
1242 while (s > sizeof(unsigned long) / 4) {
1243 p++;
1244 s--;
1245 }
1246 r = *p++;
1247#ifdef CONFIG_PPC64
1248 if (s > 1) {
1249 r <<= 32;
1250 r |= *(p++);
1251 s--;
1252 }
1253#endif
1254
1255 *cellp = p;
1256 return r;
1257}
1258
1259
1260static int __init early_init_dt_scan_memory(unsigned long node,
1261 const char *uname, int depth, void *data)
1262{
1263 char *type = get_flat_dt_prop(node, "device_type", NULL);
1264 cell_t *reg, *endp;
1265 unsigned long l;
1266
1267 /* We are scanning "memory" nodes only */
1268 if (type == NULL || strcmp(type, "memory") != 0)
1269 return 0;
1270
1271 reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
1272 if (reg == NULL)
1273 return 0;
1274
1275 endp = reg + (l / sizeof(cell_t));
1276
1277 DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
1278 uname, l, reg[0], reg[1], reg[2], reg[3]);
1279
1280 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1281 unsigned long base, size;
1282
1283 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1284 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1285
1286 if (size == 0)
1287 continue;
1288 DBG(" - %lx , %lx\n", base, size);
1289#ifdef CONFIG_PPC64
1290 if (iommu_is_off) {
1291 if (base >= 0x80000000ul)
1292 continue;
1293 if ((base + size) > 0x80000000ul)
1294 size = 0x80000000ul - base;
1295 }
1296#endif
1297 lmb_add(base, size);
1298 }
1299 return 0;
1300}
1301
1302static void __init early_reserve_mem(void)
1303{
1304 unsigned long base, size;
1305 unsigned long *reserve_map;
1306
1307 reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1308 initial_boot_params->off_mem_rsvmap);
1309 while (1) {
1310 base = *(reserve_map++);
1311 size = *(reserve_map++);
1312 if (size == 0)
1313 break;
1314 DBG("reserving: %lx -> %lx\n", base, size);
1315 lmb_reserve(base, size);
1316 }
1317
1318#if 0
1319 DBG("memory reserved, lmbs :\n");
1320 lmb_dump_all();
1321#endif
1322}
1323
1324void __init early_init_devtree(void *params)
1325{
1326 DBG(" -> early_init_devtree()\n");
1327
1328 /* Setup flat device-tree pointer */
1329 initial_boot_params = params;
1330
1331 /* Retrieve various information from the /chosen node of the
1332 * device-tree, including the platform type, initrd location and
1333 * size, TCE reserve, and more ...
1334 */
1335 scan_flat_dt(early_init_dt_scan_chosen, NULL);
1336
1337 /* Scan memory nodes and rebuild LMBs */
1338 lmb_init();
1339 scan_flat_dt(early_init_dt_scan_root, NULL);
1340 scan_flat_dt(early_init_dt_scan_memory, NULL);
1341 lmb_enforce_memory_limit(memory_limit);
1342 lmb_analyze();
1343#ifdef CONFIG_PPC64
1344 systemcfg->physicalMemorySize = lmb_phys_mem_size();
1345#endif
1346 lmb_reserve(0, __pa(klimit));
1347
1348 DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1349
1350 /* Reserve LMB regions used by kernel, initrd, dt, etc... */
1351 early_reserve_mem();
1352
1353 DBG("Scanning CPUs ...\n");
1354
1355 /* Retrieve the hash table size from the flattened tree, plus other
1356 * CPU-related information (altivec support, boot CPU ID, ...)
1357 */
1358 scan_flat_dt(early_init_dt_scan_cpus, NULL);
1359
1360 DBG(" <- early_init_devtree()\n");
1361}
1362
1363#undef printk
1364
1365int
1366prom_n_addr_cells(struct device_node* np)
1367{
1368 int* ip;
1369 do {
1370 if (np->parent)
1371 np = np->parent;
1372 ip = (int *) get_property(np, "#address-cells", NULL);
1373 if (ip != NULL)
1374 return *ip;
1375 } while (np->parent);
1376 /* No #address-cells property for the root node, default to 1 */
1377 return 1;
1378}
1379
1380int
1381prom_n_size_cells(struct device_node* np)
1382{
1383 int* ip;
1384 do {
1385 if (np->parent)
1386 np = np->parent;
1387 ip = (int *) get_property(np, "#size-cells", NULL);
1388 if (ip != NULL)
1389 return *ip;
1390 } while (np->parent);
1391 /* No #size-cells property for the root node, default to 1 */
1392 return 1;
1393}
1394
1395/**
1396 * Work out the sense (active-low level / active-high edge)
1397 * of each interrupt from the device tree.
1398 */
1399void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1400{
1401 struct device_node *np;
1402 int i, j;
1403
1404 /* default to level-triggered */
1405 memset(senses, 1, max - off);
1406
1407 for (np = allnodes; np != 0; np = np->allnext) {
1408 for (j = 0; j < np->n_intrs; j++) {
1409 i = np->intrs[j].line;
1410 if (i >= off && i < max)
1411 senses[i-off] = np->intrs[j].sense ?
1412 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE :
1413 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE;
1414 }
1415 }
1416}
1417
1418/**
1419 * Construct and return a list of the device_nodes with a given name.
1420 */
1421struct device_node *find_devices(const char *name)
1422{
1423 struct device_node *head, **prevp, *np;
1424
1425 prevp = &head;
1426 for (np = allnodes; np != 0; np = np->allnext) {
1427 if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1428 *prevp = np;
1429 prevp = &np->next;
1430 }
1431 }
1432 *prevp = NULL;
1433 return head;
1434}
1435EXPORT_SYMBOL(find_devices);
1436
1437/**
1438 * Construct and return a list of the device_nodes with a given type.
1439 */
1440struct device_node *find_type_devices(const char *type)
1441{
1442 struct device_node *head, **prevp, *np;
1443
1444 prevp = &head;
1445 for (np = allnodes; np != 0; np = np->allnext) {
1446 if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1447 *prevp = np;
1448 prevp = &np->next;
1449 }
1450 }
1451 *prevp = NULL;
1452 return head;
1453}
1454EXPORT_SYMBOL(find_type_devices);
1455
1456/**
1457 * Returns all nodes linked together
1458 */
1459struct device_node *find_all_nodes(void)
1460{
1461 struct device_node *head, **prevp, *np;
1462
1463 prevp = &head;
1464 for (np = allnodes; np != 0; np = np->allnext) {
1465 *prevp = np;
1466 prevp = &np->next;
1467 }
1468 *prevp = NULL;
1469 return head;
1470}
1471EXPORT_SYMBOL(find_all_nodes);
1472
1473/** Checks if the given "compat" string matches one of the strings in
1474 * the device's "compatible" property
1475 */
1476int device_is_compatible(struct device_node *device, const char *compat)
1477{
1478 const char* cp;
1479 int cplen, l;
1480
1481 cp = (char *) get_property(device, "compatible", &cplen);
1482 if (cp == NULL)
1483 return 0;
1484 while (cplen > 0) {
1485 if (strncasecmp(cp, compat, strlen(compat)) == 0)
1486 return 1;
1487 l = strlen(cp) + 1;
1488 cp += l;
1489 cplen -= l;
1490 }
1491
1492 return 0;
1493}
1494EXPORT_SYMBOL(device_is_compatible);
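/*
 * Minimal usage sketch, mirroring the check done in
 * finish_node_interrupts() above: "chrp,iic" is the compatible value
 * used there for a cascaded 8259 interrupt controller.
 *
 *	if (ic && device_is_compatible(ic, "chrp,iic"))
 *		np->intrs[intrcount].line = irq[0];
 */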
1495
1496
1497/**
1498 * Indicates whether the root node has a given value in its
1499 * compatible property.
1500 */
1501int machine_is_compatible(const char *compat)
1502{
1503 struct device_node *root;
1504 int rc = 0;
1505
1506 root = of_find_node_by_path("/");
1507 if (root) {
1508 rc = device_is_compatible(root, compat);
1509 of_node_put(root);
1510 }
1511 return rc;
1512}
1513EXPORT_SYMBOL(machine_is_compatible);
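/*
 * Minimal usage sketch; "MacRISC" is only an example value for the
 * root node's compatible property.
 *
 *	if (machine_is_compatible("MacRISC"))
 *		... PowerMac-specific setup ...
 */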
1514
1515/**
1516 * Construct and return a list of the device_nodes with a given type
1517 * and compatible property.
1518 */
1519struct device_node *find_compatible_devices(const char *type,
1520 const char *compat)
1521{
1522 struct device_node *head, **prevp, *np;
1523
1524 prevp = &head;
1525 for (np = allnodes; np != 0; np = np->allnext) {
1526 if (type != NULL
1527 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1528 continue;
1529 if (device_is_compatible(np, compat)) {
1530 *prevp = np;
1531 prevp = &np->next;
1532 }
1533 }
1534 *prevp = NULL;
1535 return head;
1536}
1537EXPORT_SYMBOL(find_compatible_devices);
1538
1539/**
1540 * Find the device_node with a given full_name.
1541 */
1542struct device_node *find_path_device(const char *path)
1543{
1544 struct device_node *np;
1545
1546 for (np = allnodes; np != 0; np = np->allnext)
1547 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1548 return np;
1549 return NULL;
1550}
1551EXPORT_SYMBOL(find_path_device);
1552
1553/*******
1554 *
1555 * New implementation of the OF "find" APIs; they return a refcounted
1556 * object, call of_node_put() when done. The device tree and list
1557 * are protected by a rw_lock.
1558 *
1559 * Note that property management will need some locking as well;
1560 * this isn't dealt with yet.
1561 *
1562 *******/
1563
1564/**
1565 * of_find_node_by_name - Find a node by its "name" property
1566 * @from: The node to start searching from or NULL, the node
1567 * you pass will not be searched, only the next one
1568 * will; typically, you pass what the previous call
1569 * returned. of_node_put() will be called on it
1570 * @name: The name string to match against
1571 *
1572 * Returns a node pointer with refcount incremented, use
1573 * of_node_put() on it when done.
1574 */
1575struct device_node *of_find_node_by_name(struct device_node *from,
1576 const char *name)
1577{
1578 struct device_node *np;
1579
1580 read_lock(&devtree_lock);
1581 np = from ? from->allnext : allnodes;
1582 for (; np != 0; np = np->allnext)
1583 if (np->name != 0 && strcasecmp(np->name, name) == 0
1584 && of_node_get(np))
1585 break;
1586 if (from)
1587 of_node_put(from);
1588 read_unlock(&devtree_lock);
1589 return np;
1590}
1591EXPORT_SYMBOL(of_find_node_by_name);
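/*
 * Minimal usage sketch of the iteration pattern described above;
 * "serial" is only an example name.  Each call puts the node passed in
 * and returns the next match with its refcount raised, so the caller
 * only needs an explicit of_node_put() if it breaks out of the loop
 * while still holding a node.
 *
 *	struct device_node *np = NULL;
 *
 *	while ((np = of_find_node_by_name(np, "serial")) != NULL) {
 *		... use np ...
 *	}
 */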
1592
1593/**
1594 * of_find_node_by_type - Find a node by its "device_type" property
1595 * @from: The node to start searching from or NULL, the node
1596 * you pass will not be searched, only the next one
1597 * will; typically, you pass what the previous call
1598 * returned. of_node_put() will be called on it
1599 * @name: The type string to match against
1600 *
1601 * Returns a node pointer with refcount incremented, use
1602 * of_node_put() on it when done.
1603 */
1604struct device_node *of_find_node_by_type(struct device_node *from,
1605 const char *type)
1606{
1607 struct device_node *np;
1608
1609 read_lock(&devtree_lock);
1610 np = from ? from->allnext : allnodes;
1611 for (; np != 0; np = np->allnext)
1612 if (np->type != 0 && strcasecmp(np->type, type) == 0
1613 && of_node_get(np))
1614 break;
1615 if (from)
1616 of_node_put(from);
1617 read_unlock(&devtree_lock);
1618 return np;
1619}
1620EXPORT_SYMBOL(of_find_node_by_type);
1621
1622/**
1623 * of_find_compatible_node - Find a node based on type and one of the
1624 * tokens in its "compatible" property
1625 * @from: The node to start searching from or NULL, the node
1626 * you pass will not be searched, only the next one
1627 * will; typically, you pass what the previous call
1628 * returned. of_node_put() will be called on it
1629 * @type: The type string to match "device_type" or NULL to ignore
1630 * @compatible: The string to match to one of the tokens in the device
1631 * "compatible" list.
1632 *
1633 * Returns a node pointer with refcount incremented, use
1634 * of_node_put() on it when done.
1635 */
1636struct device_node *of_find_compatible_node(struct device_node *from,
1637 const char *type, const char *compatible)
1638{
1639 struct device_node *np;
1640
1641 read_lock(&devtree_lock);
1642 np = from ? from->allnext : allnodes;
1643 for (; np != 0; np = np->allnext) {
1644 if (type != NULL
1645 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1646 continue;
1647 if (device_is_compatible(np, compatible) && of_node_get(np))
1648 break;
1649 }
1650 if (from)
1651 of_node_put(from);
1652 read_unlock(&devtree_lock);
1653 return np;
1654}
1655EXPORT_SYMBOL(of_find_compatible_node);
1656
1657/**
1658 * of_find_node_by_path - Find a node matching a full OF path
1659 * @path: The full path to match
1660 *
1661 * Returns a node pointer with refcount incremented, use
1662 * of_node_put() on it when done.
1663 */
1664struct device_node *of_find_node_by_path(const char *path)
1665{
1666 struct device_node *np = allnodes;
1667
1668 read_lock(&devtree_lock);
1669 for (; np != 0; np = np->allnext) {
1670 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1671 && of_node_get(np))
1672 break;
1673 }
1674 read_unlock(&devtree_lock);
1675 return np;
1676}
1677EXPORT_SYMBOL(of_find_node_by_path);
1678
1679/**
1680 * of_find_node_by_phandle - Find a node given a phandle
1681 * @handle: phandle of the node to find
1682 *
1683 * Returns a node pointer with refcount incremented, use
1684 * of_node_put() on it when done.
1685 */
1686struct device_node *of_find_node_by_phandle(phandle handle)
1687{
1688 struct device_node *np;
1689
1690 read_lock(&devtree_lock);
1691 for (np = allnodes; np != 0; np = np->allnext)
1692 if (np->linux_phandle == handle)
1693 break;
1694 if (np)
1695 of_node_get(np);
1696 read_unlock(&devtree_lock);
1697 return np;
1698}
1699EXPORT_SYMBOL(of_find_node_by_phandle);
1700
1701/**
1702 * of_find_all_nodes - Get next node in global list
1703 * @prev: Previous node or NULL to start iteration
1704 * of_node_put() will be called on it
1705 *
1706 * Returns a node pointer with refcount incremented, use
1707 * of_node_put() on it when done.
1708 */
1709struct device_node *of_find_all_nodes(struct device_node *prev)
1710{
1711 struct device_node *np;
1712
1713 read_lock(&devtree_lock);
1714 np = prev ? prev->allnext : allnodes;
1715 for (; np != 0; np = np->allnext)
1716 if (of_node_get(np))
1717 break;
1718 if (prev)
1719 of_node_put(prev);
1720 read_unlock(&devtree_lock);
1721 return np;
1722}
1723EXPORT_SYMBOL(of_find_all_nodes);
1724
1725/**
1726 * of_get_parent - Get a node's parent if any
1727 * @node: Node to get parent
1728 *
1729 * Returns a node pointer with refcount incremented, use
1730 * of_node_put() on it when done.
1731 */
1732struct device_node *of_get_parent(const struct device_node *node)
1733{
1734 struct device_node *np;
1735
1736 if (!node)
1737 return NULL;
1738
1739 read_lock(&devtree_lock);
1740 np = of_node_get(node->parent);
1741 read_unlock(&devtree_lock);
1742 return np;
1743}
1744EXPORT_SYMBOL(of_get_parent);
1745
1746/**
1747 * of_get_next_child - Iterate over a node's children
1748 * @node: parent node
1749 * @prev: previous child of the parent node, or NULL to get first
1750 *
1751 * Returns a node pointer with refcount incremented, use
1752 * of_node_put() on it when done.
1753 */
1754struct device_node *of_get_next_child(const struct device_node *node,
1755 struct device_node *prev)
1756{
1757 struct device_node *next;
1758
1759 read_lock(&devtree_lock);
1760 next = prev ? prev->sibling : node->child;
1761 for (; next != 0; next = next->sibling)
1762 if (of_node_get(next))
1763 break;
1764 if (prev)
1765 of_node_put(prev);
1766 read_unlock(&devtree_lock);
1767 return next;
1768}
1769EXPORT_SYMBOL(of_get_next_child);
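/*
 * Minimal usage sketch: walking all children of a (hypothetical)
 * "parent" node with the previous-child convention described above.
 *
 *	struct device_node *child = NULL;
 *
 *	while ((child = of_get_next_child(parent, child)) != NULL) {
 *		... use child ...
 *	}
 */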
1770
1771/**
1772 * of_node_get - Increment refcount of a node
1773 * @node: Node to inc refcount, NULL is supported to
1774 * simplify writing of callers
1775 *
1776 * Returns node.
1777 */
1778struct device_node *of_node_get(struct device_node *node)
1779{
1780 if (node)
1781 kref_get(&node->kref);
1782 return node;
1783}
1784EXPORT_SYMBOL(of_node_get);
1785
1786static inline struct device_node * kref_to_device_node(struct kref *kref)
1787{
1788 return container_of(kref, struct device_node, kref);
1789}
1790
1791/**
1792 * of_node_release - release a dynamically allocated node
1793 * @kref: kref element of the node to be released
1794 *
1795 * In of_node_put() this function is passed to kref_put()
1796 * as the destructor.
1797 */
1798static void of_node_release(struct kref *kref)
1799{
1800 struct device_node *node = kref_to_device_node(kref);
1801 struct property *prop = node->properties;
1802
1803 if (!OF_IS_DYNAMIC(node))
1804 return;
1805 while (prop) {
1806 struct property *next = prop->next;
1807 kfree(prop->name);
1808 kfree(prop->value);
1809 kfree(prop);
1810 prop = next;
1811 }
1812 kfree(node->intrs);
1813 kfree(node->addrs);
1814 kfree(node->full_name);
1815 kfree(node->data);
1816 kfree(node);
1817}
1818
1819/**
1820 * of_node_put - Decrement refcount of a node
1821 * @node: Node to dec refcount, NULL is supported to
1822 * simplify writing of callers
1823 *
1824 */
1825void of_node_put(struct device_node *node)
1826{
1827 if (node)
1828 kref_put(&node->kref, of_node_release);
1829}
1830EXPORT_SYMBOL(of_node_put);
1831
1832/*
1833 * Plug a device node into the tree and global list.
1834 */
1835void of_attach_node(struct device_node *np)
1836{
1837 write_lock(&devtree_lock);
1838 np->sibling = np->parent->child;
1839 np->allnext = allnodes;
1840 np->parent->child = np;
1841 allnodes = np;
1842 write_unlock(&devtree_lock);
1843}
1844
1845/*
1846 * "Unplug" a node from the device tree. The caller must hold
1847 * a reference to the node. The memory associated with the node
1848 * is not freed until its refcount goes to zero.
1849 */
1850void of_detach_node(const struct device_node *np)
1851{
1852 struct device_node *parent;
1853
1854 write_lock(&devtree_lock);
1855
1856 parent = np->parent;
1857
1858 if (allnodes == np)
1859 allnodes = np->allnext;
1860 else {
1861 struct device_node *prev;
1862 for (prev = allnodes;
1863 prev->allnext != np;
1864 prev = prev->allnext)
1865 ;
1866 prev->allnext = np->allnext;
1867 }
1868
1869 if (parent->child == np)
1870 parent->child = np->sibling;
1871 else {
1872 struct device_node *prevsib;
1873 for (prevsib = np->parent->child;
1874 prevsib->sibling != np;
1875 prevsib = prevsib->sibling)
1876 ;
1877 prevsib->sibling = np->sibling;
1878 }
1879
1880 write_unlock(&devtree_lock);
1881}
1882
1883#ifdef CONFIG_PPC_PSERIES
1884/*
1885 * Fix up the uninitialized fields in a new device node:
1886 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1887 *
1888 * A lot of boot-time code is duplicated here, because functions such
1889 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1890 * slab allocator.
1891 *
1892 * This should probably be split up into smaller chunks.
1893 */
1894
1895static int of_finish_dynamic_node(struct device_node *node,
1896 unsigned long *unused1, int unused2,
1897 int unused3, int unused4)
1898{
1899 struct device_node *parent = of_get_parent(node);
1900 int err = 0;
1901 phandle *ibm_phandle;
1902
1903 node->name = get_property(node, "name", NULL);
1904 node->type = get_property(node, "device_type", NULL);
1905
1906 if (!parent) {
1907 err = -ENODEV;
1908 goto out;
1909 }
1910
1911 /* We don't support that function on PowerMac, at least
1912 * not yet
1913 */
1914 if (systemcfg->platform == PLATFORM_POWERMAC)
1915 return -ENODEV;
1916
1917 /* fix up new node's linux_phandle field */
1918 if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1919 node->linux_phandle = *ibm_phandle;
1920
1921out:
1922 of_node_put(parent);
1923 return err;
1924}
1925
1926static int prom_reconfig_notifier(struct notifier_block *nb,
1927 unsigned long action, void *node)
1928{
1929 int err;
1930
1931 switch (action) {
1932 case PSERIES_RECONFIG_ADD:
1933 err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1934 if (err < 0) {
1935 printk(KERN_ERR "finish_node returned %d\n", err);
1936 err = NOTIFY_BAD;
1937 }
1938 break;
1939 default:
1940 err = NOTIFY_DONE;
1941 break;
1942 }
1943 return err;
1944}
1945
1946static struct notifier_block prom_reconfig_nb = {
1947 .notifier_call = prom_reconfig_notifier,
1948 .priority = 10, /* This one needs to run first */
1949};
1950
1951static int __init prom_reconfig_setup(void)
1952{
1953 return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1954}
1955__initcall(prom_reconfig_setup);
1956#endif
1957
1958/*
1959 * Find a property with a given name for a given node
1960 * and return the value.
1961 */
1962unsigned char *get_property(struct device_node *np, const char *name,
1963 int *lenp)
1964{
1965 struct property *pp;
1966
1967 for (pp = np->properties; pp != 0; pp = pp->next)
1968 if (strcmp(pp->name, name) == 0) {
1969 if (lenp != 0)
1970 *lenp = pp->length;
1971 return pp->value;
1972 }
1973 return NULL;
1974}
1975EXPORT_SYMBOL(get_property);
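/*
 * Minimal usage sketch: reading a property and its length, as done for
 * "reg" elsewhere in this file.  The returned pointer refers to the
 * node's own property data and must not be freed by the caller.
 *
 *	int len;
 *	unsigned int *reg;
 *
 *	reg = (unsigned int *) get_property(np, "reg", &len);
 *	if (reg != NULL)
 *		... len is the property size in bytes ...
 */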
1976
1977/*
1978 * Add a property to a node
1979 */
1980void prom_add_property(struct device_node* np, struct property* prop)
1981{
1982 struct property **next = &np->properties;
1983
1984 prop->next = NULL;
1985 while (*next)
1986 next = &(*next)->next;
1987 *next = prop;
1988}
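/*
 * Minimal usage sketch: the property name and value below are purely
 * illustrative.  The property structure and its value must remain
 * allocated for as long as the node references them, since the node
 * only stores pointers.
 *
 *	static u32 example_val = 1;
 *	static struct property example_prop = {
 *		.name	= "linux,example",
 *		.length	= sizeof(example_val),
 *		.value	= (unsigned char *)&example_val,
 *	};
 *
 *	prom_add_property(np, &example_prop);
 */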
1989
1990/* I quickly hacked that one, check against spec ! */
1991static inline unsigned long
1992bus_space_to_resource_flags(unsigned int bus_space)
1993{
1994 u8 space = (bus_space >> 24) & 0xf;
1995 if (space == 0)
1996 space = 0x02;
1997 if (space == 0x02)
1998 return IORESOURCE_MEM;
1999 else if (space == 0x01)
2000 return IORESOURCE_IO;
2001 else {
2002 printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
2003 bus_space);
2004 return 0;
2005 }
2006}
2007
2008static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
2009 struct address_range *range)
2010{
2011 unsigned long mask;
2012 int i;
2013
2014 /* Check this one */
2015 mask = bus_space_to_resource_flags(range->space);
2016 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2017 if ((pdev->resource[i].flags & mask) == mask &&
2018 pdev->resource[i].start <= range->address &&
2019 pdev->resource[i].end > range->address) {
2020 if ((range->address + range->size - 1) > pdev->resource[i].end) {
2021 /* Add better message */
2022 printk(KERN_WARNING "PCI/OF resource overlap !\n");
2023 return NULL;
2024 }
2025 break;
2026 }
2027 }
2028 if (i == DEVICE_COUNT_RESOURCE)
2029 return NULL;
2030 return &pdev->resource[i];
2031}
2032
2033/*
2034 * Request an OF device resource. Currently handles children of PCI devices,
2035 * or other nodes attached to the root node. Ultimately, put some
2036 * link to resources in the OF node.
2037 */
2038struct resource *request_OF_resource(struct device_node* node, int index,
2039 const char* name_postfix)
2040{
2041 struct pci_dev* pcidev;
2042 u8 pci_bus, pci_devfn;
2043 unsigned long iomask;
2044 struct device_node* nd;
2045 struct resource* parent;
2046 struct resource *res = NULL;
2047 int nlen, plen;
2048
2049 if (index >= node->n_addrs)
2050 goto fail;
2051
2052 /* Sanity check on bus space */
2053 iomask = bus_space_to_resource_flags(node->addrs[index].space);
2054 if (iomask & IORESOURCE_MEM)
2055 parent = &iomem_resource;
2056 else if (iomask & IORESOURCE_IO)
2057 parent = &ioport_resource;
2058 else
2059 goto fail;
2060
2061 /* Find a PCI parent if any */
2062 nd = node;
2063 pcidev = NULL;
2064 while (nd) {
2065 if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2066 pcidev = pci_find_slot(pci_bus, pci_devfn);
2067 if (pcidev) break;
2068 nd = nd->parent;
2069 }
2070 if (pcidev)
2071 parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2072 if (!parent) {
2073 printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
2074 node->name);
2075 goto fail;
2076 }
2077
2078 res = __request_region(parent, node->addrs[index].address,
2079 node->addrs[index].size, NULL);
2080 if (!res)
2081 goto fail;
2082 nlen = strlen(node->name);
2083 plen = name_postfix ? strlen(name_postfix) : 0;
2084 res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
2085 if (res->name) {
2086 strcpy((char *)res->name, node->name);
2087 if (plen)
2088 strcpy((char *)res->name+nlen, name_postfix);
2089 }
2090 return res;
2091fail:
2092 return NULL;
2093}
2094EXPORT_SYMBOL(request_OF_resource);
2095
2096int release_OF_resource(struct device_node *node, int index)
2097{
2098 struct pci_dev* pcidev;
2099 u8 pci_bus, pci_devfn;
2100 unsigned long iomask, start, end;
2101 struct device_node* nd;
2102 struct resource* parent;
2103 struct resource *res = NULL;
2104
2105 if (index >= node->n_addrs)
2106 return -EINVAL;
2107
2108 /* Sanity check on bus space */
2109 iomask = bus_space_to_resource_flags(node->addrs[index].space);
2110 if (iomask & IORESOURCE_MEM)
2111 parent = &iomem_resource;
2112 else if (iomask & IORESOURCE_IO)
2113 parent = &ioport_resource;
2114 else
2115 return -EINVAL;
2116
2117 /* Find a PCI parent if any */
2118 nd = node;
2119 pcidev = NULL;
2120 while(nd) {
2121 if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2122 pcidev = pci_find_slot(pci_bus, pci_devfn);
2123 if (pcidev) break;
2124 nd = nd->parent;
2125 }
2126 if (pcidev)
2127 parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2128 if (!parent) {
2129 printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
2130 node->name);
2131 return -ENODEV;
2132 }
2133
2134 /* Find us in the parent and its children */
2135 res = parent->child;
2136 start = node->addrs[index].address;
2137 end = start + node->addrs[index].size - 1;
2138 while (res) {
2139 if (res->start == start && res->end == end &&
2140 (res->flags & IORESOURCE_BUSY))
2141 break;
2142 if (res->start <= start && res->end >= end)
2143 res = res->child;
2144 else
2145 res = res->sibling;
2146 }
2147 if (!res)
2148 return -ENODEV;
2149
2150 if (res->name) {
2151 kfree(res->name);
2152 res->name = NULL;
2153 }
2154 release_resource(res);
2155 kfree(res);
2156
2157 return 0;
2158}
2159EXPORT_SYMBOL(release_OF_resource);