arch/powerpc/kernel/prom_init.c
1 /*
2 * Procedures for interfacing to Open Firmware.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 #undef DEBUG_PROM
17
18 #include <stdarg.h>
19 #include <linux/kernel.h>
20 #include <linux/string.h>
21 #include <linux/init.h>
22 #include <linux/threads.h>
23 #include <linux/spinlock.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/proc_fs.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <asm/prom.h>
32 #include <asm/rtas.h>
33 #include <asm/page.h>
34 #include <asm/processor.h>
35 #include <asm/irq.h>
36 #include <asm/io.h>
37 #include <asm/smp.h>
38 #include <asm/mmu.h>
39 #include <asm/pgtable.h>
40 #include <asm/pci.h>
41 #include <asm/iommu.h>
42 #include <asm/btext.h>
43 #include <asm/sections.h>
44 #include <asm/machdep.h>
45 #include <asm/opal.h>
46
47 #include <linux/linux_logo.h>
48
49 /*
50 * Eventually bump that one up
51 */
52 #define DEVTREE_CHUNK_SIZE 0x100000
53
54 /*
55 * This is the size of the local memory reserve map that gets copied
56 * into the boot params passed to the kernel. That size is totally
57 * flexible as the kernel just reads the list until it encounters an
58 * entry with size 0, so it can be changed without breaking binary
59 * compatibility
60 */
61 #define MEM_RESERVE_MAP_SIZE 8
62
63 /*
64 * prom_init() is called very early on, before the kernel text
65 * and data have been mapped to KERNELBASE. At this point the code
66 * is running at whatever address it has been loaded at.
67 * On ppc32 we compile with -mrelocatable, which means that references
68 * to extern and static variables get relocated automatically.
69 * ppc64 objects are always relocatable, we just need to relocate the
70 * TOC.
71 *
72 * Because OF may have mapped I/O devices into the area starting at
73 * KERNELBASE, particularly on CHRP machines, we can't safely call
74 * OF once the kernel has been mapped to KERNELBASE. Therefore all
75 * OF calls must be done within prom_init().
76 *
77 * ADDR is used in calls to call_prom. The 4th and following
78 * arguments to call_prom should be 32-bit values.
79 * On ppc64, 64 bit values are truncated to 32 bits (and
80 * fortunately don't get interpreted as two arguments).
81 */
82 #define ADDR(x) (u32)(unsigned long)(x)
83
84 #ifdef CONFIG_PPC64
85 #define OF_WORKAROUNDS 0
86 #else
87 #define OF_WORKAROUNDS of_workarounds
88 int of_workarounds;
89 #endif
90
91 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
92 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
93
94 #define PROM_BUG() do { \
95 prom_printf("kernel BUG at %s line 0x%x!\n", \
96 __FILE__, __LINE__); \
97 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
98 } while (0)
99
100 #ifdef DEBUG_PROM
101 #define prom_debug(x...) prom_printf(x)
102 #else
103 #define prom_debug(x...)
104 #endif
105
106
107 typedef u32 prom_arg_t;
108
109 struct prom_args {
110 u32 service;
111 u32 nargs;
112 u32 nret;
113 prom_arg_t args[10];
114 };
115
116 struct prom_t {
117 ihandle root;
118 phandle chosen;
119 int cpu;
120 ihandle stdout;
121 ihandle mmumap;
122 ihandle memory;
123 };
124
125 struct mem_map_entry {
126 u64 base;
127 u64 size;
128 };
129
130 typedef u32 cell_t;
131
132 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
133 unsigned long r6, unsigned long r7, unsigned long r8,
134 unsigned long r9);
135
136 #ifdef CONFIG_PPC64
137 extern int enter_prom(struct prom_args *args, unsigned long entry);
138 #else
139 static inline int enter_prom(struct prom_args *args, unsigned long entry)
140 {
141 return ((int (*)(struct prom_args *))entry)(args);
142 }
143 #endif
144
145 extern void copy_and_flush(unsigned long dest, unsigned long src,
146 unsigned long size, unsigned long offset);
147
148 /* prom structure */
149 static struct prom_t __initdata prom;
150
151 static unsigned long prom_entry __initdata;
152
153 #define PROM_SCRATCH_SIZE 256
154
155 static char __initdata of_stdout_device[256];
156 static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
157
158 static unsigned long __initdata dt_header_start;
159 static unsigned long __initdata dt_struct_start, dt_struct_end;
160 static unsigned long __initdata dt_string_start, dt_string_end;
161
162 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
163
164 #ifdef CONFIG_PPC64
165 static int __initdata prom_iommu_force_on;
166 static int __initdata prom_iommu_off;
167 static unsigned long __initdata prom_tce_alloc_start;
168 static unsigned long __initdata prom_tce_alloc_end;
169 #endif
170
171 /* Platform codes are now obsolete in the kernel; they are only used within
172 * this file and will ultimately go away too. Feel free to change them if you
173 * need to, they are not shared with anything outside of this file anymore.
174 */
175 #define PLATFORM_PSERIES 0x0100
176 #define PLATFORM_PSERIES_LPAR 0x0101
177 #define PLATFORM_LPAR 0x0001
178 #define PLATFORM_POWERMAC 0x0400
179 #define PLATFORM_GENERIC 0x0500
180 #define PLATFORM_OPAL 0x0600
181
182 static int __initdata of_platform;
183
184 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
185
186 static unsigned long __initdata prom_memory_limit;
187
188 static unsigned long __initdata alloc_top;
189 static unsigned long __initdata alloc_top_high;
190 static unsigned long __initdata alloc_bottom;
191 static unsigned long __initdata rmo_top;
192 static unsigned long __initdata ram_top;
193
194 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
195 static int __initdata mem_reserve_cnt;
196
197 static cell_t __initdata regbuf[1024];
198
199
200 /*
201 * Error results ... some OF calls will return "-1" on error, some
202 * will return 0, some will return either. To simplify, here are
203 * macros to use with any ihandle or phandle return value to check if
204 * it is valid
205 */
206
207 #define PROM_ERROR (-1u)
208 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
209 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
210
211
212 /* This is the one and *ONLY* place where we actually call open
213 * firmware.
214 */
215
216 static int __init call_prom(const char *service, int nargs, int nret, ...)
217 {
218 int i;
219 struct prom_args args;
220 va_list list;
221
222 args.service = ADDR(service);
223 args.nargs = nargs;
224 args.nret = nret;
225
226 va_start(list, nret);
227 for (i = 0; i < nargs; i++)
228 args.args[i] = va_arg(list, prom_arg_t);
229 va_end(list);
230
231 for (i = 0; i < nret; i++)
232 args.args[nargs+i] = 0;
233
234 if (enter_prom(&args, prom_entry) < 0)
235 return PROM_ERROR;
236
237 return (nret > 0) ? args.args[nargs] : 0;
238 }
239
240 static int __init call_prom_ret(const char *service, int nargs, int nret,
241 prom_arg_t *rets, ...)
242 {
243 int i;
244 struct prom_args args;
245 va_list list;
246
247 args.service = ADDR(service);
248 args.nargs = nargs;
249 args.nret = nret;
250
251 va_start(list, rets);
252 for (i = 0; i < nargs; i++)
253 args.args[i] = va_arg(list, prom_arg_t);
254 va_end(list);
255
256 for (i = 0; i < nret; i++)
257 args.args[nargs+i] = 0;
258
259 if (enter_prom(&args, prom_entry) < 0)
260 return PROM_ERROR;
261
262 if (rets != NULL)
263 for (i = 1; i < nret; ++i)
264 rets[i-1] = args.args[nargs+i];
265
266 return (nret > 0) ? args.args[nargs] : 0;
267 }
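/*
 * Illustrative sketch, not part of the original file: what a typical
 * client-interface call through the wrappers above looks like. "getprop"
 * takes 4 input cells and returns 1; pointer arguments are squeezed
 * through ADDR()/(u32) casts into the 32-bit cells OF expects (see the
 * comment above ADDR). This simply mirrors the prom_getprop() helper
 * defined further down; the function name here is hypothetical.
 */
#if 0
static int __init example_read_model(phandle node, char *buf, int buflen)
{
	return call_prom("getprop", 4, 1, node, ADDR("model"),
			 (u32)(unsigned long)buf, (u32)buflen);
}
#endif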
268
269
270 static void __init prom_print(const char *msg)
271 {
272 const char *p, *q;
273
274 if (prom.stdout == 0)
275 return;
276
277 for (p = msg; *p != 0; p = q) {
278 for (q = p; *q != 0 && *q != '\n'; ++q)
279 ;
280 if (q > p)
281 call_prom("write", 3, 1, prom.stdout, p, q - p);
282 if (*q == 0)
283 break;
284 ++q;
285 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
286 }
287 }
288
289
290 static void __init prom_print_hex(unsigned long val)
291 {
292 int i, nibbles = sizeof(val)*2;
293 char buf[sizeof(val)*2+1];
294
295 for (i = nibbles-1; i >= 0; i--) {
296 buf[i] = (val & 0xf) + '0';
297 if (buf[i] > '9')
298 buf[i] += ('a'-'0'-10);
299 val >>= 4;
300 }
301 buf[nibbles] = '\0';
302 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
303 }
304
305 /* max number of decimal digits in an unsigned long */
306 #define UL_DIGITS 21
307 static void __init prom_print_dec(unsigned long val)
308 {
309 int i, size;
310 char buf[UL_DIGITS+1];
311
312 for (i = UL_DIGITS-1; i >= 0; i--) {
313 buf[i] = (val % 10) + '0';
314 val = val/10;
315 if (val == 0)
316 break;
317 }
318 /* write out only the digits we filled in */
319 size = UL_DIGITS - i;
320 call_prom("write", 3, 1, prom.stdout, buf+i, size);
321 }
322
323 static void __init prom_printf(const char *format, ...)
324 {
325 const char *p, *q, *s;
326 va_list args;
327 unsigned long v;
328 long vs;
329
330 va_start(args, format);
331 for (p = format; *p != 0; p = q) {
332 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
333 ;
334 if (q > p)
335 call_prom("write", 3, 1, prom.stdout, p, q - p);
336 if (*q == 0)
337 break;
338 if (*q == '\n') {
339 ++q;
340 call_prom("write", 3, 1, prom.stdout,
341 ADDR("\r\n"), 2);
342 continue;
343 }
344 ++q;
345 if (*q == 0)
346 break;
347 switch (*q) {
348 case 's':
349 ++q;
350 s = va_arg(args, const char *);
351 prom_print(s);
352 break;
353 case 'x':
354 ++q;
355 v = va_arg(args, unsigned long);
356 prom_print_hex(v);
357 break;
358 case 'd':
359 ++q;
360 vs = va_arg(args, int);
361 if (vs < 0) {
362 prom_print("-");
363 vs = -vs;
364 }
365 prom_print_dec(vs);
366 break;
367 case 'l':
368 ++q;
369 if (*q == 0)
370 break;
371 else if (*q == 'x') {
372 ++q;
373 v = va_arg(args, unsigned long);
374 prom_print_hex(v);
375 } else if (*q == 'u') { /* '%lu' */
376 ++q;
377 v = va_arg(args, unsigned long);
378 prom_print_dec(v);
379 } else if (*q == 'd') { /* %ld */
380 ++q;
381 vs = va_arg(args, long);
382 if (vs < 0) {
383 prom_print("-");
384 vs = -vs;
385 }
386 prom_print_dec(vs);
387 }
388 break;
389 }
390 }
391 }
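/*
 * Illustrative sketch, not part of the original file: prom_printf()
 * above only understands %s, %d, %x, %lx, %lu and %ld, so callers have
 * to stay within that set. Hypothetical usage only.
 */
#if 0
static void __init example_report(unsigned long base, unsigned long size)
{
	prom_printf("claimed %lu bytes at 0x%lx\n", size, base);
}
#endif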
392
393
394 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
395 unsigned long align)
396 {
397
398 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
399 /*
400 * Old OF requires we claim physical and virtual separately
401 * and then map explicitly (assuming virtual mode)
402 */
403 int ret;
404 prom_arg_t result;
405
406 ret = call_prom_ret("call-method", 5, 2, &result,
407 ADDR("claim"), prom.memory,
408 align, size, virt);
409 if (ret != 0 || result == -1)
410 return -1;
411 ret = call_prom_ret("call-method", 5, 2, &result,
412 ADDR("claim"), prom.mmumap,
413 align, size, virt);
414 if (ret != 0) {
415 call_prom("call-method", 4, 1, ADDR("release"),
416 prom.memory, size, virt);
417 return -1;
418 }
419 /* the 0x12 is M (coherence) + PP == read/write */
420 call_prom("call-method", 6, 1,
421 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
422 return virt;
423 }
424 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
425 (prom_arg_t)align);
426 }
427
428 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
429 {
430 prom_print(reason);
431 /* Do not call exit, because it clears the screen on pmac;
432 * it also causes some sort of double-fault on early pmacs. */
433 if (of_platform == PLATFORM_POWERMAC)
434 asm("trap\n");
435
436 /* ToDo: should put up an SRC here on pSeries */
437 call_prom("exit", 0, 0);
438
439 for (;;) /* should never get here */
440 ;
441 }
442
443
444 static int __init prom_next_node(phandle *nodep)
445 {
446 phandle node;
447
448 if ((node = *nodep) != 0
449 && (*nodep = call_prom("child", 1, 1, node)) != 0)
450 return 1;
451 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
452 return 1;
453 for (;;) {
454 if ((node = call_prom("parent", 1, 1, node)) == 0)
455 return 0;
456 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
457 return 1;
458 }
459 }
460
461 static int inline prom_getprop(phandle node, const char *pname,
462 void *value, size_t valuelen)
463 {
464 return call_prom("getprop", 4, 1, node, ADDR(pname),
465 (u32)(unsigned long) value, (u32) valuelen);
466 }
467
468 static int inline prom_getproplen(phandle node, const char *pname)
469 {
470 return call_prom("getproplen", 2, 1, node, ADDR(pname));
471 }
472
473 static void add_string(char **str, const char *q)
474 {
475 char *p = *str;
476
477 while (*q)
478 *p++ = *q++;
479 *p++ = ' ';
480 *str = p;
481 }
482
483 static char *tohex(unsigned int x)
484 {
485 static char digits[] = "0123456789abcdef";
486 static char result[9];
487 int i;
488
489 result[8] = 0;
490 i = 8;
491 do {
492 --i;
493 result[i] = digits[x & 0xf];
494 x >>= 4;
495 } while (x != 0 && i > 0);
496 return &result[i];
497 }
498
499 static int __init prom_setprop(phandle node, const char *nodename,
500 const char *pname, void *value, size_t valuelen)
501 {
502 char cmd[256], *p;
503
504 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
505 return call_prom("setprop", 4, 1, node, ADDR(pname),
506 (u32)(unsigned long) value, (u32) valuelen);
507
508 /* gah... setprop doesn't work on longtrail, have to use interpret */
509 p = cmd;
510 add_string(&p, "dev");
511 add_string(&p, nodename);
512 add_string(&p, tohex((u32)(unsigned long) value));
513 add_string(&p, tohex(valuelen));
514 add_string(&p, tohex(ADDR(pname)));
515 add_string(&p, tohex(strlen(pname)));
516 add_string(&p, "property");
517 *p = 0;
518 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
519 }
520
521 /* We can't use the standard versions because of relocation headaches. */
522 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \
523 || ('a' <= (c) && (c) <= 'f') \
524 || ('A' <= (c) && (c) <= 'F'))
525
526 #define isdigit(c) ('0' <= (c) && (c) <= '9')
527 #define islower(c) ('a' <= (c) && (c) <= 'z')
528 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
529
530 unsigned long prom_strtoul(const char *cp, const char **endp)
531 {
532 unsigned long result = 0, base = 10, value;
533
534 if (*cp == '0') {
535 base = 8;
536 cp++;
537 if (toupper(*cp) == 'X') {
538 cp++;
539 base = 16;
540 }
541 }
542
543 while (isxdigit(*cp) &&
544 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
545 result = result * base + value;
546 cp++;
547 }
548
549 if (endp)
550 *endp = cp;
551
552 return result;
553 }
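/*
 * Illustrative sketch, not part of the original file: how prom_strtoul()
 * above picks its base ("0x..." -> 16, a leading "0" -> 8, otherwise 10).
 * Worked examples only; the function name is hypothetical.
 */
#if 0
static void __init example_strtoul(void)
{
	const char *end;

	prom_strtoul("42", &end);	/* 42 (base 10) */
	prom_strtoul("052", &end);	/* 42 (base 8)  */
	prom_strtoul("0x2a", &end);	/* 42 (base 16) */
}
#endif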
554
555 unsigned long prom_memparse(const char *ptr, const char **retptr)
556 {
557 unsigned long ret = prom_strtoul(ptr, retptr);
558 int shift = 0;
559
560 /*
561 * We can't use a switch here because GCC *may* generate a
562 * jump table which won't work, because we're not running at
563 * the address we're linked at.
564 */
565 if ('G' == **retptr || 'g' == **retptr)
566 shift = 30;
567
568 if ('M' == **retptr || 'm' == **retptr)
569 shift = 20;
570
571 if ('K' == **retptr || 'k' == **retptr)
572 shift = 10;
573
574 if (shift) {
575 ret <<= shift;
576 (*retptr)++;
577 }
578
579 return ret;
580 }
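/*
 * Illustrative sketch, not part of the original file: the suffix handling
 * above turns a "mem=512M" argument into 512 << 20 bytes and leaves
 * *retptr pointing just past the 'M'. Worked example only; the function
 * name is hypothetical.
 */
#if 0
static void __init example_memparse(void)
{
	const char *p = "512M";
	unsigned long limit = prom_memparse(p, &p);	/* 0x20000000 */
}
#endif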
581
582 /*
583 * Early parsing of the command line passed to the kernel, used for
584 * "mem=x" and the options that affect the iommu
585 */
586 static void __init early_cmdline_parse(void)
587 {
588 const char *opt;
589
590 char *p;
591 int l = 0;
592
593 prom_cmd_line[0] = 0;
594 p = prom_cmd_line;
595 if ((long)prom.chosen > 0)
596 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
597 #ifdef CONFIG_CMDLINE
598 if (l <= 0 || p[0] == '\0') /* dbl check */
599 strlcpy(prom_cmd_line,
600 CONFIG_CMDLINE, sizeof(prom_cmd_line));
601 #endif /* CONFIG_CMDLINE */
602 prom_printf("command line: %s\n", prom_cmd_line);
603
604 #ifdef CONFIG_PPC64
605 opt = strstr(prom_cmd_line, "iommu=");
606 if (opt) {
607 prom_printf("iommu opt is: %s\n", opt);
608 opt += 6;
609 while (*opt && *opt == ' ')
610 opt++;
611 if (!strncmp(opt, "off", 3))
612 prom_iommu_off = 1;
613 else if (!strncmp(opt, "force", 5))
614 prom_iommu_force_on = 1;
615 }
616 #endif
617 opt = strstr(prom_cmd_line, "mem=");
618 if (opt) {
619 opt += 4;
620 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
621 #ifdef CONFIG_PPC64
622 /* Align to 16 MB == size of ppc64 large page */
623 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
624 #endif
625 }
626 }
627
628 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
629 /*
630 * The architecture vector has an array of PVR mask/value pairs,
631 * followed by # option vectors - 1, followed by the option vectors.
632 *
633 * See prom.h for the definition of the bits specified in the
634 * architecture vector.
635 *
636 * Because the description vector contains a mix of byte and word
637 * values, we declare it as an unsigned char array, and use this
638 * macro to put word values in.
639 */
640 #define W(x) ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
641 ((x) >> 8) & 0xff, (x) & 0xff
642
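/*
 * Illustrative sketch, not part of the original file: W() splits a word
 * into four big-endian bytes so it can be embedded in the unsigned char
 * array below, e.g. the POWER6 PVR value used in the vector:
 */
#if 0
static const unsigned char example_w[] = { W(0x003e0000) };
/* == { 0x00, 0x3e, 0x00, 0x00 } */
#endif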
643 unsigned char ibm_architecture_vec[] = {
644 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
645 W(0xffff0000), W(0x003e0000), /* POWER6 */
646 W(0xffff0000), W(0x003f0000), /* POWER7 */
647 W(0xffff0000), W(0x004b0000), /* POWER8E */
648 W(0xffff0000), W(0x004d0000), /* POWER8 */
649 W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
650 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
651 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
652 W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */
653 6 - 1, /* 6 option vectors */
654
655 /* option vector 1: processor architectures supported */
656 3 - 2, /* length */
657 0, /* don't ignore, don't halt */
658 OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
659 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
660
661 /* option vector 2: Open Firmware options supported */
662 34 - 2, /* length */
663 OV2_REAL_MODE,
664 0, 0,
665 W(0xffffffff), /* real_base */
666 W(0xffffffff), /* real_size */
667 W(0xffffffff), /* virt_base */
668 W(0xffffffff), /* virt_size */
669 W(0xffffffff), /* load_base */
670 W(256), /* 256MB min RMA */
671 W(0xffffffff), /* full client load */
672 0, /* min RMA percentage of total RAM */
673 48, /* max log_2(hash table size) */
674
675 /* option vector 3: processor options supported */
676 3 - 2, /* length */
677 0, /* don't ignore, don't halt */
678 OV3_FP | OV3_VMX | OV3_DFP,
679
680 /* option vector 4: IBM PAPR implementation */
681 3 - 2, /* length */
682 0, /* don't halt */
683 OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
684
685 /* option vector 5: PAPR/OF options */
686 19 - 2, /* length */
687 0, /* don't ignore, don't halt */
688 OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
689 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
690 #ifdef CONFIG_PCI_MSI
691 /* PCIe/MSI support. Without MSI full PCIe is not supported */
692 OV5_FEAT(OV5_MSI),
693 #else
694 0,
695 #endif
696 0,
697 #ifdef CONFIG_PPC_SMLPAR
698 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
699 #else
700 0,
701 #endif
702 OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
703 0,
704 0,
705 0,
706 /* WARNING: The offset of the "number of cores" field below
707 * must match the value of the macro below. Update the definition if
708 * the structure layout changes.
709 */
710 #define IBM_ARCH_VEC_NRCORES_OFFSET 125
711 W(NR_CPUS), /* number of cores supported */
712 0,
713 0,
714 0,
715 0,
716 OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
717 OV5_FEAT(OV5_PFO_HW_842),
718 OV5_FEAT(OV5_SUB_PROCESSORS),
719 /* option vector 6: IBM PAPR hints */
720 4 - 2, /* length */
721 0,
722 0,
723 OV6_LINUX,
724
725 };
726
727 /* Old method - ELF header with PT_NOTE sections */
728 static struct fake_elf {
729 Elf32_Ehdr elfhdr;
730 Elf32_Phdr phdr[2];
731 struct chrpnote {
732 u32 namesz;
733 u32 descsz;
734 u32 type;
735 char name[8]; /* "PowerPC" */
736 struct chrpdesc {
737 u32 real_mode;
738 u32 real_base;
739 u32 real_size;
740 u32 virt_base;
741 u32 virt_size;
742 u32 load_base;
743 } chrpdesc;
744 } chrpnote;
745 struct rpanote {
746 u32 namesz;
747 u32 descsz;
748 u32 type;
749 char name[24]; /* "IBM,RPA-Client-Config" */
750 struct rpadesc {
751 u32 lpar_affinity;
752 u32 min_rmo_size;
753 u32 min_rmo_percent;
754 u32 max_pft_size;
755 u32 splpar;
756 u32 min_load;
757 u32 new_mem_def;
758 u32 ignore_me;
759 } rpadesc;
760 } rpanote;
761 } fake_elf = {
762 .elfhdr = {
763 .e_ident = { 0x7f, 'E', 'L', 'F',
764 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
765 .e_type = ET_EXEC, /* yeah right */
766 .e_machine = EM_PPC,
767 .e_version = EV_CURRENT,
768 .e_phoff = offsetof(struct fake_elf, phdr),
769 .e_phentsize = sizeof(Elf32_Phdr),
770 .e_phnum = 2
771 },
772 .phdr = {
773 [0] = {
774 .p_type = PT_NOTE,
775 .p_offset = offsetof(struct fake_elf, chrpnote),
776 .p_filesz = sizeof(struct chrpnote)
777 }, [1] = {
778 .p_type = PT_NOTE,
779 .p_offset = offsetof(struct fake_elf, rpanote),
780 .p_filesz = sizeof(struct rpanote)
781 }
782 },
783 .chrpnote = {
784 .namesz = sizeof("PowerPC"),
785 .descsz = sizeof(struct chrpdesc),
786 .type = 0x1275,
787 .name = "PowerPC",
788 .chrpdesc = {
789 .real_mode = ~0U, /* ~0 means "don't care" */
790 .real_base = ~0U,
791 .real_size = ~0U,
792 .virt_base = ~0U,
793 .virt_size = ~0U,
794 .load_base = ~0U
795 },
796 },
797 .rpanote = {
798 .namesz = sizeof("IBM,RPA-Client-Config"),
799 .descsz = sizeof(struct rpadesc),
800 .type = 0x12759999,
801 .name = "IBM,RPA-Client-Config",
802 .rpadesc = {
803 .lpar_affinity = 0,
804 .min_rmo_size = 64, /* in megabytes */
805 .min_rmo_percent = 0,
806 .max_pft_size = 48, /* 2^48 bytes max PFT size */
807 .splpar = 1,
808 .min_load = ~0U,
809 .new_mem_def = 0
810 }
811 }
812 };
813
814 static int __init prom_count_smt_threads(void)
815 {
816 phandle node;
817 char type[64];
818 unsigned int plen;
819
820 /* Pick up the first CPU node we can find */
821 for (node = 0; prom_next_node(&node); ) {
822 type[0] = 0;
823 prom_getprop(node, "device_type", type, sizeof(type));
824
825 if (strcmp(type, "cpu"))
826 continue;
827 /*
828 * There is an entry for each smt thread, each entry being
829 * 4 bytes long. All cpus should have the same number of
830 * smt threads, so return after finding the first.
831 */
832 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
833 if (plen == PROM_ERROR)
834 break;
835 plen >>= 2;
836 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
837
838 /* Sanity check */
839 if (plen < 1 || plen > 64) {
840 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
841 (unsigned long)plen);
842 return 1;
843 }
844 return plen;
845 }
846 prom_debug("No threads found, assuming 1 per core\n");
847
848 return 1;
849
850 }
851
852
853 static void __init prom_send_capabilities(void)
854 {
855 ihandle elfloader, root;
856 prom_arg_t ret;
857 u32 *cores;
858
859 root = call_prom("open", 1, 1, ADDR("/"));
860 if (root != 0) {
861 /* We need to tell the FW about the number of cores we support.
862 *
863 * To do that, we count the number of threads on the first core
864 * (we assume this is the same for all cores) and use it to
865 * divide NR_CPUS.
866 */
867 cores = (u32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
868 if (*cores != NR_CPUS) {
869 prom_printf("WARNING ! "
870 "ibm_architecture_vec structure inconsistent: %lu!\n",
871 *cores);
872 } else {
873 *cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
874 prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
875 *cores, NR_CPUS);
876 }
877
878 /* try calling the ibm,client-architecture-support method */
879 prom_printf("Calling ibm,client-architecture-support...");
880 if (call_prom_ret("call-method", 3, 2, &ret,
881 ADDR("ibm,client-architecture-support"),
882 root,
883 ADDR(ibm_architecture_vec)) == 0) {
884 /* the call exists... */
885 if (ret)
886 prom_printf("\nWARNING: ibm,client-architecture"
887 "-support call FAILED!\n");
888 call_prom("close", 1, 0, root);
889 prom_printf(" done\n");
890 return;
891 }
892 call_prom("close", 1, 0, root);
893 prom_printf(" not implemented\n");
894 }
895
896 /* no ibm,client-architecture-support call, try the old way */
897 elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
898 if (elfloader == 0) {
899 prom_printf("couldn't open /packages/elf-loader\n");
900 return;
901 }
902 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
903 elfloader, ADDR(&fake_elf));
904 call_prom("close", 1, 0, elfloader);
905 }
906 #endif
907
908 /*
909 * Memory allocation strategy... our layout is normally:
910 *
911 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
912 * rare cases, initrd might end up being before the kernel though.
913 * We assume this won't overwrite the final kernel at 0; we have no
914 * provision to handle that in this version, but it should hopefully
915 * never happen.
916 *
917 * alloc_top is set to the top of RMO, and is eventually shrunk down if
918 * the TCEs overlap.
919 *
920 * alloc_bottom is set to the top of kernel/initrd
921 *
922 * from there, allocations are done this way: rtas is allocated
923 * topmost, and the device-tree is allocated from the bottom. We try
924 * to grow the device-tree allocation as we progress. If we can't,
925 * then we fail, we don't currently have a facility to restart
926 * elsewhere, but that shouldn't be necessary.
927 *
928 * Note that calls to reserve_mem have to be done explicitly, memory
929 * allocated with either alloc_up or alloc_down isn't automatically
930 * reserved.
931 */
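/*
 * Illustrative sketch, not part of the original file: the layout the
 * strategy above typically produces inside the RMO (addresses growing
 * upward):
 *
 *	0             kernel image (vmlinux)
 *	              initrd (if any)
 *	alloc_bottom  device-tree grows up from here
 *	              ...
 *	alloc_top     RTAS and friends are allocated down from here
 *	rmo_top       top of the RMO
 */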
932
933
934 /*
935 * Allocates memory in the RMO upward from the kernel/initrd
936 *
937 * When align is 0, this is a special case, it means to allocate in place
938 * at the current location of alloc_bottom or fail (that is basically
939 * extending the previous allocation). Used for the device-tree flattening
940 */
941 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
942 {
943 unsigned long base = alloc_bottom;
944 unsigned long addr = 0;
945
946 if (align)
947 base = _ALIGN_UP(base, align);
948 prom_debug("alloc_up(%x, %x)\n", size, align);
949 if (ram_top == 0)
950 prom_panic("alloc_up() called with mem not initialized\n");
951
952 if (align)
953 base = _ALIGN_UP(alloc_bottom, align);
954 else
955 base = alloc_bottom;
956
957 for(; (base + size) <= alloc_top;
958 base = _ALIGN_UP(base + 0x100000, align)) {
959 prom_debug(" trying: 0x%x\n\r", base);
960 addr = (unsigned long)prom_claim(base, size, 0);
961 if (addr != PROM_ERROR && addr != 0)
962 break;
963 addr = 0;
964 if (align == 0)
965 break;
966 }
967 if (addr == 0)
968 return 0;
969 alloc_bottom = addr + size;
970
971 prom_debug(" -> %x\n", addr);
972 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
973 prom_debug(" alloc_top : %x\n", alloc_top);
974 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
975 prom_debug(" rmo_top : %x\n", rmo_top);
976 prom_debug(" ram_top : %x\n", ram_top);
977
978 return addr;
979 }
980
981 /*
982 * Allocates memory downward, either from top of RMO, or if highmem
983 * is set, from the top of RAM. Note that this one doesn't handle
984 * failures. It does claim memory if highmem is not set.
985 */
986 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
987 int highmem)
988 {
989 unsigned long base, addr = 0;
990
991 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
992 highmem ? "(high)" : "(low)");
993 if (ram_top == 0)
994 prom_panic("alloc_down() called with mem not initialized\n");
995
996 if (highmem) {
997 /* Carve out storage for the TCE table. */
998 addr = _ALIGN_DOWN(alloc_top_high - size, align);
999 if (addr <= alloc_bottom)
1000 return 0;
1001 /* Will we bump into the RMO ? If yes, check out that we
1002 * didn't overlap existing allocations there, if we did,
1003 * we are dead, we must be the first in town !
1004 */
1005 if (addr < rmo_top) {
1006 /* Good, we are first */
1007 if (alloc_top == rmo_top)
1008 alloc_top = rmo_top = addr;
1009 else
1010 return 0;
1011 }
1012 alloc_top_high = addr;
1013 goto bail;
1014 }
1015
1016 base = _ALIGN_DOWN(alloc_top - size, align);
1017 for (; base > alloc_bottom;
1018 base = _ALIGN_DOWN(base - 0x100000, align)) {
1019 prom_debug(" trying: 0x%x\n\r", base);
1020 addr = (unsigned long)prom_claim(base, size, 0);
1021 if (addr != PROM_ERROR && addr != 0)
1022 break;
1023 addr = 0;
1024 }
1025 if (addr == 0)
1026 return 0;
1027 alloc_top = addr;
1028
1029 bail:
1030 prom_debug(" -> %x\n", addr);
1031 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
1032 prom_debug(" alloc_top : %x\n", alloc_top);
1033 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
1034 prom_debug(" rmo_top : %x\n", rmo_top);
1035 prom_debug(" ram_top : %x\n", ram_top);
1036
1037 return addr;
1038 }
1039
1040 /*
1041 * Parse a "reg" cell
1042 */
1043 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1044 {
1045 cell_t *p = *cellp;
1046 unsigned long r = 0;
1047
1048 /* Ignore more than 2 cells */
1049 while (s > sizeof(unsigned long) / 4) {
1050 p++;
1051 s--;
1052 }
1053 r = *p++;
1054 #ifdef CONFIG_PPC64
1055 if (s > 1) {
1056 r <<= 32;
1057 r |= *(p++);
1058 }
1059 #endif
1060 *cellp = p;
1061 return r;
1062 }
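/*
 * Illustrative sketch, not part of the original file: decoding one "reg"
 * entry with #address-cells = 2 and #size-cells = 1, the way
 * prom_init_mem() below walks regbuf. The function name is hypothetical.
 */
#if 0
static void __init example_parse_reg(cell_t *p)
{
	unsigned long base = prom_next_cell(2, &p);	/* two address cells */
	unsigned long size = prom_next_cell(1, &p);	/* one size cell */
}
#endif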
1063
1064 /*
1065 * Very dumb function for adding to the memory reserve list, but
1066 * we don't need anything smarter at this point
1067 *
1068 * XXX Eventually check for collisions. They should NEVER happen.
1069 * If problems seem to show up, it would be a good start to track
1070 * them down.
1071 */
1072 static void __init reserve_mem(u64 base, u64 size)
1073 {
1074 u64 top = base + size;
1075 unsigned long cnt = mem_reserve_cnt;
1076
1077 if (size == 0)
1078 return;
1079
1080 /* We need to always keep one empty entry so that we
1081 * have our terminator with "size" set to 0 since we are
1082 * dumb and just copy this entire array to the boot params
1083 */
1084 base = _ALIGN_DOWN(base, PAGE_SIZE);
1085 top = _ALIGN_UP(top, PAGE_SIZE);
1086 size = top - base;
1087
1088 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1089 prom_panic("Memory reserve map exhausted !\n");
1090 mem_reserve_map[cnt].base = base;
1091 mem_reserve_map[cnt].size = size;
1092 mem_reserve_cnt = cnt + 1;
1093 }
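/*
 * Illustrative sketch, not part of the original file: because reserve_mem()
 * above always keeps a terminating entry with size == 0, a consumer of the
 * copied map can simply walk it until that terminator. The function name
 * and callback are hypothetical.
 */
#if 0
static void __init walk_reserve_map(const struct mem_map_entry *map,
				    void (*handle)(u64 base, u64 size))
{
	for (; map->size != 0; map++)
		handle(map->base, map->size);
}
#endif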
1094
1095 /*
1096 * Initialize the memory allocation mechanism: parse "memory" nodes to
1097 * obtain the top of memory and of the RMO, and set up our local allocator
1098 */
1099 static void __init prom_init_mem(void)
1100 {
1101 phandle node;
1102 char *path, type[64];
1103 unsigned int plen;
1104 cell_t *p, *endp;
1105 u32 rac, rsc;
1106
1107 /*
1108 * We iterate the memory nodes to find
1109 * 1) top of RMO (first node)
1110 * 2) top of memory
1111 */
1112 rac = 2;
1113 prom_getprop(prom.root, "#address-cells", &rac, sizeof(rac));
1114 rsc = 1;
1115 prom_getprop(prom.root, "#size-cells", &rsc, sizeof(rsc));
1116 prom_debug("root_addr_cells: %x\n", (unsigned long) rac);
1117 prom_debug("root_size_cells: %x\n", (unsigned long) rsc);
1118
1119 prom_debug("scanning memory:\n");
1120 path = prom_scratch;
1121
1122 for (node = 0; prom_next_node(&node); ) {
1123 type[0] = 0;
1124 prom_getprop(node, "device_type", type, sizeof(type));
1125
1126 if (type[0] == 0) {
1127 /*
1128 * CHRP Longtrail machines have no device_type
1129 * on the memory node, so check the name instead...
1130 */
1131 prom_getprop(node, "name", type, sizeof(type));
1132 }
1133 if (strcmp(type, "memory"))
1134 continue;
1135
1136 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1137 if (plen > sizeof(regbuf)) {
1138 prom_printf("memory node too large for buffer !\n");
1139 plen = sizeof(regbuf);
1140 }
1141 p = regbuf;
1142 endp = p + (plen / sizeof(cell_t));
1143
1144 #ifdef DEBUG_PROM
1145 memset(path, 0, PROM_SCRATCH_SIZE);
1146 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1147 prom_debug(" node %s :\n", path);
1148 #endif /* DEBUG_PROM */
1149
1150 while ((endp - p) >= (rac + rsc)) {
1151 unsigned long base, size;
1152
1153 base = prom_next_cell(rac, &p);
1154 size = prom_next_cell(rsc, &p);
1155
1156 if (size == 0)
1157 continue;
1158 prom_debug(" %x %x\n", base, size);
1159 if (base == 0 && (of_platform & PLATFORM_LPAR))
1160 rmo_top = size;
1161 if ((base + size) > ram_top)
1162 ram_top = base + size;
1163 }
1164 }
1165
1166 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1167
1168 /*
1169 * If prom_memory_limit is set we reduce the upper limits *except* for
1170 * alloc_top_high. This must be the real top of RAM so we can put
1171 * TCE's up there.
1172 */
1173
1174 alloc_top_high = ram_top;
1175
1176 if (prom_memory_limit) {
1177 if (prom_memory_limit <= alloc_bottom) {
1178 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
1179 prom_memory_limit);
1180 prom_memory_limit = 0;
1181 } else if (prom_memory_limit >= ram_top) {
1182 prom_printf("Ignoring mem=%x >= ram_top.\n",
1183 prom_memory_limit);
1184 prom_memory_limit = 0;
1185 } else {
1186 ram_top = prom_memory_limit;
1187 rmo_top = min(rmo_top, prom_memory_limit);
1188 }
1189 }
1190
1191 /*
1192 * Setup our top alloc point, that is top of RMO or top of
1193 * segment 0 when running non-LPAR.
1194 * Some RS64 machines have buggy firmware where claims up at
1195 * 1GB fail. Cap at 768MB as a workaround.
1196 * Since 768MB is plenty of room, and we need to cap to something
1197 * reasonable on 32-bit, cap at 768MB on all machines.
1198 */
1199 if (!rmo_top)
1200 rmo_top = ram_top;
1201 rmo_top = min(0x30000000ul, rmo_top);
1202 alloc_top = rmo_top;
1203 alloc_top_high = ram_top;
1204
1205 /*
1206 * Check if we have an initrd after the kernel but still inside
1207 * the RMO. If we do, move our bottom point to after it.
1208 */
1209 if (prom_initrd_start &&
1210 prom_initrd_start < rmo_top &&
1211 prom_initrd_end > alloc_bottom)
1212 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1213
1214 prom_printf("memory layout at init:\n");
1215 prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
1216 prom_printf(" alloc_bottom : %x\n", alloc_bottom);
1217 prom_printf(" alloc_top : %x\n", alloc_top);
1218 prom_printf(" alloc_top_hi : %x\n", alloc_top_high);
1219 prom_printf(" rmo_top : %x\n", rmo_top);
1220 prom_printf(" ram_top : %x\n", ram_top);
1221 }
1222
1223 static void __init prom_close_stdin(void)
1224 {
1225 ihandle val;
1226
1227 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0)
1228 call_prom("close", 1, 0, val);
1229 }
1230
1231 #ifdef CONFIG_PPC_POWERNV
1232
1233 static u64 __initdata prom_opal_size;
1234 static u64 __initdata prom_opal_align;
1235 static int __initdata prom_rtas_start_cpu;
1236 static u64 __initdata prom_rtas_data;
1237 static u64 __initdata prom_rtas_entry;
1238
1239 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1240 static u64 __initdata prom_opal_base;
1241 static u64 __initdata prom_opal_entry;
1242 #endif
1243
1244 /* XXX Don't change this structure without updating opal-takeover.S */
1245 static struct opal_secondary_data {
1246 s64 ack; /* 0 */
1247 u64 go; /* 8 */
1248 struct opal_takeover_args args; /* 16 */
1249 } opal_secondary_data;
1250
1251 extern char opal_secondary_entry;
1252
1253 static void __init prom_query_opal(void)
1254 {
1255 long rc;
1256
1257 /* We must not query for OPAL presence on a machine that
1258 * supports TNK takeover (970 blades), as this uses the same
1259 * h-call with different arguments and will crash
1260 */
1261 if (PHANDLE_VALID(call_prom("finddevice", 1, 1,
1262 ADDR("/tnk-memory-map")))) {
1263 prom_printf("TNK takeover detected, skipping OPAL check\n");
1264 return;
1265 }
1266
1267 prom_printf("Querying for OPAL presence... ");
1268 rc = opal_query_takeover(&prom_opal_size,
1269 &prom_opal_align);
1270 prom_debug("(rc = %ld) ", rc);
1271 if (rc != 0) {
1272 prom_printf("not there.\n");
1273 return;
1274 }
1275 of_platform = PLATFORM_OPAL;
1276 prom_printf(" there !\n");
1277 prom_debug(" opal_size = 0x%lx\n", prom_opal_size);
1278 prom_debug(" opal_align = 0x%lx\n", prom_opal_align);
1279 if (prom_opal_align < 0x10000)
1280 prom_opal_align = 0x10000;
1281 }
1282
1283 static int prom_rtas_call(int token, int nargs, int nret, int *outputs, ...)
1284 {
1285 struct rtas_args rtas_args;
1286 va_list list;
1287 int i;
1288
1289 rtas_args.token = token;
1290 rtas_args.nargs = nargs;
1291 rtas_args.nret = nret;
1292 rtas_args.rets = (rtas_arg_t *)&(rtas_args.args[nargs]);
1293 va_start(list, outputs);
1294 for (i = 0; i < nargs; ++i)
1295 rtas_args.args[i] = va_arg(list, rtas_arg_t);
1296 va_end(list);
1297
1298 for (i = 0; i < nret; ++i)
1299 rtas_args.rets[i] = 0;
1300
1301 opal_enter_rtas(&rtas_args, prom_rtas_data,
1302 prom_rtas_entry);
1303
1304 if (nret > 1 && outputs != NULL)
1305 for (i = 0; i < nret-1; ++i)
1306 outputs[i] = rtas_args.rets[i+1];
1307 return (nret > 0)? rtas_args.rets[0]: 0;
1308 }
1309
1310 static void __init prom_opal_hold_cpus(void)
1311 {
1312 int i, cnt, cpu, rc;
1313 long j;
1314 phandle node;
1315 char type[64];
1316 u32 servers[8];
1317 void *entry = (unsigned long *)&opal_secondary_entry;
1318 struct opal_secondary_data *data = &opal_secondary_data;
1319
1320 prom_debug("prom_opal_hold_cpus: start...\n");
1321 prom_debug(" - entry = 0x%x\n", entry);
1322 prom_debug(" - data = 0x%x\n", data);
1323
1324 data->ack = -1;
1325 data->go = 0;
1326
1327 /* look for cpus */
1328 for (node = 0; prom_next_node(&node); ) {
1329 type[0] = 0;
1330 prom_getprop(node, "device_type", type, sizeof(type));
1331 if (strcmp(type, "cpu") != 0)
1332 continue;
1333
1334 /* Skip non-configured cpus. */
1335 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1336 if (strcmp(type, "okay") != 0)
1337 continue;
1338
1339 cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers,
1340 sizeof(servers));
1341 if (cnt == PROM_ERROR)
1342 break;
1343 cnt >>= 2;
1344 for (i = 0; i < cnt; i++) {
1345 cpu = servers[i];
1346 prom_debug("CPU %d ... ", cpu);
1347 if (cpu == prom.cpu) {
1348 prom_debug("booted !\n");
1349 continue;
1350 }
1351 prom_debug("starting ... ");
1352
1353 /* Init the acknowledge var which will be reset by
1354 * the secondary cpu when it awakens from its OF
1355 * spinloop.
1356 */
1357 data->ack = -1;
1358 rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1,
1359 NULL, cpu, entry, data);
1360 prom_debug("rtas rc=%d ...", rc);
1361
1362 for (j = 0; j < 100000000 && data->ack == -1; j++) {
1363 HMT_low();
1364 mb();
1365 }
1366 HMT_medium();
1367 if (data->ack != -1)
1368 prom_debug("done, PIR=0x%x\n", data->ack);
1369 else
1370 prom_debug("timeout !\n");
1371 }
1372 }
1373 prom_debug("prom_opal_hold_cpus: end...\n");
1374 }
1375
1376 static void __init prom_opal_takeover(void)
1377 {
1378 struct opal_secondary_data *data = &opal_secondary_data;
1379 struct opal_takeover_args *args = &data->args;
1380 u64 align = prom_opal_align;
1381 u64 top_addr, opal_addr;
1382
1383 args->k_image = (u64)_stext;
1384 args->k_size = _end - _stext;
1385 args->k_entry = 0;
1386 args->k_entry2 = 0x60;
1387
1388 top_addr = _ALIGN_UP(args->k_size, align);
1389
1390 if (prom_initrd_start != 0) {
1391 args->rd_image = prom_initrd_start;
1392 args->rd_size = prom_initrd_end - args->rd_image;
1393 args->rd_loc = top_addr;
1394 top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align);
1395 }
1396
1397 /* Pick up an address for the HAL. We want to go really high
1398 * up to avoid problems with future kexecs. On the other hand
1399 * we don't want to be all over the TCEs on P5IOC2 machines
1400 * which are going to be up there too. We assume the machine
1401 * has plenty of memory, and we ask for the HAL for now to
1402 * be just below the 1G point, or above the initrd
1403 */
1404 opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align);
1405 if (opal_addr < top_addr)
1406 opal_addr = top_addr;
1407 args->hal_addr = opal_addr;
1408
1409 /* Copy the command line to the kernel image */
1410 strlcpy(boot_command_line, prom_cmd_line,
1411 COMMAND_LINE_SIZE);
1412
1413 prom_debug(" k_image = 0x%lx\n", args->k_image);
1414 prom_debug(" k_size = 0x%lx\n", args->k_size);
1415 prom_debug(" k_entry = 0x%lx\n", args->k_entry);
1416 prom_debug(" k_entry2 = 0x%lx\n", args->k_entry2);
1417 prom_debug(" hal_addr = 0x%lx\n", args->hal_addr);
1418 prom_debug(" rd_image = 0x%lx\n", args->rd_image);
1419 prom_debug(" rd_size = 0x%lx\n", args->rd_size);
1420 prom_debug(" rd_loc = 0x%lx\n", args->rd_loc);
1421 prom_printf("Performing OPAL takeover, this can take a few minutes...\n");
1422 prom_close_stdin();
1423 mb();
1424 data->go = 1;
1425 for (;;)
1426 opal_do_takeover(args);
1427 }
1428
1429 /*
1430 * Allocate room for and instantiate OPAL
1431 */
1432 static void __init prom_instantiate_opal(void)
1433 {
1434 phandle opal_node;
1435 ihandle opal_inst;
1436 u64 base, entry;
1437 u64 size = 0, align = 0x10000;
1438 u32 rets[2];
1439
1440 prom_debug("prom_instantiate_opal: start...\n");
1441
1442 opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1443 prom_debug("opal_node: %x\n", opal_node);
1444 if (!PHANDLE_VALID(opal_node))
1445 return;
1446
1447 prom_getprop(opal_node, "opal-runtime-size", &size, sizeof(size));
1448 if (size == 0)
1449 return;
1450 prom_getprop(opal_node, "opal-runtime-alignment", &align,
1451 sizeof(align));
1452
1453 base = alloc_down(size, align, 0);
1454 if (base == 0) {
1455 prom_printf("OPAL allocation failed !\n");
1456 return;
1457 }
1458
1459 opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
1460 if (!IHANDLE_VALID(opal_inst)) {
1461 prom_printf("opening opal package failed (%x)\n", opal_inst);
1462 return;
1463 }
1464
1465 prom_printf("instantiating opal at 0x%x...", base);
1466
1467 if (call_prom_ret("call-method", 4, 3, rets,
1468 ADDR("load-opal-runtime"),
1469 opal_inst,
1470 base >> 32, base & 0xffffffff) != 0
1471 || (rets[0] == 0 && rets[1] == 0)) {
1472 prom_printf(" failed\n");
1473 return;
1474 }
1475 entry = (((u64)rets[0]) << 32) | rets[1];
1476
1477 prom_printf(" done\n");
1478
1479 reserve_mem(base, size);
1480
1481 prom_debug("opal base = 0x%x\n", base);
1482 prom_debug("opal align = 0x%x\n", align);
1483 prom_debug("opal entry = 0x%x\n", entry);
1484 prom_debug("opal size = 0x%x\n", (long)size);
1485
1486 prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
1487 &base, sizeof(base));
1488 prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
1489 &entry, sizeof(entry));
1490
1491 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1492 prom_opal_base = base;
1493 prom_opal_entry = entry;
1494 #endif
1495 prom_debug("prom_instantiate_opal: end...\n");
1496 }
1497
1498 #endif /* CONFIG_PPC_POWERNV */
1499
1500 /*
1501 * Allocate room for and instantiate RTAS
1502 */
1503 static void __init prom_instantiate_rtas(void)
1504 {
1505 phandle rtas_node;
1506 ihandle rtas_inst;
1507 u32 base, entry = 0;
1508 u32 size = 0;
1509
1510 prom_debug("prom_instantiate_rtas: start...\n");
1511
1512 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1513 prom_debug("rtas_node: %x\n", rtas_node);
1514 if (!PHANDLE_VALID(rtas_node))
1515 return;
1516
1517 prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
1518 if (size == 0)
1519 return;
1520
1521 base = alloc_down(size, PAGE_SIZE, 0);
1522 if (base == 0)
1523 prom_panic("Could not allocate memory for RTAS\n");
1524
1525 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1526 if (!IHANDLE_VALID(rtas_inst)) {
1527 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1528 return;
1529 }
1530
1531 prom_printf("instantiating rtas at 0x%x...", base);
1532
1533 if (call_prom_ret("call-method", 3, 2, &entry,
1534 ADDR("instantiate-rtas"),
1535 rtas_inst, base) != 0
1536 || entry == 0) {
1537 prom_printf(" failed\n");
1538 return;
1539 }
1540 prom_printf(" done\n");
1541
1542 reserve_mem(base, size);
1543
1544 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1545 &base, sizeof(base));
1546 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1547 &entry, sizeof(entry));
1548
1549 #ifdef CONFIG_PPC_POWERNV
1550 /* PowerNV takeover hack */
1551 prom_rtas_data = base;
1552 prom_rtas_entry = entry;
1553 prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4);
1554 #endif
1555 prom_debug("rtas base = 0x%x\n", base);
1556 prom_debug("rtas entry = 0x%x\n", entry);
1557 prom_debug("rtas size = 0x%x\n", (long)size);
1558
1559 prom_debug("prom_instantiate_rtas: end...\n");
1560 }
1561
1562 #ifdef CONFIG_PPC64
1563 /*
1564 * Allocate room for and instantiate Stored Measurement Log (SML)
1565 */
1566 static void __init prom_instantiate_sml(void)
1567 {
1568 phandle ibmvtpm_node;
1569 ihandle ibmvtpm_inst;
1570 u32 entry = 0, size = 0;
1571 u64 base;
1572
1573 prom_debug("prom_instantiate_sml: start...\n");
1574
1575 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/ibm,vtpm"));
1576 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1577 if (!PHANDLE_VALID(ibmvtpm_node))
1578 return;
1579
1580 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/ibm,vtpm"));
1581 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1582 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1583 return;
1584 }
1585
1586 if (call_prom_ret("call-method", 2, 2, &size,
1587 ADDR("sml-get-handover-size"),
1588 ibmvtpm_inst) != 0 || size == 0) {
1589 prom_printf("SML get handover size failed\n");
1590 return;
1591 }
1592
1593 base = alloc_down(size, PAGE_SIZE, 0);
1594 if (base == 0)
1595 prom_panic("Could not allocate memory for sml\n");
1596
1597 prom_printf("instantiating sml at 0x%x...", base);
1598
1599 if (call_prom_ret("call-method", 4, 2, &entry,
1600 ADDR("sml-handover"),
1601 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1602 prom_printf("SML handover failed\n");
1603 return;
1604 }
1605 prom_printf(" done\n");
1606
1607 reserve_mem(base, size);
1608
1609 prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-base",
1610 &base, sizeof(base));
1611 prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-size",
1612 &size, sizeof(size));
1613
1614 prom_debug("sml base = 0x%x\n", base);
1615 prom_debug("sml size = 0x%x\n", (long)size);
1616
1617 prom_debug("prom_instantiate_sml: end...\n");
1618 }
1619
1620 /*
1621 * Allocate room for and initialize TCE tables
1622 */
1623 static void __init prom_initialize_tce_table(void)
1624 {
1625 phandle node;
1626 ihandle phb_node;
1627 char compatible[64], type[64], model[64];
1628 char *path = prom_scratch;
1629 u64 base, align;
1630 u32 minalign, minsize;
1631 u64 tce_entry, *tce_entryp;
1632 u64 local_alloc_top, local_alloc_bottom;
1633 u64 i;
1634
1635 if (prom_iommu_off)
1636 return;
1637
1638 prom_debug("starting prom_initialize_tce_table\n");
1639
1640 /* Cache current top of allocs so we reserve a single block */
1641 local_alloc_top = alloc_top_high;
1642 local_alloc_bottom = local_alloc_top;
1643
1644 /* Search all nodes looking for PHBs. */
1645 for (node = 0; prom_next_node(&node); ) {
1646 compatible[0] = 0;
1647 type[0] = 0;
1648 model[0] = 0;
1649 prom_getprop(node, "compatible",
1650 compatible, sizeof(compatible));
1651 prom_getprop(node, "device_type", type, sizeof(type));
1652 prom_getprop(node, "model", model, sizeof(model));
1653
1654 if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1655 continue;
1656
1657 /* Keep the old logic intact to avoid regression. */
1658 if (compatible[0] != 0) {
1659 if ((strstr(compatible, "python") == NULL) &&
1660 (strstr(compatible, "Speedwagon") == NULL) &&
1661 (strstr(compatible, "Winnipeg") == NULL))
1662 continue;
1663 } else if (model[0] != 0) {
1664 if ((strstr(model, "ython") == NULL) &&
1665 (strstr(model, "peedwagon") == NULL) &&
1666 (strstr(model, "innipeg") == NULL))
1667 continue;
1668 }
1669
1670 if (prom_getprop(node, "tce-table-minalign", &minalign,
1671 sizeof(minalign)) == PROM_ERROR)
1672 minalign = 0;
1673 if (prom_getprop(node, "tce-table-minsize", &minsize,
1674 sizeof(minsize)) == PROM_ERROR)
1675 minsize = 4UL << 20;
1676
1677 /*
1678 * Even though we read what OF wants, we just set the table
1679 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1680 * By doing this, we avoid the pitfalls of trying to DMA to
1681 * MMIO space and the DMA alias hole.
1682 *
1683 * On POWER4, firmware sets the TCE region by assuming
1684 * each TCE table is 8MB. Using this memory for anything
1685 * else will impact performance, so we always allocate 8MB.
1686 * Anton
1687 */
1688 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
1689 minsize = 8UL << 20;
1690 else
1691 minsize = 4UL << 20;
1692
1693 /* Align to the greater of the align or size */
1694 align = max(minalign, minsize);
1695 base = alloc_down(minsize, align, 1);
1696 if (base == 0)
1697 prom_panic("ERROR, cannot find space for TCE table.\n");
1698 if (base < local_alloc_bottom)
1699 local_alloc_bottom = base;
1700
1701 /* It seems OF doesn't null-terminate the path :-( */
1702 memset(path, 0, PROM_SCRATCH_SIZE);
1703 /* Call OF to setup the TCE hardware */
1704 if (call_prom("package-to-path", 3, 1, node,
1705 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1706 prom_printf("package-to-path failed\n");
1707 }
1708
1709 /* Save away the TCE table attributes for later use. */
1710 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1711 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1712
1713 prom_debug("TCE table: %s\n", path);
1714 prom_debug("\tnode = 0x%x\n", node);
1715 prom_debug("\tbase = 0x%x\n", base);
1716 prom_debug("\tsize = 0x%x\n", minsize);
1717
1718 /* Initialize the table to have a one-to-one mapping
1719 * over the allocated size.
1720 */
1721 tce_entryp = (u64 *)base;
1722 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1723 tce_entry = (i << PAGE_SHIFT);
1724 tce_entry |= 0x3;
1725 *tce_entryp = tce_entry;
1726 }
1727
1728 prom_printf("opening PHB %s", path);
1729 phb_node = call_prom("open", 1, 1, path);
1730 if (phb_node == 0)
1731 prom_printf("... failed\n");
1732 else
1733 prom_printf("... done\n");
1734
1735 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1736 phb_node, -1, minsize,
1737 (u32) base, (u32) (base >> 32));
1738 call_prom("close", 1, 0, phb_node);
1739 }
1740
1741 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1742
1743 /* These are only really needed if there is a memory limit in
1744 * effect, but we don't know so export them always. */
1745 prom_tce_alloc_start = local_alloc_bottom;
1746 prom_tce_alloc_end = local_alloc_top;
1747
1748 /* Flag the first invalid entry */
1749 prom_debug("ending prom_initialize_tce_table\n");
1750 }
1751 #endif
1752
1753 /*
1754 * With CHRP SMP we need to use the OF to start the other processors.
1755 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1756 * so we have to put the processors into a holding pattern controlled
1757 * by the kernel (not OF) before we destroy the OF.
1758 *
1759 * This uses a chunk of low memory, puts some holding pattern
1760 * code there and sends the other processors off to there until
1761 * smp_boot_cpus tells them to do something. The holding pattern
1762 * checks that address until its cpu # is there, when it is that
1763 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1764 * of setting those values.
1765 *
1766 * We also use physical address 0x4 here to tell when a cpu
1767 * is in its holding pattern code.
1768 *
1769 * -- Cort
1770 */
1771 /*
1772 * We want to reference the copy of __secondary_hold_* in the
1773 * 0 - 0x100 address range
1774 */
1775 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
1776
1777 static void __init prom_hold_cpus(void)
1778 {
1779 unsigned long i;
1780 unsigned int reg;
1781 phandle node;
1782 char type[64];
1783 unsigned long *spinloop
1784 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1785 unsigned long *acknowledge
1786 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1787 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1788
1789 prom_debug("prom_hold_cpus: start...\n");
1790 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1791 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1792 prom_debug(" 1) acknowledge = 0x%x\n",
1793 (unsigned long)acknowledge);
1794 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1795 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1796
1797 /* Set the common spinloop variable, so all of the secondary cpus
1798 * will block when they are awakened from their OF spinloop.
1799 * This must occur for both SMP and non SMP kernels, since OF will
1800 * be trashed when we move the kernel.
1801 */
1802 *spinloop = 0;
1803
1804 /* look for cpus */
1805 for (node = 0; prom_next_node(&node); ) {
1806 type[0] = 0;
1807 prom_getprop(node, "device_type", type, sizeof(type));
1808 if (strcmp(type, "cpu") != 0)
1809 continue;
1810
1811 /* Skip non-configured cpus. */
1812 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1813 if (strcmp(type, "okay") != 0)
1814 continue;
1815
1816 reg = -1;
1817 prom_getprop(node, "reg", &reg, sizeof(reg));
1818
1819 prom_debug("cpu hw idx = %lu\n", reg);
1820
1821 /* Init the acknowledge var which will be reset by
1822 * the secondary cpu when it awakens from its OF
1823 * spinloop.
1824 */
1825 *acknowledge = (unsigned long)-1;
1826
1827 if (reg != prom.cpu) {
1828 /* Primary Thread of non-boot cpu or any thread */
1829 prom_printf("starting cpu hw idx %lu... ", reg);
1830 call_prom("start-cpu", 3, 0, node,
1831 secondary_hold, reg);
1832
1833 for (i = 0; (i < 100000000) &&
1834 (*acknowledge == ((unsigned long)-1)); i++ )
1835 mb();
1836
1837 if (*acknowledge == reg)
1838 prom_printf("done\n");
1839 else
1840 prom_printf("failed: %x\n", *acknowledge);
1841 }
1842 #ifdef CONFIG_SMP
1843 else
1844 prom_printf("boot cpu hw idx %lu\n", reg);
1845 #endif /* CONFIG_SMP */
1846 }
1847
1848 prom_debug("prom_hold_cpus: end...\n");
1849 }
1850
1851
1852 static void __init prom_init_client_services(unsigned long pp)
1853 {
1854 /* Get a handle to the prom entry point before anything else */
1855 prom_entry = pp;
1856
1857 /* get a handle for the stdout device */
1858 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1859 if (!PHANDLE_VALID(prom.chosen))
1860 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1861
1862 /* get device tree root */
1863 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
1864 if (!PHANDLE_VALID(prom.root))
1865 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1866
1867 prom.mmumap = 0;
1868 }
1869
1870 #ifdef CONFIG_PPC32
1871 /*
1872 * For really old powermacs, we need to map things we claim.
1873 * For that, we need the ihandle of the mmu.
1874 * Also, on the longtrail, we need to work around other bugs.
1875 */
1876 static void __init prom_find_mmu(void)
1877 {
1878 phandle oprom;
1879 char version[64];
1880
1881 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1882 if (!PHANDLE_VALID(oprom))
1883 return;
1884 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1885 return;
1886 version[sizeof(version) - 1] = 0;
1887 /* XXX might need to add other versions here */
1888 if (strcmp(version, "Open Firmware, 1.0.5") == 0)
1889 of_workarounds = OF_WA_CLAIM;
1890 else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
1891 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
1892 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
1893 } else
1894 return;
1895 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
1896 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
1897 sizeof(prom.mmumap));
1898 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
1899 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
1900 }
1901 #else
1902 #define prom_find_mmu()
1903 #endif
1904
1905 static void __init prom_init_stdout(void)
1906 {
1907 char *path = of_stdout_device;
1908 char type[16];
1909 u32 val;
1910
1911 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
1912 prom_panic("cannot find stdout");
1913
1914 prom.stdout = val;
1915
1916 /* Get the full OF pathname of the stdout device */
1917 memset(path, 0, 256);
1918 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
1919 val = call_prom("instance-to-package", 1, 1, prom.stdout);
1920 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
1921 &val, sizeof(val));
1922 prom_printf("OF stdout device is: %s\n", of_stdout_device);
1923 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
1924 path, strlen(path) + 1);
1925
1926 /* If it's a display, note it */
1927 memset(type, 0, sizeof(type));
1928 prom_getprop(val, "device_type", type, sizeof(type));
1929 if (strcmp(type, "display") == 0)
1930 prom_setprop(val, path, "linux,boot-display", NULL, 0);
1931 }
1932
1933 static int __init prom_find_machine_type(void)
1934 {
1935 char compat[256];
1936 int len, i = 0;
1937 #ifdef CONFIG_PPC64
1938 phandle rtas;
1939 int x;
1940 #endif
1941
1942 /* Look for a PowerMac or a Cell */
1943 len = prom_getprop(prom.root, "compatible",
1944 compat, sizeof(compat)-1);
1945 if (len > 0) {
1946 compat[len] = 0;
1947 while (i < len) {
1948 char *p = &compat[i];
1949 int sl = strlen(p);
1950 if (sl == 0)
1951 break;
1952 if (strstr(p, "Power Macintosh") ||
1953 strstr(p, "MacRISC"))
1954 return PLATFORM_POWERMAC;
1955 #ifdef CONFIG_PPC64
1956 /* We must make sure we don't detect the IBM Cell
1957 * blades as pSeries due to some firmware issues,
1958 * so we do it here.
1959 */
1960 if (strstr(p, "IBM,CBEA") ||
1961 strstr(p, "IBM,CPBW-1.0"))
1962 return PLATFORM_GENERIC;
1963 #endif /* CONFIG_PPC64 */
1964 i += sl + 1;
1965 }
1966 }
1967 #ifdef CONFIG_PPC64
1968 /* Try to detect OPAL */
1969 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
1970 return PLATFORM_OPAL;
1971
1972 /* Try to figure out if it's an IBM pSeries or any other
1973 * PAPR-compliant platform. We assume it is if:
1974 * - /device_type is "chrp" (please do NOT use that for future
1975 * non-IBM designs!)
1976 * - it has /rtas
1977 */
1978 len = prom_getprop(prom.root, "device_type",
1979 compat, sizeof(compat)-1);
1980 if (len <= 0)
1981 return PLATFORM_GENERIC;
1982 if (strcmp(compat, "chrp"))
1983 return PLATFORM_GENERIC;
1984
1985 /* Default to pSeries. We need to know if we are running LPAR */
1986 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1987 if (!PHANDLE_VALID(rtas))
1988 return PLATFORM_GENERIC;
1989 x = prom_getproplen(rtas, "ibm,hypertas-functions");
1990 if (x != PROM_ERROR) {
1991 prom_debug("Hypertas detected, assuming LPAR !\n");
1992 return PLATFORM_PSERIES_LPAR;
1993 }
1994 return PLATFORM_PSERIES;
1995 #else
1996 return PLATFORM_GENERIC;
1997 #endif
1998 }
1999
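/* Set color-map entry 'i' of display 'ih' via the OF "color!" method. */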
2000 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2001 {
2002 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2003 }
2004
2005 /*
2006 * If we have a display that we don't know how to drive,
2007 * we will want to try to execute OF's open method for it
2008 * later. However, OF will probably fall over if we do that after
2009 * we've taken over the MMU.
2010 * So we check whether we will need to open the display,
2011 * and if so, open it now.
2012 */
2013 static void __init prom_check_displays(void)
2014 {
2015 char type[16], *path;
2016 phandle node;
2017 ihandle ih;
2018 int i;
2019
2020 static unsigned char default_colors[] = {
2021 0x00, 0x00, 0x00,
2022 0x00, 0x00, 0xaa,
2023 0x00, 0xaa, 0x00,
2024 0x00, 0xaa, 0xaa,
2025 0xaa, 0x00, 0x00,
2026 0xaa, 0x00, 0xaa,
2027 0xaa, 0xaa, 0x00,
2028 0xaa, 0xaa, 0xaa,
2029 0x55, 0x55, 0x55,
2030 0x55, 0x55, 0xff,
2031 0x55, 0xff, 0x55,
2032 0x55, 0xff, 0xff,
2033 0xff, 0x55, 0x55,
2034 0xff, 0x55, 0xff,
2035 0xff, 0xff, 0x55,
2036 0xff, 0xff, 0xff
2037 };
2038 const unsigned char *clut;
2039
2040 prom_debug("Looking for displays\n");
2041 for (node = 0; prom_next_node(&node); ) {
2042 memset(type, 0, sizeof(type));
2043 prom_getprop(node, "device_type", type, sizeof(type));
2044 if (strcmp(type, "display") != 0)
2045 continue;
2046
2047 /* It seems OF doesn't null-terminate the path :-( */
2048 path = prom_scratch;
2049 memset(path, 0, PROM_SCRATCH_SIZE);
2050
2051 /*
2052 * leave some room at the end of the path for appending extra
2053 * arguments
2054 */
2055 if (call_prom("package-to-path", 3, 1, node, path,
2056 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
2057 continue;
2058 prom_printf("found display : %s, opening... ", path);
2059
2060 ih = call_prom("open", 1, 1, path);
2061 if (ih == 0) {
2062 prom_printf("failed\n");
2063 continue;
2064 }
2065
2066 /* Success */
2067 prom_printf("done\n");
2068 prom_setprop(node, path, "linux,opened", NULL, 0);
2069
2070 /* Set up a usable color table when the appropriate
2071 * method is available. Should update this to use set-colors */
2072 clut = default_colors;
2073 for (i = 0; i < 16; i++, clut += 3)
2074 if (prom_set_color(ih, i, clut[0], clut[1],
2075 clut[2]) != 0)
2076 break;
2077
2078 #ifdef CONFIG_LOGO_LINUX_CLUT224
2079 clut = PTRRELOC(logo_linux_clut224.clut);
2080 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2081 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2082 clut[2]) != 0)
2083 break;
2084 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2085 }
2086 }
2087
2088
2089 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2090 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2091 unsigned long needed, unsigned long align)
2092 {
2093 void *ret;
2094
2095 *mem_start = _ALIGN(*mem_start, align);
2096 while ((*mem_start + needed) > *mem_end) {
2097 unsigned long room, chunk;
2098
2099 prom_debug("Chunk exhausted, claiming more at %x...\n",
2100 alloc_bottom);
2101 room = alloc_top - alloc_bottom;
2102 if (room > DEVTREE_CHUNK_SIZE)
2103 room = DEVTREE_CHUNK_SIZE;
2104 if (room < PAGE_SIZE)
2105 prom_panic("No memory for flatten_device_tree "
2106 "(no room)\n");
2107 chunk = alloc_up(room, 0);
2108 if (chunk == 0)
2109 prom_panic("No memory for flatten_device_tree "
2110 "(claim failed)\n");
2111 *mem_end = chunk + room;
2112 }
2113
2114 ret = (void *)*mem_start;
2115 *mem_start += needed;
2116
2117 return ret;
2118 }
2119
2120 #define dt_push_token(token, mem_start, mem_end) \
2121 do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
2122
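/*
 * Look up a property name in the strings block built so far and
 * return its offset from dt_string_start, or 0 if it is not there
 * (offset 0 is a hole, so it can double as "not found").
 */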
2123 static unsigned long __init dt_find_string(char *str)
2124 {
2125 char *s, *os;
2126
2127 s = os = (char *)dt_string_start;
2128 s += 4;
2129 while (s < (char *)dt_string_end) {
2130 if (strcmp(s, str) == 0)
2131 return s - os;
2132 s += strlen(s) + 1;
2133 }
2134 return 0;
2135 }
2136
2137 /*
2138 * The Open Firmware 1275 specification states properties must be 31 bytes
2139 * or less; however, not all firmware obeys this. Make it 64 bytes to be safe.
2140 */
2141 #define MAX_PROPERTY_NAME 64
2142
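/*
 * First pass over the device tree: walk every node recursively and
 * add each property name we have not seen before to the strings
 * block, so the structure block can later refer to names by offset.
 */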
2143 static void __init scan_dt_build_strings(phandle node,
2144 unsigned long *mem_start,
2145 unsigned long *mem_end)
2146 {
2147 char *prev_name, *namep, *sstart;
2148 unsigned long soff;
2149 phandle child;
2150
2151 sstart = (char *)dt_string_start;
2152
2153 /* get and store all property names */
2154 prev_name = "";
2155 for (;;) {
2156 /* 64 is max len of name including nul. */
2157 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2158 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2159 /* No more properties: unwind alloc */
2160 *mem_start = (unsigned long)namep;
2161 break;
2162 }
2163
2164 /* skip "name" */
2165 if (strcmp(namep, "name") == 0) {
2166 *mem_start = (unsigned long)namep;
2167 prev_name = "name";
2168 continue;
2169 }
2170 /* get/create string entry */
2171 soff = dt_find_string(namep);
2172 if (soff != 0) {
2173 *mem_start = (unsigned long)namep;
2174 namep = sstart + soff;
2175 } else {
2176 /* Trim off some if we can */
2177 *mem_start = (unsigned long)namep + strlen(namep) + 1;
2178 dt_string_end = *mem_start;
2179 }
2180 prev_name = namep;
2181 }
2182
2183 /* do all our children */
2184 child = call_prom("child", 1, 1, node);
2185 while (child != 0) {
2186 scan_dt_build_strings(child, mem_start, mem_end);
2187 child = call_prom("peer", 1, 1, child);
2188 }
2189 }
2190
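/*
 * Second pass: emit the flattened structure block for this node and
 * its children - BEGIN_NODE, unit name, one PROP record per property
 * (name encoded as a strings-block offset), then END_NODE. Adds a
 * "linux,phandle" property when the firmware did not provide
 * "phandle" itself.
 */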
2191 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2192 unsigned long *mem_end)
2193 {
2194 phandle child;
2195 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2196 unsigned long soff;
2197 unsigned char *valp;
2198 static char pname[MAX_PROPERTY_NAME];
2199 int l, room, has_phandle = 0;
2200
2201 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2202
2203 /* get the node's full name */
2204 namep = (char *)*mem_start;
2205 room = *mem_end - *mem_start;
2206 if (room > 255)
2207 room = 255;
2208 l = call_prom("package-to-path", 3, 1, node, namep, room);
2209 if (l >= 0) {
2210 /* Didn't fit? Get more room. */
2211 if (l >= room) {
2212 if (l >= *mem_end - *mem_start)
2213 namep = make_room(mem_start, mem_end, l+1, 1);
2214 call_prom("package-to-path", 3, 1, node, namep, l);
2215 }
2216 namep[l] = '\0';
2217
2218 /* Fixup an Apple bug where they have bogus \0 chars in the
2219 * middle of the path in some properties, and extract
2220 * the unit name (everything after the last '/').
2221 */
2222 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2223 if (*p == '/')
2224 lp = namep;
2225 else if (*p != 0)
2226 *lp++ = *p;
2227 }
2228 *lp = 0;
2229 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
2230 }
2231
2232 /* get it again for debugging */
2233 path = prom_scratch;
2234 memset(path, 0, PROM_SCRATCH_SIZE);
2235 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2236
2237 /* get and store all properties */
2238 prev_name = "";
2239 sstart = (char *)dt_string_start;
2240 for (;;) {
2241 if (call_prom("nextprop", 3, 1, node, prev_name,
2242 pname) != 1)
2243 break;
2244
2245 /* skip "name" */
2246 if (strcmp(pname, "name") == 0) {
2247 prev_name = "name";
2248 continue;
2249 }
2250
2251 /* find string offset */
2252 soff = dt_find_string(pname);
2253 if (soff == 0) {
2254 prom_printf("WARNING: Can't find string index for"
2255 " <%s>, node %s\n", pname, path);
2256 break;
2257 }
2258 prev_name = sstart + soff;
2259
2260 /* get length */
2261 l = call_prom("getproplen", 2, 1, node, pname);
2262
2263 /* sanity checks */
2264 if (l == PROM_ERROR)
2265 continue;
2266
2267 /* push property head */
2268 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2269 dt_push_token(l, mem_start, mem_end);
2270 dt_push_token(soff, mem_start, mem_end);
2271
2272 /* push property content */
2273 valp = make_room(mem_start, mem_end, l, 4);
2274 call_prom("getprop", 4, 1, node, pname, valp, l);
2275 *mem_start = _ALIGN(*mem_start, 4);
2276
2277 if (!strcmp(pname, "phandle"))
2278 has_phandle = 1;
2279 }
2280
2281 /* Add a "linux,phandle" property if no "phandle" property already
2282 * existed (can happen with OPAL)
2283 */
2284 if (!has_phandle) {
2285 soff = dt_find_string("linux,phandle");
2286 if (soff == 0)
2287 prom_printf("WARNING: Can't find string index for"
2288 " <linux-phandle> node %s\n", path);
2289 else {
2290 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2291 dt_push_token(4, mem_start, mem_end);
2292 dt_push_token(soff, mem_start, mem_end);
2293 valp = make_room(mem_start, mem_end, 4, 4);
2294 *(u32 *)valp = node;
2295 }
2296 }
2297
2298 /* do all our children */
2299 child = call_prom("child", 1, 1, node);
2300 while (child != 0) {
2301 scan_dt_build_struct(child, mem_start, mem_end);
2302 child = call_prom("peer", 1, 1, child);
2303 }
2304
2305 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2306 }
2307
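/*
 * Build the flattened device tree blob that is handed to the kernel.
 * Rough layout, all offsets relative to dt_header_start:
 *
 *   boot_param_header
 *   memory reserve map (copied from mem_reserve_map)
 *   strings block   [dt_string_start, dt_string_end)
 *   structure block [dt_struct_start, dt_struct_end)
 *
 * Memory is claimed from OF in DEVTREE_CHUNK_SIZE pieces as needed.
 */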
2308 static void __init flatten_device_tree(void)
2309 {
2310 phandle root;
2311 unsigned long mem_start, mem_end, room;
2312 struct boot_param_header *hdr;
2313 char *namep;
2314 u64 *rsvmap;
2315
2316 /*
2317 * Check how much room we have between alloc top & bottom (+/- a
2318 * few pages), crop to 1MB, as this is our "chunk" size
2319 */
2320 room = alloc_top - alloc_bottom - 0x4000;
2321 if (room > DEVTREE_CHUNK_SIZE)
2322 room = DEVTREE_CHUNK_SIZE;
2323 prom_debug("starting device tree allocs at %x\n", alloc_bottom);
2324
2325 /* Now try to claim that */
2326 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2327 if (mem_start == 0)
2328 prom_panic("Can't allocate initial device-tree chunk\n");
2329 mem_end = mem_start + room;
2330
2331 /* Get root of tree */
2332 root = call_prom("peer", 1, 1, (phandle)0);
2333 if (root == (phandle)0)
2334 prom_panic ("couldn't get device tree root\n");
2335
2336 /* Build header and make room for mem rsv map */
2337 mem_start = _ALIGN(mem_start, 4);
2338 hdr = make_room(&mem_start, &mem_end,
2339 sizeof(struct boot_param_header), 4);
2340 dt_header_start = (unsigned long)hdr;
2341 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2342
2343 /* Start of strings */
2344 mem_start = PAGE_ALIGN(mem_start);
2345 dt_string_start = mem_start;
2346 mem_start += 4; /* hole */
2347
2348 /* Add "linux,phandle" in there, we'll need it */
2349 namep = make_room(&mem_start, &mem_end, 16, 1);
2350 strcpy(namep, "linux,phandle");
2351 mem_start = (unsigned long)namep + strlen(namep) + 1;
2352
2353 /* Build string array */
2354 prom_printf("Building dt strings...\n");
2355 scan_dt_build_strings(root, &mem_start, &mem_end);
2356 dt_string_end = mem_start;
2357
2358 /* Build structure */
2359 mem_start = PAGE_ALIGN(mem_start);
2360 dt_struct_start = mem_start;
2361 prom_printf("Building dt structure...\n");
2362 scan_dt_build_struct(root, &mem_start, &mem_end);
2363 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2364 dt_struct_end = PAGE_ALIGN(mem_start);
2365
2366 /* Finish header */
2367 hdr->boot_cpuid_phys = prom.cpu;
2368 hdr->magic = OF_DT_HEADER;
2369 hdr->totalsize = dt_struct_end - dt_header_start;
2370 hdr->off_dt_struct = dt_struct_start - dt_header_start;
2371 hdr->off_dt_strings = dt_string_start - dt_header_start;
2372 hdr->dt_strings_size = dt_string_end - dt_string_start;
2373 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - dt_header_start;
2374 hdr->version = OF_DT_VERSION;
2375 /* Version 16 is not backward compatible */
2376 hdr->last_comp_version = 0x10;
2377
2378 /* Copy the reserve map in */
2379 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2380
2381 #ifdef DEBUG_PROM
2382 {
2383 int i;
2384 prom_printf("reserved memory map:\n");
2385 for (i = 0; i < mem_reserve_cnt; i++)
2386 prom_printf(" %x - %x\n",
2387 mem_reserve_map[i].base,
2388 mem_reserve_map[i].size);
2389 }
2390 #endif
2391 /* Bump mem_reserve_cnt to cause further reservations to fail
2392 * since it's too late.
2393 */
2394 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2395
2396 prom_printf("Device tree strings 0x%x -> 0x%x\n",
2397 dt_string_start, dt_string_end);
2398 prom_printf("Device tree struct 0x%x -> 0x%x\n",
2399 dt_struct_start, dt_struct_end);
2400
2401 }
2402
2403 #ifdef CONFIG_PPC_MAPLE
2404 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2405 * The values are bad, and it doesn't even have the right number of cells. */
2406 static void __init fixup_device_tree_maple(void)
2407 {
2408 phandle isa;
2409 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2410 u32 isa_ranges[6];
2411 char *name;
2412
2413 name = "/ht@0/isa@4";
2414 isa = call_prom("finddevice", 1, 1, ADDR(name));
2415 if (!PHANDLE_VALID(isa)) {
2416 name = "/ht@0/isa@6";
2417 isa = call_prom("finddevice", 1, 1, ADDR(name));
2418 rloc = 0x01003000; /* IO space; PCI device = 6 */
2419 }
2420 if (!PHANDLE_VALID(isa))
2421 return;
2422
2423 if (prom_getproplen(isa, "ranges") != 12)
2424 return;
2425 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2426 == PROM_ERROR)
2427 return;
2428
2429 if (isa_ranges[0] != 0x1 ||
2430 isa_ranges[1] != 0xf4000000 ||
2431 isa_ranges[2] != 0x00010000)
2432 return;
2433
2434 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2435
2436 isa_ranges[0] = 0x1;
2437 isa_ranges[1] = 0x0;
2438 isa_ranges[2] = rloc;
2439 isa_ranges[3] = 0x0;
2440 isa_ranges[4] = 0x0;
2441 isa_ranges[5] = 0x00010000;
2442 prom_setprop(isa, name, "ranges",
2443 isa_ranges, sizeof(isa_ranges));
2444 }
2445
2446 #define CPC925_MC_START 0xf8000000
2447 #define CPC925_MC_LENGTH 0x1000000
2448 /* The values for memory-controller don't have the right number of cells */
2449 static void __init fixup_device_tree_maple_memory_controller(void)
2450 {
2451 phandle mc;
2452 u32 mc_reg[4];
2453 char *name = "/hostbridge@f8000000";
2454 u32 ac, sc;
2455
2456 mc = call_prom("finddevice", 1, 1, ADDR(name));
2457 if (!PHANDLE_VALID(mc))
2458 return;
2459
2460 if (prom_getproplen(mc, "reg") != 8)
2461 return;
2462
2463 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2464 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2465 if ((ac != 2) || (sc != 2))
2466 return;
2467
2468 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2469 return;
2470
2471 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2472 return;
2473
2474 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2475
2476 mc_reg[0] = 0x0;
2477 mc_reg[1] = CPC925_MC_START;
2478 mc_reg[2] = 0x0;
2479 mc_reg[3] = CPC925_MC_LENGTH;
2480 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2481 }
2482 #else
2483 #define fixup_device_tree_maple()
2484 #define fixup_device_tree_maple_memory_controller()
2485 #endif
2486
2487 #ifdef CONFIG_PPC_CHRP
2488 /*
2489 * Pegasos and BriQ lack the "ranges" property in the isa node
2490 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2491 * Pegasos has the IDE configured in legacy mode, but advertised as native
2492 */
2493 static void __init fixup_device_tree_chrp(void)
2494 {
2495 phandle ph;
2496 u32 prop[6];
2497 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2498 char *name;
2499 int rc;
2500
2501 name = "/pci@80000000/isa@c";
2502 ph = call_prom("finddevice", 1, 1, ADDR(name));
2503 if (!PHANDLE_VALID(ph)) {
2504 name = "/pci@ff500000/isa@6";
2505 ph = call_prom("finddevice", 1, 1, ADDR(name));
2506 rloc = 0x01003000; /* IO space; PCI device = 6 */
2507 }
2508 if (PHANDLE_VALID(ph)) {
2509 rc = prom_getproplen(ph, "ranges");
2510 if (rc == 0 || rc == PROM_ERROR) {
2511 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2512
2513 prop[0] = 0x1;
2514 prop[1] = 0x0;
2515 prop[2] = rloc;
2516 prop[3] = 0x0;
2517 prop[4] = 0x0;
2518 prop[5] = 0x00010000;
2519 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2520 }
2521 }
2522
2523 name = "/pci@80000000/ide@C,1";
2524 ph = call_prom("finddevice", 1, 1, ADDR(name));
2525 if (PHANDLE_VALID(ph)) {
2526 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2527 prop[0] = 14;
2528 prop[1] = 0x0;
2529 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2530 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2531 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2532 if (rc == sizeof(u32)) {
2533 prop[0] &= ~0x5;
2534 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2535 }
2536 }
2537 }
2538 #else
2539 #define fixup_device_tree_chrp()
2540 #endif
2541
2542 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2543 static void __init fixup_device_tree_pmac(void)
2544 {
2545 phandle u3, i2c, mpic;
2546 u32 u3_rev;
2547 u32 interrupts[2];
2548 u32 parent;
2549
2550 /* Some G5s have a missing interrupt definition, fix it up here */
2551 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2552 if (!PHANDLE_VALID(u3))
2553 return;
2554 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2555 if (!PHANDLE_VALID(i2c))
2556 return;
2557 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2558 if (!PHANDLE_VALID(mpic))
2559 return;
2560
2561 /* check if proper rev of u3 */
2562 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2563 == PROM_ERROR)
2564 return;
2565 if (u3_rev < 0x35 || u3_rev > 0x39)
2566 return;
2567 /* does it need fixup ? */
2568 if (prom_getproplen(i2c, "interrupts") > 0)
2569 return;
2570
2571 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2572
2573 /* interrupt on this revision of u3 is number 0 and level-triggered */
2574 interrupts[0] = 0;
2575 interrupts[1] = 1;
2576 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2577 &interrupts, sizeof(interrupts));
2578 parent = (u32)mpic;
2579 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2580 &parent, sizeof(parent));
2581 }
2582 #else
2583 #define fixup_device_tree_pmac()
2584 #endif
2585
2586 #ifdef CONFIG_PPC_EFIKA
2587 /*
2588 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2589 * to talk to the phy. If the phy-handle property is missing, then this
2590 * function is called to add the appropriate nodes and link it to the
2591 * ethernet node.
2592 */
2593 static void __init fixup_device_tree_efika_add_phy(void)
2594 {
2595 u32 node;
2596 char prop[64];
2597 int rv;
2598
2599 /* Check if /builtin/ethernet exists - bail if it doesn't */
2600 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2601 if (!PHANDLE_VALID(node))
2602 return;
2603
2604 /* Check if the phy-handle property exists - bail if it does */
2605 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2606 if (!rv)
2607 return;
2608
2609 /*
2610 * At this point the ethernet device doesn't have a phy described.
2611 * Now we need to add the missing phy node and linkage
2612 */
2613
2614 /* Check for an MDIO bus node - if missing then create one */
2615 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2616 if (!PHANDLE_VALID(node)) {
2617 prom_printf("Adding Ethernet MDIO node\n");
2618 call_prom("interpret", 1, 1,
2619 " s\" /builtin\" find-device"
2620 " new-device"
2621 " 1 encode-int s\" #address-cells\" property"
2622 " 0 encode-int s\" #size-cells\" property"
2623 " s\" mdio\" device-name"
2624 " s\" fsl,mpc5200b-mdio\" encode-string"
2625 " s\" compatible\" property"
2626 " 0xf0003000 0x400 reg"
2627 " 0x2 encode-int"
2628 " 0x5 encode-int encode+"
2629 " 0x3 encode-int encode+"
2630 " s\" interrupts\" property"
2631 " finish-device");
2632 }
2633
2634 /* Check for a PHY device node - if missing then create one and
2635 * give its phandle to the ethernet node */
2636 node = call_prom("finddevice", 1, 1,
2637 ADDR("/builtin/mdio/ethernet-phy"));
2638 if (!PHANDLE_VALID(node)) {
2639 prom_printf("Adding Ethernet PHY node\n");
2640 call_prom("interpret", 1, 1,
2641 " s\" /builtin/mdio\" find-device"
2642 " new-device"
2643 " s\" ethernet-phy\" device-name"
2644 " 0x10 encode-int s\" reg\" property"
2645 " my-self"
2646 " ihandle>phandle"
2647 " finish-device"
2648 " s\" /builtin/ethernet\" find-device"
2649 " encode-int"
2650 " s\" phy-handle\" property"
2651 " device-end");
2652 }
2653 }
2654
2655 static void __init fixup_device_tree_efika(void)
2656 {
2657 int sound_irq[3] = { 2, 2, 0 };
2658 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2659 3,4,0, 3,5,0, 3,6,0, 3,7,0,
2660 3,8,0, 3,9,0, 3,10,0, 3,11,0,
2661 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2662 u32 node;
2663 char prop[64];
2664 int rv, len;
2665
2666 /* Check if we're really running on an EFIKA */
2667 node = call_prom("finddevice", 1, 1, ADDR("/"));
2668 if (!PHANDLE_VALID(node))
2669 return;
2670
2671 rv = prom_getprop(node, "model", prop, sizeof(prop));
2672 if (rv == PROM_ERROR)
2673 return;
2674 if (strcmp(prop, "EFIKA5K2"))
2675 return;
2676
2677 prom_printf("Applying EFIKA device tree fixups\n");
2678
2679 /* Claiming to be 'chrp' is death */
2680 node = call_prom("finddevice", 1, 1, ADDR("/"));
2681 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
2682 if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
2683 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
2684
2685 /* CODEGEN,description is exposed in /proc/cpuinfo so
2686 fix that too */
2687 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
2688 if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
2689 prom_setprop(node, "/", "CODEGEN,description",
2690 "Efika 5200B PowerPC System",
2691 sizeof("Efika 5200B PowerPC System"));
2692
2693 /* Fixup bestcomm interrupts property */
2694 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
2695 if (PHANDLE_VALID(node)) {
2696 len = prom_getproplen(node, "interrupts");
2697 if (len == 12) {
2698 prom_printf("Fixing bestcomm interrupts property\n");
2699 prom_setprop(node, "/builtin/bestcom", "interrupts",
2700 bcomm_irq, sizeof(bcomm_irq));
2701 }
2702 }
2703
2704 /* Fixup sound interrupts property */
2705 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
2706 if (PHANDLE_VALID(node)) {
2707 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
2708 if (rv == PROM_ERROR) {
2709 prom_printf("Adding sound interrupts property\n");
2710 prom_setprop(node, "/builtin/sound", "interrupts",
2711 sound_irq, sizeof(sound_irq));
2712 }
2713 }
2714
2715 /* Make sure ethernet phy-handle property exists */
2716 fixup_device_tree_efika_add_phy();
2717 }
2718 #else
2719 #define fixup_device_tree_efika()
2720 #endif
2721
2722 static void __init fixup_device_tree(void)
2723 {
2724 fixup_device_tree_maple();
2725 fixup_device_tree_maple_memory_controller();
2726 fixup_device_tree_chrp();
2727 fixup_device_tree_pmac();
2728 fixup_device_tree_efika();
2729 }
2730
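/*
 * Work out which CPU we are booting on: take the "cpu" instance from
 * /chosen, convert it to a package and read its "reg" property into
 * prom.cpu (defaulting to 0 if nothing is found).
 */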
2731 static void __init prom_find_boot_cpu(void)
2732 {
2733 u32 getprop_rval;
2734 ihandle prom_cpu;
2735 phandle cpu_pkg;
2736
2737 prom.cpu = 0;
2738 if (prom_getprop(prom.chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
2739 return;
2740
2741 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2742
2743 prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
2744 prom.cpu = getprop_rval;
2745
2746 prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
2747 }
2748
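/*
 * If the boot loader passed an initrd in r3/r4, record its physical
 * start and end in /chosen and reserve that memory so the flattened
 * tree and other allocations do not land on top of it.
 */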
2749 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
2750 {
2751 #ifdef CONFIG_BLK_DEV_INITRD
2752 if (r3 && r4 && r4 != 0xdeadbeef) {
2753 unsigned long val;
2754
2755 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
2756 prom_initrd_end = prom_initrd_start + r4;
2757
2758 val = prom_initrd_start;
2759 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
2760 &val, sizeof(val));
2761 val = prom_initrd_end;
2762 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
2763 &val, sizeof(val));
2764
2765 reserve_mem(prom_initrd_start,
2766 prom_initrd_end - prom_initrd_start);
2767
2768 prom_debug("initrd_start=0x%x\n", prom_initrd_start);
2769 prom_debug("initrd_end=0x%x\n", prom_initrd_end);
2770 }
2771 #endif /* CONFIG_BLK_DEV_INITRD */
2772 }
2773
2774 #ifdef CONFIG_PPC64
2775 #ifdef CONFIG_RELOCATABLE
2776 static void reloc_toc(void)
2777 {
2778 }
2779
2780 static void unreloc_toc(void)
2781 {
2782 }
2783 #else
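/*
 * Without CONFIG_RELOCATABLE the TOC entries used by prom_init are
 * linked for the final kernel address, so add the relocation offset
 * to each of them on entry (reloc_toc) and take it back off again
 * before returning to the normal entry path (unreloc_toc).
 */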
2784 static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
2785 {
2786 unsigned long i;
2787 unsigned long *toc_entry;
2788
2789 /* Get the start of the TOC by using r2 directly. */
2790 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
2791
2792 for (i = 0; i < nr_entries; i++) {
2793 *toc_entry = *toc_entry + offset;
2794 toc_entry++;
2795 }
2796 }
2797
2798 static void reloc_toc(void)
2799 {
2800 unsigned long offset = reloc_offset();
2801 unsigned long nr_entries =
2802 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2803
2804 __reloc_toc(offset, nr_entries);
2805
2806 mb();
2807 }
2808
2809 static void unreloc_toc(void)
2810 {
2811 unsigned long offset = reloc_offset();
2812 unsigned long nr_entries =
2813 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2814
2815 mb();
2816
2817 __reloc_toc(-offset, nr_entries);
2818 }
2819 #endif
2820 #endif
2821
2822 /*
2823 * We enter here early on, while the Open Firmware prom is still
2824 * handling exceptions and managing the MMU hash table for us.
2825 */
2826
2827 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2828 unsigned long pp,
2829 unsigned long r6, unsigned long r7,
2830 unsigned long kbase)
2831 {
2832 unsigned long hdr;
2833
2834 #ifdef CONFIG_PPC32
2835 unsigned long offset = reloc_offset();
2836 reloc_got2(offset);
2837 #else
2838 reloc_toc();
2839 #endif
2840
2841 /*
2842 * First zero the BSS
2843 */
2844 memset(&__bss_start, 0, __bss_stop - __bss_start);
2845
2846 /*
2847 * Init interface to Open Firmware, get some node references,
2848 * like /chosen
2849 */
2850 prom_init_client_services(pp);
2851
2852 /*
2853 * See if this OF is old enough that we need to do explicit maps
2854 * and other workarounds
2855 */
2856 prom_find_mmu();
2857
2858 /*
2859 * Init prom stdout device
2860 */
2861 prom_init_stdout();
2862
2863 prom_printf("Preparing to boot %s", linux_banner);
2864
2865 /*
2866 * Get default machine type. At this point, we do not differentiate
2867 * between pSeries SMP and pSeries LPAR
2868 */
2869 of_platform = prom_find_machine_type();
2870 prom_printf("Detected machine type: %x\n", of_platform);
2871
2872 #ifndef CONFIG_NONSTATIC_KERNEL
2873 /* Bail if this is a kdump kernel. */
2874 if (PHYSICAL_START > 0)
2875 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
2876 #endif
2877
2878 /*
2879 * Check for an initrd
2880 */
2881 prom_check_initrd(r3, r4);
2882
2883 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
2884 /*
2885 * On pSeries, inform the firmware about our capabilities
2886 */
2887 if (of_platform == PLATFORM_PSERIES ||
2888 of_platform == PLATFORM_PSERIES_LPAR)
2889 prom_send_capabilities();
2890 #endif
2891
2892 /*
2893 * Copy the CPU hold code
2894 */
2895 if (of_platform != PLATFORM_POWERMAC)
2896 copy_and_flush(0, kbase, 0x100, 0);
2897
2898 /*
2899 * Do early parsing of command line
2900 */
2901 early_cmdline_parse();
2902
2903 /*
2904 * Initialize memory management within prom_init
2905 */
2906 prom_init_mem();
2907
2908 /*
2909 * Determine which cpu is actually running right _now_
2910 */
2911 prom_find_boot_cpu();
2912
2913 /*
2914 * Initialize display devices
2915 */
2916 prom_check_displays();
2917
2918 #ifdef CONFIG_PPC64
2919 /*
2920 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
2921 * that uses the allocator, as we need to make sure we get the top of
2922 * memory available for us here...
2923 */
2924 if (of_platform == PLATFORM_PSERIES)
2925 prom_initialize_tce_table();
2926 #endif
2927
2928 /*
2929 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
2930 * have a usable RTAS implementation.
2931 */
2932 if (of_platform != PLATFORM_POWERMAC &&
2933 of_platform != PLATFORM_OPAL)
2934 prom_instantiate_rtas();
2935
2936 #ifdef CONFIG_PPC_POWERNV
2937 /* Detect HAL and try instantiating it & doing takeover */
2938 if (of_platform == PLATFORM_PSERIES_LPAR) {
2939 prom_query_opal();
2940 if (of_platform == PLATFORM_OPAL) {
2941 prom_opal_hold_cpus();
2942 prom_opal_takeover();
2943 }
2944 } else if (of_platform == PLATFORM_OPAL)
2945 prom_instantiate_opal();
2946 #endif
2947
2948 #ifdef CONFIG_PPC64
2949 /* instantiate sml */
2950 prom_instantiate_sml();
2951 #endif
2952
2953 /*
2954 * On non-powermacs, put all CPUs in spin-loops.
2955 *
2956 * PowerMacs use a different mechanism to spin CPUs
2957 */
2958 if (of_platform != PLATFORM_POWERMAC &&
2959 of_platform != PLATFORM_OPAL)
2960 prom_hold_cpus();
2961
2962 /*
2963 * Fill in some infos for use by the kernel later on
2964 */
2965 if (prom_memory_limit)
2966 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
2967 &prom_memory_limit,
2968 sizeof(prom_memory_limit));
2969 #ifdef CONFIG_PPC64
2970 if (prom_iommu_off)
2971 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
2972 NULL, 0);
2973
2974 if (prom_iommu_force_on)
2975 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
2976 NULL, 0);
2977
2978 if (prom_tce_alloc_start) {
2979 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
2980 &prom_tce_alloc_start,
2981 sizeof(prom_tce_alloc_start));
2982 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
2983 &prom_tce_alloc_end,
2984 sizeof(prom_tce_alloc_end));
2985 }
2986 #endif
2987
2988 /*
2989 * Fixup any known bugs in the device-tree
2990 */
2991 fixup_device_tree();
2992
2993 /*
2994 * Now finally create the flattened device-tree
2995 */
2996 prom_printf("copying OF device tree...\n");
2997 flatten_device_tree();
2998
2999 /*
3000 * Close stdin in case it is USB and still active on IBM machines...
3001 * Unfortunately quiesce crashes on some PowerMacs if we have
3002 * closed stdin already (in particular the PowerBook 101). It
3003 * appears that the OPAL version of OFW doesn't like it either.
3004 */
3005 if (of_platform != PLATFORM_POWERMAC &&
3006 of_platform != PLATFORM_OPAL)
3007 prom_close_stdin();
3008
3009 /*
3010 * Call OF "quiesce" method to shut down pending DMA's from
3011 * devices etc...
3012 */
3013 prom_printf("Calling quiesce...\n");
3014 call_prom("quiesce", 0, 0);
3015
3016 /*
3017 * And finally, call the kernel passing it the flattened device
3018 * tree and NULL as r5, thus triggering the new entry point which
3019 * is common to us and kexec
3020 */
3021 hdr = dt_header_start;
3022
3023 /* Don't print anything after quiesce under OPAL, it crashes OFW */
3024 if (of_platform != PLATFORM_OPAL) {
3025 prom_printf("returning from prom_init\n");
3026 prom_debug("->dt_header_start=0x%x\n", hdr);
3027 }
3028
3029 #ifdef CONFIG_PPC32
3030 reloc_got2(-offset);
3031 #else
3032 unreloc_toc();
3033 #endif
3034
3035 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
3036 /* OPAL early debug gets the OPAL base & entry in r8 and r9 */
3037 __start(hdr, kbase, 0, 0, 0,
3038 prom_opal_base, prom_opal_entry);
3039 #else
3040 __start(hdr, kbase, 0, 0, 0, 0, 0);
3041 #endif
3042
3043 return 0;
3044 }