arch/powerpc/kernel/prom_init.c
1 /*
2 * Procedures for interfacing to Open Firmware.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 #undef DEBUG_PROM
17
18 #include <stdarg.h>
19 #include <linux/kernel.h>
20 #include <linux/string.h>
21 #include <linux/init.h>
22 #include <linux/threads.h>
23 #include <linux/spinlock.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/proc_fs.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <asm/prom.h>
32 #include <asm/rtas.h>
33 #include <asm/page.h>
34 #include <asm/processor.h>
35 #include <asm/irq.h>
36 #include <asm/io.h>
37 #include <asm/smp.h>
38 #include <asm/mmu.h>
39 #include <asm/pgtable.h>
40 #include <asm/pci.h>
41 #include <asm/iommu.h>
42 #include <asm/btext.h>
43 #include <asm/sections.h>
44 #include <asm/machdep.h>
45 #include <asm/opal.h>
46
47 #include <linux/linux_logo.h>
48
49 /*
50 * Eventually bump that one up
51 */
52 #define DEVTREE_CHUNK_SIZE 0x100000
53
54 /*
55 * This is the size of the local memory reserve map that gets copied
56 * into the boot params passed to the kernel. That size is totally
57 * flexible as the kernel just reads the list until it encounters an
58 * entry with size 0, so it can be changed without breaking binary
59 * compatibility
60 */
61 #define MEM_RESERVE_MAP_SIZE 8
62
63 /*
64 * prom_init() is called very early on, before the kernel text
65 * and data have been mapped to KERNELBASE. At this point the code
66 * is running at whatever address it has been loaded at.
67 * On ppc32 we compile with -mrelocatable, which means that references
68 * to extern and static variables get relocated automatically.
69 * ppc64 objects are always relocatable, we just need to relocate the
70 * TOC.
71 *
72 * Because OF may have mapped I/O devices into the area starting at
73 * KERNELBASE, particularly on CHRP machines, we can't safely call
74 * OF once the kernel has been mapped to KERNELBASE. Therefore all
75 * OF calls must be done within prom_init().
76 *
77 * ADDR is used in calls to call_prom. The 4th and following
78 * arguments to call_prom should be 32-bit values.
79 * On ppc64, 64 bit values are truncated to 32 bits (and
80 * fortunately don't get interpreted as two arguments).
81 */
82 #define ADDR(x) (u32)(unsigned long)(x)
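/*
 * For example, the console writes later in this file pass their string
 * pointer as
 *
 *	call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
 *
 * so that only the low 32 bits of the pointer reach OF as a prom_arg_t.
 * That is fine here because, as noted above, the code is still running
 * at the (32-bit reachable) address it was loaded at.
 */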
83
84 #ifdef CONFIG_PPC64
85 #define OF_WORKAROUNDS 0
86 #else
87 #define OF_WORKAROUNDS of_workarounds
88 int of_workarounds;
89 #endif
90
91 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
92 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
93
94 #define PROM_BUG() do { \
95 prom_printf("kernel BUG at %s line 0x%x!\n", \
96 __FILE__, __LINE__); \
97 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
98 } while (0)
99
100 #ifdef DEBUG_PROM
101 #define prom_debug(x...) prom_printf(x)
102 #else
103 #define prom_debug(x...)
104 #endif
105
106
107 typedef u32 prom_arg_t;
108
109 struct prom_args {
110 __be32 service;
111 __be32 nargs;
112 __be32 nret;
113 __be32 args[10];
114 };
115
116 struct prom_t {
117 ihandle root;
118 phandle chosen;
119 int cpu;
120 ihandle stdout;
121 ihandle mmumap;
122 ihandle memory;
123 };
124
125 struct mem_map_entry {
126 __be64 base;
127 __be64 size;
128 };
129
130 typedef __be32 cell_t;
131
132 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
133 unsigned long r6, unsigned long r7, unsigned long r8,
134 unsigned long r9);
135
136 #ifdef CONFIG_PPC64
137 extern int enter_prom(struct prom_args *args, unsigned long entry);
138 #else
139 static inline int enter_prom(struct prom_args *args, unsigned long entry)
140 {
141 return ((int (*)(struct prom_args *))entry)(args);
142 }
143 #endif
144
145 extern void copy_and_flush(unsigned long dest, unsigned long src,
146 unsigned long size, unsigned long offset);
147
148 /* prom structure */
149 static struct prom_t __initdata prom;
150
151 static unsigned long prom_entry __initdata;
152
153 #define PROM_SCRATCH_SIZE 256
154
155 static char __initdata of_stdout_device[256];
156 static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
157
158 static unsigned long __initdata dt_header_start;
159 static unsigned long __initdata dt_struct_start, dt_struct_end;
160 static unsigned long __initdata dt_string_start, dt_string_end;
161
162 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
163
164 #ifdef CONFIG_PPC64
165 static int __initdata prom_iommu_force_on;
166 static int __initdata prom_iommu_off;
167 static unsigned long __initdata prom_tce_alloc_start;
168 static unsigned long __initdata prom_tce_alloc_end;
169 #endif
170
171 /* Platform codes are now obsolete in the kernel. They are now only used
172 * within this file and will ultimately go away too. Feel free to change them
173 * if you need; they are not shared with anything outside of this file anymore.
174 */
175 #define PLATFORM_PSERIES 0x0100
176 #define PLATFORM_PSERIES_LPAR 0x0101
177 #define PLATFORM_LPAR 0x0001
178 #define PLATFORM_POWERMAC 0x0400
179 #define PLATFORM_GENERIC 0x0500
180 #define PLATFORM_OPAL 0x0600
181
182 static int __initdata of_platform;
183
184 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
185
186 static unsigned long __initdata prom_memory_limit;
187
188 static unsigned long __initdata alloc_top;
189 static unsigned long __initdata alloc_top_high;
190 static unsigned long __initdata alloc_bottom;
191 static unsigned long __initdata rmo_top;
192 static unsigned long __initdata ram_top;
193
194 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
195 static int __initdata mem_reserve_cnt;
196
197 static cell_t __initdata regbuf[1024];
198
199
200 /*
201 * Error results ... some OF calls will return "-1" on error, some
202 * will return 0, some will return either. To simplify, here are
203 * macros to use with any ihandle or phandle return value to check if
204 * it is valid
205 */
206
207 #define PROM_ERROR (-1u)
208 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
209 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
210
211
212 /* This is the one and *ONLY* place where we actually call open
213 * firmware.
214 */
215
216 static int __init call_prom(const char *service, int nargs, int nret, ...)
217 {
218 int i;
219 struct prom_args args;
220 va_list list;
221
222 args.service = cpu_to_be32(ADDR(service));
223 args.nargs = cpu_to_be32(nargs);
224 args.nret = cpu_to_be32(nret);
225
226 va_start(list, nret);
227 for (i = 0; i < nargs; i++)
228 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
229 va_end(list);
230
231 for (i = 0; i < nret; i++)
232 args.args[nargs+i] = 0;
233
234 if (enter_prom(&args, prom_entry) < 0)
235 return PROM_ERROR;
236
237 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
238 }
239
240 static int __init call_prom_ret(const char *service, int nargs, int nret,
241 prom_arg_t *rets, ...)
242 {
243 int i;
244 struct prom_args args;
245 va_list list;
246
247 args.service = cpu_to_be32(ADDR(service));
248 args.nargs = cpu_to_be32(nargs);
249 args.nret = cpu_to_be32(nret);
250
251 va_start(list, rets);
252 for (i = 0; i < nargs; i++)
253 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
254 va_end(list);
255
256 for (i = 0; i < nret; i++)
257 args.args[nargs+i] = 0;
258
259 if (enter_prom(&args, prom_entry) < 0)
260 return PROM_ERROR;
261
262 if (rets != NULL)
263 for (i = 1; i < nret; ++i)
264 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
265
266 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
267 }
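/*
 * A typical multi-return use, as in prom_claim() below: the first OF
 * return cell comes back as the function's return value and any
 * remaining cells land in rets[], e.g.
 *
 *	prom_arg_t result;
 *	int ret = call_prom_ret("call-method", 5, 2, &result,
 *				ADDR("claim"), prom.memory,
 *				align, size, virt);
 *
 * leaves the call-method status in ret and the value returned by the
 * "claim" method itself (the claimed address) in result.
 */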
268
269
270 static void __init prom_print(const char *msg)
271 {
272 const char *p, *q;
273
274 if (prom.stdout == 0)
275 return;
276
277 for (p = msg; *p != 0; p = q) {
278 for (q = p; *q != 0 && *q != '\n'; ++q)
279 ;
280 if (q > p)
281 call_prom("write", 3, 1, prom.stdout, p, q - p);
282 if (*q == 0)
283 break;
284 ++q;
285 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
286 }
287 }
288
289
290 static void __init prom_print_hex(unsigned long val)
291 {
292 int i, nibbles = sizeof(val)*2;
293 char buf[sizeof(val)*2+1];
294
295 for (i = nibbles-1; i >= 0; i--) {
296 buf[i] = (val & 0xf) + '0';
297 if (buf[i] > '9')
298 buf[i] += ('a'-'0'-10);
299 val >>= 4;
300 }
301 buf[nibbles] = '\0';
302 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
303 }
304
305 /* max number of decimal digits in an unsigned long */
306 #define UL_DIGITS 21
307 static void __init prom_print_dec(unsigned long val)
308 {
309 int i, size;
310 char buf[UL_DIGITS+1];
311
312 for (i = UL_DIGITS-1; i >= 0; i--) {
313 buf[i] = (val % 10) + '0';
314 val = val/10;
315 if (val == 0)
316 break;
317 }
318 /* shift stuff down */
319 size = UL_DIGITS - i;
320 call_prom("write", 3, 1, prom.stdout, buf+i, size);
321 }
322
323 static void __init prom_printf(const char *format, ...)
324 {
325 const char *p, *q, *s;
326 va_list args;
327 unsigned long v;
328 long vs;
329
330 va_start(args, format);
331 for (p = format; *p != 0; p = q) {
332 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
333 ;
334 if (q > p)
335 call_prom("write", 3, 1, prom.stdout, p, q - p);
336 if (*q == 0)
337 break;
338 if (*q == '\n') {
339 ++q;
340 call_prom("write", 3, 1, prom.stdout,
341 ADDR("\r\n"), 2);
342 continue;
343 }
344 ++q;
345 if (*q == 0)
346 break;
347 switch (*q) {
348 case 's':
349 ++q;
350 s = va_arg(args, const char *);
351 prom_print(s);
352 break;
353 case 'x':
354 ++q;
355 v = va_arg(args, unsigned long);
356 prom_print_hex(v);
357 break;
358 case 'd':
359 ++q;
360 vs = va_arg(args, int);
361 if (vs < 0) {
362 prom_print("-");
363 vs = -vs;
364 }
365 prom_print_dec(vs);
366 break;
367 case 'l':
368 ++q;
369 if (*q == 0)
370 break;
371 else if (*q == 'x') {
372 ++q;
373 v = va_arg(args, unsigned long);
374 prom_print_hex(v);
375 } else if (*q == 'u') { /* '%lu' */
376 ++q;
377 v = va_arg(args, unsigned long);
378 prom_print_dec(v);
379 } else if (*q == 'd') { /* %ld */
380 ++q;
381 vs = va_arg(args, long);
382 if (vs < 0) {
383 prom_print("-");
384 vs = -vs;
385 }
386 prom_print_dec(vs);
387 }
388 break;
389 }
390 }
391 }
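/*
 * Note that only %s, %x, %d, %lx, %lu and %ld are handled above; width
 * and zero-padding modifiers are not implemented, and anything else
 * following a '%' is quietly skipped. So
 *
 *	prom_printf("base = 0x%lx, size = %lu\n", base, size);
 *
 * works, but "%08x"-style formats do not.
 */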
392
393
394 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
395 unsigned long align)
396 {
397
398 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
399 /*
400 * Old OF requires we claim physical and virtual separately
401 * and then map explicitly (assuming virtual mode)
402 */
403 int ret;
404 prom_arg_t result;
405
406 ret = call_prom_ret("call-method", 5, 2, &result,
407 ADDR("claim"), prom.memory,
408 align, size, virt);
409 if (ret != 0 || result == -1)
410 return -1;
411 ret = call_prom_ret("call-method", 5, 2, &result,
412 ADDR("claim"), prom.mmumap,
413 align, size, virt);
414 if (ret != 0) {
415 call_prom("call-method", 4, 1, ADDR("release"),
416 prom.memory, size, virt);
417 return -1;
418 }
419 /* the 0x12 is M (coherence) + PP == read/write */
420 call_prom("call-method", 6, 1,
421 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
422 return virt;
423 }
424 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
425 (prom_arg_t)align);
426 }
427
428 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
429 {
430 prom_print(reason);
431 /* Do not call exit because it clears the screen on pmac;
432 * it also causes some sort of double-fault on early pmacs */
433 if (of_platform == PLATFORM_POWERMAC)
434 asm("trap\n");
435
436 /* ToDo: should put up an SRC here on pSeries */
437 call_prom("exit", 0, 0);
438
439 for (;;) /* should never get here */
440 ;
441 }
442
443
444 static int __init prom_next_node(phandle *nodep)
445 {
446 phandle node;
447
448 if ((node = *nodep) != 0
449 && (*nodep = call_prom("child", 1, 1, node)) != 0)
450 return 1;
451 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
452 return 1;
453 for (;;) {
454 if ((node = call_prom("parent", 1, 1, node)) == 0)
455 return 0;
456 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
457 return 1;
458 }
459 }
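/*
 * This walks the device tree depth-first. The usual pattern in this
 * file starts from a zero handle, which makes the first call return
 * the root node:
 *
 *	for (node = 0; prom_next_node(&node); ) {
 *		type[0] = 0;
 *		prom_getprop(node, "device_type", type, sizeof(type));
 *		...
 *	}
 */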
460
461 static int inline prom_getprop(phandle node, const char *pname,
462 void *value, size_t valuelen)
463 {
464 return call_prom("getprop", 4, 1, node, ADDR(pname),
465 (u32)(unsigned long) value, (u32) valuelen);
466 }
467
468 static int inline prom_getproplen(phandle node, const char *pname)
469 {
470 return call_prom("getproplen", 2, 1, node, ADDR(pname));
471 }
472
473 static void add_string(char **str, const char *q)
474 {
475 char *p = *str;
476
477 while (*q)
478 *p++ = *q++;
479 *p++ = ' ';
480 *str = p;
481 }
482
483 static char *tohex(unsigned int x)
484 {
485 static char digits[] = "0123456789abcdef";
486 static char result[9];
487 int i;
488
489 result[8] = 0;
490 i = 8;
491 do {
492 --i;
493 result[i] = digits[x & 0xf];
494 x >>= 4;
495 } while (x != 0 && i > 0);
496 return &result[i];
497 }
498
499 static int __init prom_setprop(phandle node, const char *nodename,
500 const char *pname, void *value, size_t valuelen)
501 {
502 char cmd[256], *p;
503
504 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
505 return call_prom("setprop", 4, 1, node, ADDR(pname),
506 (u32)(unsigned long) value, (u32) valuelen);
507
508 /* gah... setprop doesn't work on longtrail, have to use interpret */
509 p = cmd;
510 add_string(&p, "dev");
511 add_string(&p, nodename);
512 add_string(&p, tohex((u32)(unsigned long) value));
513 add_string(&p, tohex(valuelen));
514 add_string(&p, tohex(ADDR(pname)));
515 add_string(&p, tohex(strlen(pname)));
516 add_string(&p, "property");
517 *p = 0;
518 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
519 }
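/*
 * On a longtrail the interpreted command built above has the form
 *
 *	dev <nodename> <value> <valuelen> <pname> <pnamelen> property
 *
 * with the four numbers in hex: "dev" selects the node and the
 * "property" word then creates <pname> from the <valuelen> bytes at
 * <value>, which is what "setprop" would have done directly.
 */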
520
521 /* We can't use the standard versions because of relocation headaches. */
522 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \
523 || ('a' <= (c) && (c) <= 'f') \
524 || ('A' <= (c) && (c) <= 'F'))
525
526 #define isdigit(c) ('0' <= (c) && (c) <= '9')
527 #define islower(c) ('a' <= (c) && (c) <= 'z')
528 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
529
530 static unsigned long prom_strtoul(const char *cp, const char **endp)
531 {
532 unsigned long result = 0, base = 10, value;
533
534 if (*cp == '0') {
535 base = 8;
536 cp++;
537 if (toupper(*cp) == 'X') {
538 cp++;
539 base = 16;
540 }
541 }
542
543 while (isxdigit(*cp) &&
544 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
545 result = result * base + value;
546 cp++;
547 }
548
549 if (endp)
550 *endp = cp;
551
552 return result;
553 }
554
555 static unsigned long prom_memparse(const char *ptr, const char **retptr)
556 {
557 unsigned long ret = prom_strtoul(ptr, retptr);
558 int shift = 0;
559
560 /*
561 * We can't use a switch here because GCC *may* generate a
562 * jump table which won't work, because we're not running at
563 * the address we're linked at.
564 */
565 if ('G' == **retptr || 'g' == **retptr)
566 shift = 30;
567
568 if ('M' == **retptr || 'm' == **retptr)
569 shift = 20;
570
571 if ('K' == **retptr || 'k' == **retptr)
572 shift = 10;
573
574 if (shift) {
575 ret <<= shift;
576 (*retptr)++;
577 }
578
579 return ret;
580 }
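/*
 * So for a command line option such as "mem=512M" this returns
 * 512 << 20 and leaves *retptr pointing just past the 'M'. Only a
 * single K/M/G suffix is recognised; no other suffixes are handled
 * at this stage.
 */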
581
582 /*
583 * Early parsing of the command line passed to the kernel, used for
584 * "mem=x" and the options that affect the iommu
585 */
586 static void __init early_cmdline_parse(void)
587 {
588 const char *opt;
589
590 char *p;
591 int l = 0;
592
593 prom_cmd_line[0] = 0;
594 p = prom_cmd_line;
595 if ((long)prom.chosen > 0)
596 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
597 #ifdef CONFIG_CMDLINE
598 if (l <= 0 || p[0] == '\0') /* dbl check */
599 strlcpy(prom_cmd_line,
600 CONFIG_CMDLINE, sizeof(prom_cmd_line));
601 #endif /* CONFIG_CMDLINE */
602 prom_printf("command line: %s\n", prom_cmd_line);
603
604 #ifdef CONFIG_PPC64
605 opt = strstr(prom_cmd_line, "iommu=");
606 if (opt) {
607 prom_printf("iommu opt is: %s\n", opt);
608 opt += 6;
609 while (*opt && *opt == ' ')
610 opt++;
611 if (!strncmp(opt, "off", 3))
612 prom_iommu_off = 1;
613 else if (!strncmp(opt, "force", 5))
614 prom_iommu_force_on = 1;
615 }
616 #endif
617 opt = strstr(prom_cmd_line, "mem=");
618 if (opt) {
619 opt += 4;
620 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
621 #ifdef CONFIG_PPC64
622 /* Align to 16 MB == size of ppc64 large page */
623 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
624 #endif
625 }
626 }
627
628 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
629 /*
630 * The architecture vector has an array of PVR mask/value pairs,
631 * followed by # option vectors - 1, followed by the option vectors.
632 *
633 * See prom.h for the definition of the bits specified in the
634 * architecture vector.
635 *
636 * Because the description vector contains a mix of byte and word
637 * values, we declare it as an unsigned char array, and use this
638 * macro to put word values in.
639 */
640 #define W(x) ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
641 ((x) >> 8) & 0xff, (x) & 0xff
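/*
 * For example W(0xffff0000) expands to the four bytes
 * 0xff, 0xff, 0x00, 0x00, so every word value is laid out in
 * big-endian byte order in the vector regardless of how the kernel
 * itself was built.
 */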
642
643 unsigned char ibm_architecture_vec[] = {
644 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
645 W(0xffff0000), W(0x003e0000), /* POWER6 */
646 W(0xffff0000), W(0x003f0000), /* POWER7 */
647 W(0xffff0000), W(0x004b0000), /* POWER8E */
648 W(0xffff0000), W(0x004d0000), /* POWER8 */
649 W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
650 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
651 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
652 W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */
653 6 - 1, /* 6 option vectors */
654
655 /* option vector 1: processor architectures supported */
656 3 - 2, /* length */
657 0, /* don't ignore, don't halt */
658 OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
659 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
660
661 /* option vector 2: Open Firmware options supported */
662 34 - 2, /* length */
663 OV2_REAL_MODE,
664 0, 0,
665 W(0xffffffff), /* real_base */
666 W(0xffffffff), /* real_size */
667 W(0xffffffff), /* virt_base */
668 W(0xffffffff), /* virt_size */
669 W(0xffffffff), /* load_base */
670 W(256), /* 256MB min RMA */
671 W(0xffffffff), /* full client load */
672 0, /* min RMA percentage of total RAM */
673 48, /* max log_2(hash table size) */
674
675 /* option vector 3: processor options supported */
676 3 - 2, /* length */
677 0, /* don't ignore, don't halt */
678 OV3_FP | OV3_VMX | OV3_DFP,
679
680 /* option vector 4: IBM PAPR implementation */
681 3 - 2, /* length */
682 0, /* don't halt */
683 OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
684
685 /* option vector 5: PAPR/OF options */
686 19 - 2, /* length */
687 0, /* don't ignore, don't halt */
688 OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
689 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
690 #ifdef CONFIG_PCI_MSI
691 /* PCIe/MSI support. Without MSI full PCIe is not supported */
692 OV5_FEAT(OV5_MSI),
693 #else
694 0,
695 #endif
696 0,
697 #ifdef CONFIG_PPC_SMLPAR
698 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
699 #else
700 0,
701 #endif
702 OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
703 0,
704 0,
705 0,
706 /* WARNING: The offset of the "number of cores" field below
707 * must match the macro below. Update the definition if
708 * the structure layout changes.
709 */
710 #define IBM_ARCH_VEC_NRCORES_OFFSET 125
711 W(NR_CPUS), /* number of cores supported */
712 0,
713 0,
714 0,
715 0,
716 OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
717 OV5_FEAT(OV5_PFO_HW_842),
718 OV5_FEAT(OV5_SUB_PROCESSORS),
719 /* option vector 6: IBM PAPR hints */
720 4 - 2, /* length */
721 0,
722 0,
723 OV6_LINUX,
724
725 };
726
727 /* Old method - ELF header with PT_NOTE sections only works on BE */
728 #ifdef __BIG_ENDIAN__
729 static struct fake_elf {
730 Elf32_Ehdr elfhdr;
731 Elf32_Phdr phdr[2];
732 struct chrpnote {
733 u32 namesz;
734 u32 descsz;
735 u32 type;
736 char name[8]; /* "PowerPC" */
737 struct chrpdesc {
738 u32 real_mode;
739 u32 real_base;
740 u32 real_size;
741 u32 virt_base;
742 u32 virt_size;
743 u32 load_base;
744 } chrpdesc;
745 } chrpnote;
746 struct rpanote {
747 u32 namesz;
748 u32 descsz;
749 u32 type;
750 char name[24]; /* "IBM,RPA-Client-Config" */
751 struct rpadesc {
752 u32 lpar_affinity;
753 u32 min_rmo_size;
754 u32 min_rmo_percent;
755 u32 max_pft_size;
756 u32 splpar;
757 u32 min_load;
758 u32 new_mem_def;
759 u32 ignore_me;
760 } rpadesc;
761 } rpanote;
762 } fake_elf = {
763 .elfhdr = {
764 .e_ident = { 0x7f, 'E', 'L', 'F',
765 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
766 .e_type = ET_EXEC, /* yeah right */
767 .e_machine = EM_PPC,
768 .e_version = EV_CURRENT,
769 .e_phoff = offsetof(struct fake_elf, phdr),
770 .e_phentsize = sizeof(Elf32_Phdr),
771 .e_phnum = 2
772 },
773 .phdr = {
774 [0] = {
775 .p_type = PT_NOTE,
776 .p_offset = offsetof(struct fake_elf, chrpnote),
777 .p_filesz = sizeof(struct chrpnote)
778 }, [1] = {
779 .p_type = PT_NOTE,
780 .p_offset = offsetof(struct fake_elf, rpanote),
781 .p_filesz = sizeof(struct rpanote)
782 }
783 },
784 .chrpnote = {
785 .namesz = sizeof("PowerPC"),
786 .descsz = sizeof(struct chrpdesc),
787 .type = 0x1275,
788 .name = "PowerPC",
789 .chrpdesc = {
790 .real_mode = ~0U, /* ~0 means "don't care" */
791 .real_base = ~0U,
792 .real_size = ~0U,
793 .virt_base = ~0U,
794 .virt_size = ~0U,
795 .load_base = ~0U
796 },
797 },
798 .rpanote = {
799 .namesz = sizeof("IBM,RPA-Client-Config"),
800 .descsz = sizeof(struct rpadesc),
801 .type = 0x12759999,
802 .name = "IBM,RPA-Client-Config",
803 .rpadesc = {
804 .lpar_affinity = 0,
805 .min_rmo_size = 64, /* in megabytes */
806 .min_rmo_percent = 0,
807 .max_pft_size = 48, /* 2^48 bytes max PFT size */
808 .splpar = 1,
809 .min_load = ~0U,
810 .new_mem_def = 0
811 }
812 }
813 };
814 #endif /* __BIG_ENDIAN__ */
815
816 static int __init prom_count_smt_threads(void)
817 {
818 phandle node;
819 char type[64];
820 unsigned int plen;
821
822 /* Pick up the first CPU node we can find */
823 for (node = 0; prom_next_node(&node); ) {
824 type[0] = 0;
825 prom_getprop(node, "device_type", type, sizeof(type));
826
827 if (strcmp(type, "cpu"))
828 continue;
829 /*
830 * There is an entry for each smt thread, each entry being
831 * 4 bytes long. All cpus should have the same number of
832 * smt threads, so return after finding the first.
833 */
834 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
835 if (plen == PROM_ERROR)
836 break;
837 plen >>= 2;
838 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
839
840 /* Sanity check */
841 if (plen < 1 || plen > 64) {
842 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
843 (unsigned long)plen);
844 return 1;
845 }
846 return plen;
847 }
848 prom_debug("No threads found, assuming 1 per core\n");
849
850 return 1;
851
852 }
853
854
855 static void __init prom_send_capabilities(void)
856 {
857 ihandle root;
858 prom_arg_t ret;
859 __be32 *cores;
860
861 root = call_prom("open", 1, 1, ADDR("/"));
862 if (root != 0) {
863 /* We need to tell the FW about the number of cores we support.
864 *
865 * To do that, we count the number of threads on the first core
866 * (we assume this is the same for all cores) and use it to
867 * divide NR_CPUS.
868 */
869 cores = (__be32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
870 if (be32_to_cpup(cores) != NR_CPUS) {
871 prom_printf("WARNING ! "
872 "ibm_architecture_vec structure inconsistent: %lu!\n",
873 be32_to_cpup(cores));
874 } else {
875 *cores = cpu_to_be32(DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()));
876 prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
877 be32_to_cpup(cores), NR_CPUS);
878 }
879
880 /* try calling the ibm,client-architecture-support method */
881 prom_printf("Calling ibm,client-architecture-support...");
882 if (call_prom_ret("call-method", 3, 2, &ret,
883 ADDR("ibm,client-architecture-support"),
884 root,
885 ADDR(ibm_architecture_vec)) == 0) {
886 /* the call exists... */
887 if (ret)
888 prom_printf("\nWARNING: ibm,client-architecture"
889 "-support call FAILED!\n");
890 call_prom("close", 1, 0, root);
891 prom_printf(" done\n");
892 return;
893 }
894 call_prom("close", 1, 0, root);
895 prom_printf(" not implemented\n");
896 }
897
898 #ifdef __BIG_ENDIAN__
899 {
900 ihandle elfloader;
901
902 /* no ibm,client-architecture-support call, try the old way */
903 elfloader = call_prom("open", 1, 1,
904 ADDR("/packages/elf-loader"));
905 if (elfloader == 0) {
906 prom_printf("couldn't open /packages/elf-loader\n");
907 return;
908 }
909 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
910 elfloader, ADDR(&fake_elf));
911 call_prom("close", 1, 0, elfloader);
912 }
913 #endif /* __BIG_ENDIAN__ */
914 }
915 #endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
916
917 /*
918 * Memory allocation strategy... our layout is normally:
919 *
920 * at 14MB or more we have vmlinux, then a gap and the initrd. In some
921 * rare cases the initrd might end up being before the kernel though.
922 * We assume this won't overwrite the final kernel at 0; we have no
923 * provision to handle that in this version, but it should hopefully
924 * never happen.
925 *
926 * alloc_top is set to the top of RMO, and is eventually shrunk down
927 * if the TCEs overlap.
928 *
929 * alloc_bottom is set to the top of the kernel/initrd.
930 *
931 * From there, allocations are done this way: RTAS is allocated
932 * topmost, and the device-tree is allocated from the bottom. We try
933 * to grow the device-tree allocation as we progress. If we can't,
934 * then we fail; we don't currently have a facility to restart
935 * elsewhere, but that shouldn't be necessary.
936 *
937 * Note that calls to reserve_mem have to be done explicitly, memory
938 * allocated with either alloc_up or alloc_down isn't automatically
939 * reserved.
940 */
941
942
943 /*
944 * Allocates memory in the RMO upward from the kernel/initrd
945 *
946 * When align is 0, this is a special case: it means to allocate in place
947 * at the current location of alloc_bottom or fail (that is basically
948 * extending the previous allocation). Used for the device-tree flattening.
949 */
950 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
951 {
952 unsigned long base = alloc_bottom;
953 unsigned long addr = 0;
954
955 if (align)
956 base = _ALIGN_UP(base, align);
957 prom_debug("alloc_up(%x, %x)\n", size, align);
958 if (ram_top == 0)
959 prom_panic("alloc_up() called with mem not initialized\n");
960
961 if (align)
962 base = _ALIGN_UP(alloc_bottom, align);
963 else
964 base = alloc_bottom;
965
966 for(; (base + size) <= alloc_top;
967 base = _ALIGN_UP(base + 0x100000, align)) {
968 prom_debug(" trying: 0x%x\n\r", base);
969 addr = (unsigned long)prom_claim(base, size, 0);
970 if (addr != PROM_ERROR && addr != 0)
971 break;
972 addr = 0;
973 if (align == 0)
974 break;
975 }
976 if (addr == 0)
977 return 0;
978 alloc_bottom = addr + size;
979
980 prom_debug(" -> %x\n", addr);
981 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
982 prom_debug(" alloc_top : %x\n", alloc_top);
983 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
984 prom_debug(" rmo_top : %x\n", rmo_top);
985 prom_debug(" ram_top : %x\n", ram_top);
986
987 return addr;
988 }
989
990 /*
991 * Allocates memory downward, either from top of RMO, or if highmem
992 * is set, from the top of RAM. Note that this one doesn't handle
993 * failures. It does claim memory if highmem is not set.
994 */
995 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
996 int highmem)
997 {
998 unsigned long base, addr = 0;
999
1000 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
1001 highmem ? "(high)" : "(low)");
1002 if (ram_top == 0)
1003 prom_panic("alloc_down() called with mem not initialized\n");
1004
1005 if (highmem) {
1006 /* Carve out storage for the TCE table. */
1007 addr = _ALIGN_DOWN(alloc_top_high - size, align);
1008 if (addr <= alloc_bottom)
1009 return 0;
1010 /* Will we bump into the RMO? If yes, check that we
1011 * didn't overlap existing allocations there; if we did,
1012 * we are dead, we must be the first in town!
1013 */
1014 if (addr < rmo_top) {
1015 /* Good, we are first */
1016 if (alloc_top == rmo_top)
1017 alloc_top = rmo_top = addr;
1018 else
1019 return 0;
1020 }
1021 alloc_top_high = addr;
1022 goto bail;
1023 }
1024
1025 base = _ALIGN_DOWN(alloc_top - size, align);
1026 for (; base > alloc_bottom;
1027 base = _ALIGN_DOWN(base - 0x100000, align)) {
1028 prom_debug(" trying: 0x%x\n\r", base);
1029 addr = (unsigned long)prom_claim(base, size, 0);
1030 if (addr != PROM_ERROR && addr != 0)
1031 break;
1032 addr = 0;
1033 }
1034 if (addr == 0)
1035 return 0;
1036 alloc_top = addr;
1037
1038 bail:
1039 prom_debug(" -> %x\n", addr);
1040 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
1041 prom_debug(" alloc_top : %x\n", alloc_top);
1042 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
1043 prom_debug(" rmo_top : %x\n", rmo_top);
1044 prom_debug(" ram_top : %x\n", ram_top);
1045
1046 return addr;
1047 }
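/*
 * A typical caller, as for RTAS further down, looks like
 *
 *	base = alloc_down(size, PAGE_SIZE, 0);
 *	if (base == 0)
 *		prom_panic("Could not allocate memory for RTAS\n");
 *	...
 *	reserve_mem(base, size);
 *
 * i.e. claim from the top of the RMO and then explicitly add the range
 * to the reserve map, since neither alloc_up() nor alloc_down() does
 * that by itself.
 */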
1048
1049 /*
1050 * Parse a "reg" cell
1051 */
1052 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1053 {
1054 cell_t *p = *cellp;
1055 unsigned long r = 0;
1056
1057 /* Ignore more than 2 cells */
1058 while (s > sizeof(unsigned long) / 4) {
1059 p++;
1060 s--;
1061 }
1062 r = be32_to_cpu(*p++);
1063 #ifdef CONFIG_PPC64
1064 if (s > 1) {
1065 r <<= 32;
1066 r |= be32_to_cpu(*(p++));
1067 }
1068 #endif
1069 *cellp = p;
1070 return r;
1071 }
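/*
 * With the usual ppc64 root node values of #address-cells = 2 and
 * #size-cells = 2, a "reg" entry of { 0x1 0x00000000 0x0 0x80000000 }
 * decoded by two calls here gives base = 0x100000000 and
 * size = 0x80000000, which is how prom_init_mem() below walks the
 * memory nodes.
 */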
1072
1073 /*
1074 * Very dumb function for adding to the memory reserve list, but
1075 * we don't need anything smarter at this point
1076 *
1077 * XXX Eventually check for collisions. They should NEVER happen.
1078 * If problems seem to show up, it would be a good start to track
1079 * them down.
1080 */
1081 static void __init reserve_mem(u64 base, u64 size)
1082 {
1083 u64 top = base + size;
1084 unsigned long cnt = mem_reserve_cnt;
1085
1086 if (size == 0)
1087 return;
1088
1089 /* We need to always keep one empty entry so that we
1090 * have our terminator with "size" set to 0 since we are
1091 * dumb and just copy this entire array to the boot params
1092 */
1093 base = _ALIGN_DOWN(base, PAGE_SIZE);
1094 top = _ALIGN_UP(top, PAGE_SIZE);
1095 size = top - base;
1096
1097 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1098 prom_panic("Memory reserve map exhausted !\n");
1099 mem_reserve_map[cnt].base = cpu_to_be64(base);
1100 mem_reserve_map[cnt].size = cpu_to_be64(size);
1101 mem_reserve_cnt = cnt + 1;
1102 }
1103
1104 /*
1105 * Initialize the memory allocation mechanism, parse "memory" nodes and
1106 * in that way obtain the top of memory and the RMO to set up our local allocator
1107 */
1108 static void __init prom_init_mem(void)
1109 {
1110 phandle node;
1111 char *path, type[64];
1112 unsigned int plen;
1113 cell_t *p, *endp;
1114 __be32 val;
1115 u32 rac, rsc;
1116
1117 /*
1118 * We iterate the memory nodes to find
1119 * 1) top of RMO (first node)
1120 * 2) top of memory
1121 */
1122 val = cpu_to_be32(2);
1123 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1124 rac = be32_to_cpu(val);
1125 val = cpu_to_be32(1);
1126 prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
1127 rsc = be32_to_cpu(val);
1128 prom_debug("root_addr_cells: %x\n", rac);
1129 prom_debug("root_size_cells: %x\n", rsc);
1130
1131 prom_debug("scanning memory:\n");
1132 path = prom_scratch;
1133
1134 for (node = 0; prom_next_node(&node); ) {
1135 type[0] = 0;
1136 prom_getprop(node, "device_type", type, sizeof(type));
1137
1138 if (type[0] == 0) {
1139 /*
1140 * CHRP Longtrail machines have no device_type
1141 * on the memory node, so check the name instead...
1142 */
1143 prom_getprop(node, "name", type, sizeof(type));
1144 }
1145 if (strcmp(type, "memory"))
1146 continue;
1147
1148 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1149 if (plen > sizeof(regbuf)) {
1150 prom_printf("memory node too large for buffer !\n");
1151 plen = sizeof(regbuf);
1152 }
1153 p = regbuf;
1154 endp = p + (plen / sizeof(cell_t));
1155
1156 #ifdef DEBUG_PROM
1157 memset(path, 0, PROM_SCRATCH_SIZE);
1158 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1159 prom_debug(" node %s :\n", path);
1160 #endif /* DEBUG_PROM */
1161
1162 while ((endp - p) >= (rac + rsc)) {
1163 unsigned long base, size;
1164
1165 base = prom_next_cell(rac, &p);
1166 size = prom_next_cell(rsc, &p);
1167
1168 if (size == 0)
1169 continue;
1170 prom_debug(" %x %x\n", base, size);
1171 if (base == 0 && (of_platform & PLATFORM_LPAR))
1172 rmo_top = size;
1173 if ((base + size) > ram_top)
1174 ram_top = base + size;
1175 }
1176 }
1177
1178 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1179
1180 /*
1181 * If prom_memory_limit is set we reduce the upper limits *except* for
1182 * alloc_top_high. This must be the real top of RAM so we can put
1183 * TCE's up there.
1184 */
1185
1186 alloc_top_high = ram_top;
1187
1188 if (prom_memory_limit) {
1189 if (prom_memory_limit <= alloc_bottom) {
1190 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
1191 prom_memory_limit);
1192 prom_memory_limit = 0;
1193 } else if (prom_memory_limit >= ram_top) {
1194 prom_printf("Ignoring mem=%x >= ram_top.\n",
1195 prom_memory_limit);
1196 prom_memory_limit = 0;
1197 } else {
1198 ram_top = prom_memory_limit;
1199 rmo_top = min(rmo_top, prom_memory_limit);
1200 }
1201 }
1202
1203 /*
1204 * Set up our top alloc point, that is the top of RMO or top of
1205 * segment 0 when running non-LPAR.
1206 * Some RS64 machines have buggy firmware where claims up at
1207 * 1GB fail. Cap at 768MB as a workaround.
1208 * Since 768MB is plenty of room, and we need to cap to something
1209 * reasonable on 32-bit, cap at 768MB on all machines.
1210 */
1211 if (!rmo_top)
1212 rmo_top = ram_top;
1213 rmo_top = min(0x30000000ul, rmo_top);
1214 alloc_top = rmo_top;
1215 alloc_top_high = ram_top;
1216
1217 /*
1218 * Check if we have an initrd after the kernel but still inside
1219 * the RMO. If we do, move our bottom point to after it.
1220 */
1221 if (prom_initrd_start &&
1222 prom_initrd_start < rmo_top &&
1223 prom_initrd_end > alloc_bottom)
1224 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1225
1226 prom_printf("memory layout at init:\n");
1227 prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
1228 prom_printf(" alloc_bottom : %x\n", alloc_bottom);
1229 prom_printf(" alloc_top : %x\n", alloc_top);
1230 prom_printf(" alloc_top_hi : %x\n", alloc_top_high);
1231 prom_printf(" rmo_top : %x\n", rmo_top);
1232 prom_printf(" ram_top : %x\n", ram_top);
1233 }
1234
1235 static void __init prom_close_stdin(void)
1236 {
1237 __be32 val;
1238 ihandle stdin;
1239
1240 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1241 stdin = be32_to_cpu(val);
1242 call_prom("close", 1, 0, stdin);
1243 }
1244 }
1245
1246 #ifdef CONFIG_PPC_POWERNV
1247
1248 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1249 static u64 __initdata prom_opal_base;
1250 static u64 __initdata prom_opal_entry;
1251 #endif
1252
1253 #ifdef __BIG_ENDIAN__
1254 /* XXX Don't change this structure without updating opal-takeover.S */
1255 static struct opal_secondary_data {
1256 s64 ack; /* 0 */
1257 u64 go; /* 8 */
1258 struct opal_takeover_args args; /* 16 */
1259 } opal_secondary_data;
1260
1261 static u64 __initdata prom_opal_align;
1262 static u64 __initdata prom_opal_size;
1263 static int __initdata prom_rtas_start_cpu;
1264 static u64 __initdata prom_rtas_data;
1265 static u64 __initdata prom_rtas_entry;
1266
1267 extern char opal_secondary_entry;
1268
1269 static void __init prom_query_opal(void)
1270 {
1271 long rc;
1272
1273 /* We must not query for OPAL presence on a machine that
1274 * supports TNK takeover (970 blades), as this uses the same
1275 * h-call with different arguments and will crash
1276 */
1277 if (PHANDLE_VALID(call_prom("finddevice", 1, 1,
1278 ADDR("/tnk-memory-map")))) {
1279 prom_printf("TNK takeover detected, skipping OPAL check\n");
1280 return;
1281 }
1282
1283 prom_printf("Querying for OPAL presence... ");
1284
1285 rc = opal_query_takeover(&prom_opal_size,
1286 &prom_opal_align);
1287 prom_debug("(rc = %ld) ", rc);
1288 if (rc != 0) {
1289 prom_printf("not there.\n");
1290 return;
1291 }
1292 of_platform = PLATFORM_OPAL;
1293 prom_printf(" there !\n");
1294 prom_debug(" opal_size = 0x%lx\n", prom_opal_size);
1295 prom_debug(" opal_align = 0x%lx\n", prom_opal_align);
1296 if (prom_opal_align < 0x10000)
1297 prom_opal_align = 0x10000;
1298 }
1299
1300 static int __init prom_rtas_call(int token, int nargs, int nret,
1301 int *outputs, ...)
1302 {
1303 struct rtas_args rtas_args;
1304 va_list list;
1305 int i;
1306
1307 rtas_args.token = token;
1308 rtas_args.nargs = nargs;
1309 rtas_args.nret = nret;
1310 rtas_args.rets = (rtas_arg_t *)&(rtas_args.args[nargs]);
1311 va_start(list, outputs);
1312 for (i = 0; i < nargs; ++i)
1313 rtas_args.args[i] = va_arg(list, rtas_arg_t);
1314 va_end(list);
1315
1316 for (i = 0; i < nret; ++i)
1317 rtas_args.rets[i] = 0;
1318
1319 opal_enter_rtas(&rtas_args, prom_rtas_data,
1320 prom_rtas_entry);
1321
1322 if (nret > 1 && outputs != NULL)
1323 for (i = 0; i < nret-1; ++i)
1324 outputs[i] = rtas_args.rets[i+1];
1325 return (nret > 0)? rtas_args.rets[0]: 0;
1326 }
1327
1328 static void __init prom_opal_hold_cpus(void)
1329 {
1330 int i, cnt, cpu, rc;
1331 long j;
1332 phandle node;
1333 char type[64];
1334 u32 servers[8];
1335 void *entry = (unsigned long *)&opal_secondary_entry;
1336 struct opal_secondary_data *data = &opal_secondary_data;
1337
1338 prom_debug("prom_opal_hold_cpus: start...\n");
1339 prom_debug(" - entry = 0x%x\n", entry);
1340 prom_debug(" - data = 0x%x\n", data);
1341
1342 data->ack = -1;
1343 data->go = 0;
1344
1345 /* look for cpus */
1346 for (node = 0; prom_next_node(&node); ) {
1347 type[0] = 0;
1348 prom_getprop(node, "device_type", type, sizeof(type));
1349 if (strcmp(type, "cpu") != 0)
1350 continue;
1351
1352 /* Skip non-configured cpus. */
1353 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1354 if (strcmp(type, "okay") != 0)
1355 continue;
1356
1357 cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers,
1358 sizeof(servers));
1359 if (cnt == PROM_ERROR)
1360 break;
1361 cnt >>= 2;
1362 for (i = 0; i < cnt; i++) {
1363 cpu = servers[i];
1364 prom_debug("CPU %d ... ", cpu);
1365 if (cpu == prom.cpu) {
1366 prom_debug("booted !\n");
1367 continue;
1368 }
1369 prom_debug("starting ... ");
1370
1371 /* Init the acknowledge var which will be reset by
1372 * the secondary cpu when it awakens from its OF
1373 * spinloop.
1374 */
1375 data->ack = -1;
1376 rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1,
1377 NULL, cpu, entry, data);
1378 prom_debug("rtas rc=%d ...", rc);
1379
1380 for (j = 0; j < 100000000 && data->ack == -1; j++) {
1381 HMT_low();
1382 mb();
1383 }
1384 HMT_medium();
1385 if (data->ack != -1)
1386 prom_debug("done, PIR=0x%x\n", data->ack);
1387 else
1388 prom_debug("timeout !\n");
1389 }
1390 }
1391 prom_debug("prom_opal_hold_cpus: end...\n");
1392 }
1393
1394 static void __init prom_opal_takeover(void)
1395 {
1396 struct opal_secondary_data *data = &opal_secondary_data;
1397 struct opal_takeover_args *args = &data->args;
1398 u64 align = prom_opal_align;
1399 u64 top_addr, opal_addr;
1400
1401 args->k_image = (u64)_stext;
1402 args->k_size = _end - _stext;
1403 args->k_entry = 0;
1404 args->k_entry2 = 0x60;
1405
1406 top_addr = _ALIGN_UP(args->k_size, align);
1407
1408 if (prom_initrd_start != 0) {
1409 args->rd_image = prom_initrd_start;
1410 args->rd_size = prom_initrd_end - args->rd_image;
1411 args->rd_loc = top_addr;
1412 top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align);
1413 }
1414
1415 /* Pick up an address for the HAL. We want to go really high
1416 * up to avoid problems with future kexecs. On the other hand
1417 * we don't want to be all over the TCEs on P5IOC2 machines
1418 * which are going to be up there too. We assume the machine
1419 * has plenty of memory, and we ask for the HAL for now to
1420 * be just below the 1G point, or above the initrd
1421 */
1422 opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align);
1423 if (opal_addr < top_addr)
1424 opal_addr = top_addr;
1425 args->hal_addr = opal_addr;
1426
1427 /* Copy the command line to the kernel image */
1428 strlcpy(boot_command_line, prom_cmd_line,
1429 COMMAND_LINE_SIZE);
1430
1431 prom_debug(" k_image = 0x%lx\n", args->k_image);
1432 prom_debug(" k_size = 0x%lx\n", args->k_size);
1433 prom_debug(" k_entry = 0x%lx\n", args->k_entry);
1434 prom_debug(" k_entry2 = 0x%lx\n", args->k_entry2);
1435 prom_debug(" hal_addr = 0x%lx\n", args->hal_addr);
1436 prom_debug(" rd_image = 0x%lx\n", args->rd_image);
1437 prom_debug(" rd_size = 0x%lx\n", args->rd_size);
1438 prom_debug(" rd_loc = 0x%lx\n", args->rd_loc);
1439 prom_printf("Performing OPAL takeover,this can take a few minutes..\n");
1440 prom_close_stdin();
1441 mb();
1442 data->go = 1;
1443 for (;;)
1444 opal_do_takeover(args);
1445 }
1446 #endif /* __BIG_ENDIAN__ */
1447
1448 /*
1449 * Allocate room for and instantiate OPAL
1450 */
1451 static void __init prom_instantiate_opal(void)
1452 {
1453 phandle opal_node;
1454 ihandle opal_inst;
1455 u64 base, entry;
1456 u64 size = 0, align = 0x10000;
1457 __be64 val64;
1458 u32 rets[2];
1459
1460 prom_debug("prom_instantiate_opal: start...\n");
1461
1462 opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1463 prom_debug("opal_node: %x\n", opal_node);
1464 if (!PHANDLE_VALID(opal_node))
1465 return;
1466
1467 val64 = 0;
1468 prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
1469 size = be64_to_cpu(val64);
1470 if (size == 0)
1471 return;
1472 val64 = 0;
1473 prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64));
1474 align = be64_to_cpu(val64);
1475
1476 base = alloc_down(size, align, 0);
1477 if (base == 0) {
1478 prom_printf("OPAL allocation failed !\n");
1479 return;
1480 }
1481
1482 opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
1483 if (!IHANDLE_VALID(opal_inst)) {
1484 prom_printf("opening opal package failed (%x)\n", opal_inst);
1485 return;
1486 }
1487
1488 prom_printf("instantiating opal at 0x%x...", base);
1489
1490 if (call_prom_ret("call-method", 4, 3, rets,
1491 ADDR("load-opal-runtime"),
1492 opal_inst,
1493 base >> 32, base & 0xffffffff) != 0
1494 || (rets[0] == 0 && rets[1] == 0)) {
1495 prom_printf(" failed\n");
1496 return;
1497 }
1498 entry = (((u64)rets[0]) << 32) | rets[1];
1499
1500 prom_printf(" done\n");
1501
1502 reserve_mem(base, size);
1503
1504 prom_debug("opal base = 0x%x\n", base);
1505 prom_debug("opal align = 0x%x\n", align);
1506 prom_debug("opal entry = 0x%x\n", entry);
1507 prom_debug("opal size = 0x%x\n", (long)size);
1508
1509 prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
1510 &base, sizeof(base));
1511 prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
1512 &entry, sizeof(entry));
1513
1514 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1515 prom_opal_base = base;
1516 prom_opal_entry = entry;
1517 #endif
1518 prom_debug("prom_instantiate_opal: end...\n");
1519 }
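/*
 * Note how "load-opal-runtime" above deals purely in 32-bit cells: the
 * 64-bit base is passed as base >> 32 and base & 0xffffffff, and the
 * 64-bit entry point comes back as two return cells that are
 * reassembled with (((u64)rets[0]) << 32) | rets[1].
 */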
1520
1521 #endif /* CONFIG_PPC_POWERNV */
1522
1523 /*
1524 * Allocate room for and instantiate RTAS
1525 */
1526 static void __init prom_instantiate_rtas(void)
1527 {
1528 phandle rtas_node;
1529 ihandle rtas_inst;
1530 u32 base, entry = 0;
1531 __be32 val;
1532 u32 size = 0;
1533
1534 prom_debug("prom_instantiate_rtas: start...\n");
1535
1536 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1537 prom_debug("rtas_node: %x\n", rtas_node);
1538 if (!PHANDLE_VALID(rtas_node))
1539 return;
1540
1541 val = 0;
1542 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1543 size = be32_to_cpu(val);
1544 if (size == 0)
1545 return;
1546
1547 base = alloc_down(size, PAGE_SIZE, 0);
1548 if (base == 0)
1549 prom_panic("Could not allocate memory for RTAS\n");
1550
1551 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1552 if (!IHANDLE_VALID(rtas_inst)) {
1553 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1554 return;
1555 }
1556
1557 prom_printf("instantiating rtas at 0x%x...", base);
1558
1559 if (call_prom_ret("call-method", 3, 2, &entry,
1560 ADDR("instantiate-rtas"),
1561 rtas_inst, base) != 0
1562 || entry == 0) {
1563 prom_printf(" failed\n");
1564 return;
1565 }
1566 prom_printf(" done\n");
1567
1568 reserve_mem(base, size);
1569
1570 val = cpu_to_be32(base);
1571 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1572 &val, sizeof(val));
1573 val = cpu_to_be32(entry);
1574 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1575 &val, sizeof(val));
1576
1577 #if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
1578 /* PowerVM takeover hack */
1579 prom_rtas_data = base;
1580 prom_rtas_entry = entry;
1581 prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4);
1582 #endif
1583 prom_debug("rtas base = 0x%x\n", base);
1584 prom_debug("rtas entry = 0x%x\n", entry);
1585 prom_debug("rtas size = 0x%x\n", (long)size);
1586
1587 prom_debug("prom_instantiate_rtas: end...\n");
1588 }
1589
1590 #ifdef CONFIG_PPC64
1591 /*
1592 * Allocate room for and instantiate Stored Measurement Log (SML)
1593 */
1594 static void __init prom_instantiate_sml(void)
1595 {
1596 phandle ibmvtpm_node;
1597 ihandle ibmvtpm_inst;
1598 u32 entry = 0, size = 0;
1599 u64 base;
1600
1601 prom_debug("prom_instantiate_sml: start...\n");
1602
1603 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/ibm,vtpm"));
1604 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1605 if (!PHANDLE_VALID(ibmvtpm_node))
1606 return;
1607
1608 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/ibm,vtpm"));
1609 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1610 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1611 return;
1612 }
1613
1614 if (call_prom_ret("call-method", 2, 2, &size,
1615 ADDR("sml-get-handover-size"),
1616 ibmvtpm_inst) != 0 || size == 0) {
1617 prom_printf("SML get handover size failed\n");
1618 return;
1619 }
1620
1621 base = alloc_down(size, PAGE_SIZE, 0);
1622 if (base == 0)
1623 prom_panic("Could not allocate memory for sml\n");
1624
1625 prom_printf("instantiating sml at 0x%x...", base);
1626
1627 if (call_prom_ret("call-method", 4, 2, &entry,
1628 ADDR("sml-handover"),
1629 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1630 prom_printf("SML handover failed\n");
1631 return;
1632 }
1633 prom_printf(" done\n");
1634
1635 reserve_mem(base, size);
1636
1637 prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-base",
1638 &base, sizeof(base));
1639 prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-size",
1640 &size, sizeof(size));
1641
1642 prom_debug("sml base = 0x%x\n", base);
1643 prom_debug("sml size = 0x%x\n", (long)size);
1644
1645 prom_debug("prom_instantiate_sml: end...\n");
1646 }
1647
1648 /*
1649 * Allocate room for and initialize TCE tables
1650 */
1651 #ifdef __BIG_ENDIAN__
1652 static void __init prom_initialize_tce_table(void)
1653 {
1654 phandle node;
1655 ihandle phb_node;
1656 char compatible[64], type[64], model[64];
1657 char *path = prom_scratch;
1658 u64 base, align;
1659 u32 minalign, minsize;
1660 u64 tce_entry, *tce_entryp;
1661 u64 local_alloc_top, local_alloc_bottom;
1662 u64 i;
1663
1664 if (prom_iommu_off)
1665 return;
1666
1667 prom_debug("starting prom_initialize_tce_table\n");
1668
1669 /* Cache current top of allocs so we reserve a single block */
1670 local_alloc_top = alloc_top_high;
1671 local_alloc_bottom = local_alloc_top;
1672
1673 /* Search all nodes looking for PHBs. */
1674 for (node = 0; prom_next_node(&node); ) {
1675 compatible[0] = 0;
1676 type[0] = 0;
1677 model[0] = 0;
1678 prom_getprop(node, "compatible",
1679 compatible, sizeof(compatible));
1680 prom_getprop(node, "device_type", type, sizeof(type));
1681 prom_getprop(node, "model", model, sizeof(model));
1682
1683 if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1684 continue;
1685
1686 /* Keep the old logic intact to avoid regression. */
1687 if (compatible[0] != 0) {
1688 if ((strstr(compatible, "python") == NULL) &&
1689 (strstr(compatible, "Speedwagon") == NULL) &&
1690 (strstr(compatible, "Winnipeg") == NULL))
1691 continue;
1692 } else if (model[0] != 0) {
1693 if ((strstr(model, "ython") == NULL) &&
1694 (strstr(model, "peedwagon") == NULL) &&
1695 (strstr(model, "innipeg") == NULL))
1696 continue;
1697 }
1698
1699 if (prom_getprop(node, "tce-table-minalign", &minalign,
1700 sizeof(minalign)) == PROM_ERROR)
1701 minalign = 0;
1702 if (prom_getprop(node, "tce-table-minsize", &minsize,
1703 sizeof(minsize)) == PROM_ERROR)
1704 minsize = 4UL << 20;
1705
1706 /*
1707 * Even though we read what OF wants, we just set the table
1708 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1709 * By doing this, we avoid the pitfalls of trying to DMA to
1710 * MMIO space and the DMA alias hole.
1711 *
1712 * On POWER4, firmware sets the TCE region by assuming
1713 * each TCE table is 8MB. Using this memory for anything
1714 * else will impact performance, so we always allocate 8MB.
1715 * Anton
1716 */
1717 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
1718 minsize = 8UL << 20;
1719 else
1720 minsize = 4UL << 20;
1721
1722 /* Align to the greater of the align or size */
1723 align = max(minalign, minsize);
1724 base = alloc_down(minsize, align, 1);
1725 if (base == 0)
1726 prom_panic("ERROR, cannot find space for TCE table.\n");
1727 if (base < local_alloc_bottom)
1728 local_alloc_bottom = base;
1729
1730 /* It seems OF doesn't null-terminate the path :-( */
1731 memset(path, 0, PROM_SCRATCH_SIZE);
1732 /* Call OF to setup the TCE hardware */
1733 if (call_prom("package-to-path", 3, 1, node,
1734 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1735 prom_printf("package-to-path failed\n");
1736 }
1737
1738 /* Save away the TCE table attributes for later use. */
1739 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1740 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1741
1742 prom_debug("TCE table: %s\n", path);
1743 prom_debug("\tnode = 0x%x\n", node);
1744 prom_debug("\tbase = 0x%x\n", base);
1745 prom_debug("\tsize = 0x%x\n", minsize);
1746
1747 /* Initialize the table to have a one-to-one mapping
1748 * over the allocated size.
1749 */
1750 tce_entryp = (u64 *)base;
1751 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1752 tce_entry = (i << PAGE_SHIFT);
1753 tce_entry |= 0x3;
1754 *tce_entryp = tce_entry;
1755 }
1756
1757 prom_printf("opening PHB %s", path);
1758 phb_node = call_prom("open", 1, 1, path);
1759 if (phb_node == 0)
1760 prom_printf("... failed\n");
1761 else
1762 prom_printf("... done\n");
1763
1764 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1765 phb_node, -1, minsize,
1766 (u32) base, (u32) (base >> 32));
1767 call_prom("close", 1, 0, phb_node);
1768 }
1769
1770 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1771
1772 /* These are only really needed if there is a memory limit in
1773 * effect, but we don't know so export them always. */
1774 prom_tce_alloc_start = local_alloc_bottom;
1775 prom_tce_alloc_end = local_alloc_top;
1776
1777 /* Flag the first invalid entry */
1778 prom_debug("ending prom_initialize_tce_table\n");
1779 }
1780 #endif /* __BIG_ENDIAN__ */
1781 #endif /* CONFIG_PPC64 */
1782
1783 /*
1784 * With CHRP SMP we need to use the OF to start the other processors.
1785 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1786 * so we have to put the processors into a holding pattern controlled
1787 * by the kernel (not OF) before we destroy the OF.
1788 *
1789 * This uses a chunk of low memory, puts some holding pattern
1790 * code there and sends the other processors off to there until
1791 * smp_boot_cpus tells them to do something. The holding pattern
1792 * checks that address until its cpu # is there; when it is, that
1793 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1794 * of setting those values.
1795 *
1796 * We also use physical address 0x4 here to tell when a cpu
1797 * is in its holding pattern code.
1798 *
1799 * -- Cort
1800 */
1801 /*
1802 * We want to reference the copy of __secondary_hold_* in the
1803 * 0 - 0x100 address range
1804 */
1805 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
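/*
 * This just masks a link-time address down to its offset within the
 * first 0x100 bytes, e.g. an address ending in ...60 becomes plain
 * 0x60, which is where the low-memory copy of that code lives.
 */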
1806
1807 static void __init prom_hold_cpus(void)
1808 {
1809 unsigned long i;
1810 phandle node;
1811 char type[64];
1812 unsigned long *spinloop
1813 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1814 unsigned long *acknowledge
1815 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1816 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1817
1818 prom_debug("prom_hold_cpus: start...\n");
1819 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1820 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1821 prom_debug(" 1) acknowledge = 0x%x\n",
1822 (unsigned long)acknowledge);
1823 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1824 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1825
1826 /* Set the common spinloop variable, so all of the secondary cpus
1827 * will block when they are awakened from their OF spinloop.
1828 * This must occur for both SMP and non-SMP kernels, since OF will
1829 * be trashed when we move the kernel.
1830 */
1831 *spinloop = 0;
1832
1833 /* look for cpus */
1834 for (node = 0; prom_next_node(&node); ) {
1835 unsigned int cpu_no;
1836 __be32 reg;
1837
1838 type[0] = 0;
1839 prom_getprop(node, "device_type", type, sizeof(type));
1840 if (strcmp(type, "cpu") != 0)
1841 continue;
1842
1843 /* Skip non-configured cpus. */
1844 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1845 if (strcmp(type, "okay") != 0)
1846 continue;
1847
1848 reg = cpu_to_be32(-1); /* make sparse happy */
1849 prom_getprop(node, "reg", &reg, sizeof(reg));
1850 cpu_no = be32_to_cpu(reg);
1851
1852 prom_debug("cpu hw idx = %lu\n", cpu_no);
1853
1854 /* Init the acknowledge var which will be reset by
1855 * the secondary cpu when it awakens from its OF
1856 * spinloop.
1857 */
1858 *acknowledge = (unsigned long)-1;
1859
1860 if (cpu_no != prom.cpu) {
1861 /* Primary Thread of non-boot cpu or any thread */
1862 prom_printf("starting cpu hw idx %lu... ", cpu_no);
1863 call_prom("start-cpu", 3, 0, node,
1864 secondary_hold, cpu_no);
1865
1866 for (i = 0; (i < 100000000) &&
1867 (*acknowledge == ((unsigned long)-1)); i++ )
1868 mb();
1869
1870 if (*acknowledge == cpu_no)
1871 prom_printf("done\n");
1872 else
1873 prom_printf("failed: %x\n", *acknowledge);
1874 }
1875 #ifdef CONFIG_SMP
1876 else
1877 prom_printf("boot cpu hw idx %lu\n", cpu_no);
1878 #endif /* CONFIG_SMP */
1879 }
1880
1881 prom_debug("prom_hold_cpus: end...\n");
1882 }
1883
1884
1885 static void __init prom_init_client_services(unsigned long pp)
1886 {
1887 /* Get a handle to the prom entry point before anything else */
1888 prom_entry = pp;
1889
1890 /* get a handle for the stdout device */
1891 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1892 if (!PHANDLE_VALID(prom.chosen))
1893 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1894
1895 /* get device tree root */
1896 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
1897 if (!PHANDLE_VALID(prom.root))
1898 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1899
1900 prom.mmumap = 0;
1901 }
1902
1903 #ifdef CONFIG_PPC32
1904 /*
1905 * For really old powermacs, we need to map things we claim.
1906 * For that, we need the ihandle of the mmu.
1907 * Also, on the longtrail, we need to work around other bugs.
1908 */
1909 static void __init prom_find_mmu(void)
1910 {
1911 phandle oprom;
1912 char version[64];
1913
1914 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1915 if (!PHANDLE_VALID(oprom))
1916 return;
1917 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1918 return;
1919 version[sizeof(version) - 1] = 0;
1920 /* XXX might need to add other versions here */
1921 if (strcmp(version, "Open Firmware, 1.0.5") == 0)
1922 of_workarounds = OF_WA_CLAIM;
1923 else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
1924 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
1925 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
1926 } else
1927 return;
1928 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
1929 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
1930 sizeof(prom.mmumap));
1931 prom.mmumap = be32_to_cpu(prom.mmumap);
1932 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
1933 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
1934 }
1935 #else
1936 #define prom_find_mmu()
1937 #endif
1938
1939 static void __init prom_init_stdout(void)
1940 {
1941 char *path = of_stdout_device;
1942 char type[16];
1943 phandle stdout_node;
1944 __be32 val;
1945
1946 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
1947 prom_panic("cannot find stdout");
1948
1949 prom.stdout = be32_to_cpu(val);
1950
1951 /* Get the full OF pathname of the stdout device */
1952 memset(path, 0, 256);
1953 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
1954 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
1955 val = cpu_to_be32(stdout_node);
1956 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
1957 &val, sizeof(val));
1958 prom_printf("OF stdout device is: %s\n", of_stdout_device);
1959 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
1960 path, strlen(path) + 1);
1961
1962 /* If it's a display, note it */
1963 memset(type, 0, sizeof(type));
1964 prom_getprop(stdout_node, "device_type", type, sizeof(type));
1965 if (strcmp(type, "display") == 0)
1966 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
1967 }
1968
1969 static int __init prom_find_machine_type(void)
1970 {
1971 char compat[256];
1972 int len, i = 0;
1973 #ifdef CONFIG_PPC64
1974 phandle rtas;
1975 int x;
1976 #endif
1977
1978 /* Look for a PowerMac or a Cell */
1979 len = prom_getprop(prom.root, "compatible",
1980 compat, sizeof(compat)-1);
1981 if (len > 0) {
1982 compat[len] = 0;
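/* "compatible" is a list of NUL-separated strings; walk each entry. */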
1983 while (i < len) {
1984 char *p = &compat[i];
1985 int sl = strlen(p);
1986 if (sl == 0)
1987 break;
1988 if (strstr(p, "Power Macintosh") ||
1989 strstr(p, "MacRISC"))
1990 return PLATFORM_POWERMAC;
1991 #ifdef CONFIG_PPC64
1992 /* We must make sure we don't detect the IBM Cell
1993 * blades as pSeries due to some firmware issues,
1994 * so we do it here.
1995 */
1996 if (strstr(p, "IBM,CBEA") ||
1997 strstr(p, "IBM,CPBW-1.0"))
1998 return PLATFORM_GENERIC;
1999 #endif /* CONFIG_PPC64 */
2000 i += sl + 1;
2001 }
2002 }
2003 #ifdef CONFIG_PPC64
2004 /* Try to detect OPAL */
2005 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
2006 return PLATFORM_OPAL;
2007
2008 /* Try to figure out if it's an IBM pSeries or any other
2009 * PAPR compliant platform. We assume it is if :
2010 * - /device_type is "chrp" (please, do NOT use that for future
2011 * non-IBM designs!)
2012 * - it has /rtas
2013 */
2014 len = prom_getprop(prom.root, "device_type",
2015 compat, sizeof(compat)-1);
2016 if (len <= 0)
2017 return PLATFORM_GENERIC;
2018 if (strcmp(compat, "chrp"))
2019 return PLATFORM_GENERIC;
2020
2021 /* Default to pSeries. We need to know if we are running LPAR */
2022 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2023 if (!PHANDLE_VALID(rtas))
2024 return PLATFORM_GENERIC;
2025 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2026 if (x != PROM_ERROR) {
2027 prom_debug("Hypertas detected, assuming LPAR !\n");
2028 return PLATFORM_PSERIES_LPAR;
2029 }
2030 return PLATFORM_PSERIES;
2031 #else
2032 return PLATFORM_GENERIC;
2033 #endif
2034 }
2035
2036 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2037 {
2038 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2039 }
2040
2041 /*
2042 * If we have a display that we don't know how to drive,
2043 * we will want to try to execute OF's open method for it
2044 * later. However, OF will probably fall over if we do that
2045 * after we've taken over the MMU.
2046 * So we check whether we will need to open the display,
2047 * and if so, open it now.
2048 */
2049 static void __init prom_check_displays(void)
2050 {
2051 char type[16], *path;
2052 phandle node;
2053 ihandle ih;
2054 int i;
2055
2056 static unsigned char default_colors[] = {
2057 0x00, 0x00, 0x00,
2058 0x00, 0x00, 0xaa,
2059 0x00, 0xaa, 0x00,
2060 0x00, 0xaa, 0xaa,
2061 0xaa, 0x00, 0x00,
2062 0xaa, 0x00, 0xaa,
2063 0xaa, 0xaa, 0x00,
2064 0xaa, 0xaa, 0xaa,
2065 0x55, 0x55, 0x55,
2066 0x55, 0x55, 0xff,
2067 0x55, 0xff, 0x55,
2068 0x55, 0xff, 0xff,
2069 0xff, 0x55, 0x55,
2070 0xff, 0x55, 0xff,
2071 0xff, 0xff, 0x55,
2072 0xff, 0xff, 0xff
2073 };
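/* default_colors is the classic 16-entry text-mode palette, three RGB
 * bytes per entry, programmed via the "color!" method further down.
 */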
2074 const unsigned char *clut;
2075
2076 prom_debug("Looking for displays\n");
2077 for (node = 0; prom_next_node(&node); ) {
2078 memset(type, 0, sizeof(type));
2079 prom_getprop(node, "device_type", type, sizeof(type));
2080 if (strcmp(type, "display") != 0)
2081 continue;
2082
2083 /* It seems OF doesn't null-terminate the path :-( */
2084 path = prom_scratch;
2085 memset(path, 0, PROM_SCRATCH_SIZE);
2086
2087 /*
2088 * leave some room at the end of the path for appending extra
2089 * arguments
2090 */
2091 if (call_prom("package-to-path", 3, 1, node, path,
2092 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
2093 continue;
2094 prom_printf("found display : %s, opening... ", path);
2095
2096 ih = call_prom("open", 1, 1, path);
2097 if (ih == 0) {
2098 prom_printf("failed\n");
2099 continue;
2100 }
2101
2102 /* Success */
2103 prom_printf("done\n");
2104 prom_setprop(node, path, "linux,opened", NULL, 0);
2105
2106 /* Set up a usable color table when the appropriate
2107 * method is available. Should update this to set-colors */
2108 clut = default_colors;
2109 for (i = 0; i < 16; i++, clut += 3)
2110 if (prom_set_color(ih, i, clut[0], clut[1],
2111 clut[2]) != 0)
2112 break;
2113
2114 #ifdef CONFIG_LOGO_LINUX_CLUT224
2115 clut = PTRRELOC(logo_linux_clut224.clut);
2116 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2117 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2118 clut[2]) != 0)
2119 break;
2120 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2121
2122 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2123 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2124 PROM_ERROR) {
2125 u32 width, height, pitch, addr;
2126
2127 prom_printf("Setting btext !\n");
2128 prom_getprop(node, "width", &width, 4);
2129 prom_getprop(node, "height", &height, 4);
2130 prom_getprop(node, "linebytes", &pitch, 4);
2131 prom_getprop(node, "address", &addr, 4);
2132 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2133 width, height, pitch, addr);
2134 btext_setup_display(width, height, 8, pitch, addr);
2135 }
2136 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2137 }
2138 }
2139
2140
2141 /* Return a pointer to this much memory: claims more from OF if required. */
2142 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2143 unsigned long needed, unsigned long align)
2144 {
2145 void *ret;
2146
2147 *mem_start = _ALIGN(*mem_start, align);
2148 while ((*mem_start + needed) > *mem_end) {
2149 unsigned long room, chunk;
2150
2151 prom_debug("Chunk exhausted, claiming more at %x...\n",
2152 alloc_bottom);
2153 room = alloc_top - alloc_bottom;
2154 if (room > DEVTREE_CHUNK_SIZE)
2155 room = DEVTREE_CHUNK_SIZE;
2156 if (room < PAGE_SIZE)
2157 prom_panic("No memory for flatten_device_tree "
2158 "(no room)\n");
2159 chunk = alloc_up(room, 0);
2160 if (chunk == 0)
2161 prom_panic("No memory for flatten_device_tree "
2162 "(claim failed)\n");
2163 *mem_end = chunk + room;
2164 }
2165
2166 ret = (void *)*mem_start;
2167 *mem_start += needed;
2168
2169 return ret;
2170 }
2171
2172 #define dt_push_token(token, mem_start, mem_end) do { \
2173 void *room = make_room(mem_start, mem_end, 4, 4); \
2174 *(__be32 *)room = cpu_to_be32(token); \
2175 } while(0)
2176
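/*
 * The flattened tree is a stream of big-endian 32-bit tokens
 * (OF_DT_BEGIN_NODE, OF_DT_PROP, OF_DT_END_NODE, ...) emitted with
 * dt_push_token().  Property names are not stored inline: each
 * OF_DT_PROP record carries an offset into the separate strings
 * block.  dt_find_string() below returns that offset, with 0 meaning
 * "not found"; offset 0 can never be a valid name because a 4-byte
 * hole is left at the start of the strings block.
 */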
2177 static unsigned long __init dt_find_string(char *str)
2178 {
2179 char *s, *os;
2180
2181 s = os = (char *)dt_string_start;
2182 s += 4;
2183 while (s < (char *)dt_string_end) {
2184 if (strcmp(s, str) == 0)
2185 return s - os;
2186 s += strlen(s) + 1;
2187 }
2188 return 0;
2189 }
2190
2191 /*
2192 * The Open Firmware 1275 specification states properties must be 31 bytes or
2193 * less; however, not all firmware obeys this. Make it 64 bytes to be safe.
2194 */
2195 #define MAX_PROPERTY_NAME 64
2196
2197 static void __init scan_dt_build_strings(phandle node,
2198 unsigned long *mem_start,
2199 unsigned long *mem_end)
2200 {
2201 char *prev_name, *namep, *sstart;
2202 unsigned long soff;
2203 phandle child;
2204
2205 sstart = (char *)dt_string_start;
2206
2207 /* get and store all property names */
2208 prev_name = "";
2209 for (;;) {
2210 /* 64 is max len of name including nul. */
2211 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2212 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2213 /* No more properties: unwind alloc */
2214 *mem_start = (unsigned long)namep;
2215 break;
2216 }
2217
2218 /* skip "name" */
2219 if (strcmp(namep, "name") == 0) {
2220 *mem_start = (unsigned long)namep;
2221 prev_name = "name";
2222 continue;
2223 }
2224 /* get/create string entry */
2225 soff = dt_find_string(namep);
2226 if (soff != 0) {
2227 *mem_start = (unsigned long)namep;
2228 namep = sstart + soff;
2229 } else {
2230 /* Trim off some if we can */
2231 *mem_start = (unsigned long)namep + strlen(namep) + 1;
2232 dt_string_end = *mem_start;
2233 }
2234 prev_name = namep;
2235 }
2236
2237 /* do all our children */
2238 child = call_prom("child", 1, 1, node);
2239 while (child != 0) {
2240 scan_dt_build_strings(child, mem_start, mem_end);
2241 child = call_prom("peer", 1, 1, child);
2242 }
2243 }
2244
2245 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2246 unsigned long *mem_end)
2247 {
2248 phandle child;
2249 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2250 unsigned long soff;
2251 unsigned char *valp;
2252 static char pname[MAX_PROPERTY_NAME];
2253 int l, room, has_phandle = 0;
2254
2255 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2256
2257 /* get the node's full name */
2258 namep = (char *)*mem_start;
2259 room = *mem_end - *mem_start;
2260 if (room > 255)
2261 room = 255;
2262 l = call_prom("package-to-path", 3, 1, node, namep, room);
2263 if (l >= 0) {
2264 /* Didn't fit? Get more room. */
2265 if (l >= room) {
2266 if (l >= *mem_end - *mem_start)
2267 namep = make_room(mem_start, mem_end, l+1, 1);
2268 call_prom("package-to-path", 3, 1, node, namep, l);
2269 }
2270 namep[l] = '\0';
2271
2272 /* Fixup an Apple bug where they have bogus \0 chars in the
2273 * middle of the path in some properties, and extract
2274 * the unit name (everything after the last '/').
2275 */
2276 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2277 if (*p == '/')
2278 lp = namep;
2279 else if (*p != 0)
2280 *lp++ = *p;
2281 }
2282 *lp = 0;
2283 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
2284 }
2285
2286 /* get it again for debugging */
2287 path = prom_scratch;
2288 memset(path, 0, PROM_SCRATCH_SIZE);
2289 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2290
2291 /* get and store all properties */
2292 prev_name = "";
2293 sstart = (char *)dt_string_start;
2294 for (;;) {
2295 if (call_prom("nextprop", 3, 1, node, prev_name,
2296 pname) != 1)
2297 break;
2298
2299 /* skip "name" */
2300 if (strcmp(pname, "name") == 0) {
2301 prev_name = "name";
2302 continue;
2303 }
2304
2305 /* find string offset */
2306 soff = dt_find_string(pname);
2307 if (soff == 0) {
2308 prom_printf("WARNING: Can't find string index for"
2309 " <%s>, node %s\n", pname, path);
2310 break;
2311 }
2312 prev_name = sstart + soff;
2313
2314 /* get length */
2315 l = call_prom("getproplen", 2, 1, node, pname);
2316
2317 /* sanity checks */
2318 if (l == PROM_ERROR)
2319 continue;
2320
2321 /* push property head */
2322 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2323 dt_push_token(l, mem_start, mem_end);
2324 dt_push_token(soff, mem_start, mem_end);
2325
2326 /* push property content */
2327 valp = make_room(mem_start, mem_end, l, 4);
2328 call_prom("getprop", 4, 1, node, pname, valp, l);
2329 *mem_start = _ALIGN(*mem_start, 4);
2330
2331 if (!strcmp(pname, "phandle"))
2332 has_phandle = 1;
2333 }
2334
2335 /* Add a "linux,phandle" property if no "phandle" property already
2336 * existed (can happen with OPAL)
2337 */
2338 if (!has_phandle) {
2339 soff = dt_find_string("linux,phandle");
2340 if (soff == 0)
2341 prom_printf("WARNING: Can't find string index for"
2342 " <linux-phandle> node %s\n", path);
2343 else {
2344 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2345 dt_push_token(4, mem_start, mem_end);
2346 dt_push_token(soff, mem_start, mem_end);
2347 valp = make_room(mem_start, mem_end, 4, 4);
2348 *(__be32 *)valp = cpu_to_be32(node);
2349 }
2350 }
2351
2352 /* do all our children */
2353 child = call_prom("child", 1, 1, node);
2354 while (child != 0) {
2355 scan_dt_build_struct(child, mem_start, mem_end);
2356 child = call_prom("peer", 1, 1, child);
2357 }
2358
2359 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2360 }
2361
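/*
 * Build the flattened device-tree blob with the layout
 *
 *   boot_param_header | memory reserve map | strings block | structure block
 *
 * and record the offsets of the pieces in the header so the kernel
 * proper can find them once prom_init() has returned.
 */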
2362 static void __init flatten_device_tree(void)
2363 {
2364 phandle root;
2365 unsigned long mem_start, mem_end, room;
2366 struct boot_param_header *hdr;
2367 char *namep;
2368 u64 *rsvmap;
2369
2370 /*
2371 * Check how much room we have between alloc top & bottom (+/- a
2372 * few pages), crop to 1MB, as this is our "chunk" size
2373 */
2374 room = alloc_top - alloc_bottom - 0x4000;
2375 if (room > DEVTREE_CHUNK_SIZE)
2376 room = DEVTREE_CHUNK_SIZE;
2377 prom_debug("starting device tree allocs at %x\n", alloc_bottom);
2378
2379 /* Now try to claim that */
2380 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2381 if (mem_start == 0)
2382 prom_panic("Can't allocate initial device-tree chunk\n");
2383 mem_end = mem_start + room;
2384
2385 /* Get root of tree */
2386 root = call_prom("peer", 1, 1, (phandle)0);
2387 if (root == (phandle)0)
2388 prom_panic ("couldn't get device tree root\n");
2389
2390 /* Build header and make room for mem rsv map */
2391 mem_start = _ALIGN(mem_start, 4);
2392 hdr = make_room(&mem_start, &mem_end,
2393 sizeof(struct boot_param_header), 4);
2394 dt_header_start = (unsigned long)hdr;
2395 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2396
2397 /* Start of strings */
2398 mem_start = PAGE_ALIGN(mem_start);
2399 dt_string_start = mem_start;
2400 mem_start += 4; /* hole */
2401
2402 /* Add "linux,phandle" in there, we'll need it */
2403 namep = make_room(&mem_start, &mem_end, 16, 1);
2404 strcpy(namep, "linux,phandle");
2405 mem_start = (unsigned long)namep + strlen(namep) + 1;
2406
2407 /* Build string array */
2408 prom_printf("Building dt strings...\n");
2409 scan_dt_build_strings(root, &mem_start, &mem_end);
2410 dt_string_end = mem_start;
2411
2412 /* Build structure */
2413 mem_start = PAGE_ALIGN(mem_start);
2414 dt_struct_start = mem_start;
2415 prom_printf("Building dt structure...\n");
2416 scan_dt_build_struct(root, &mem_start, &mem_end);
2417 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2418 dt_struct_end = PAGE_ALIGN(mem_start);
2419
2420 /* Finish header */
2421 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2422 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2423 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2424 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2425 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2426 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2427 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2428 hdr->version = cpu_to_be32(OF_DT_VERSION);
2429 /* Version 16 is not backward compatible */
2430 hdr->last_comp_version = cpu_to_be32(0x10);
2431
2432 /* Copy the reserve map in */
2433 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2434
2435 #ifdef DEBUG_PROM
2436 {
2437 int i;
2438 prom_printf("reserved memory map:\n");
2439 for (i = 0; i < mem_reserve_cnt; i++)
2440 prom_printf(" %x - %x\n",
2441 be64_to_cpu(mem_reserve_map[i].base),
2442 be64_to_cpu(mem_reserve_map[i].size));
2443 }
2444 #endif
2445 /* Bump mem_reserve_cnt to cause further reservations to fail
2446 * since it's too late.
2447 */
2448 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2449
2450 prom_printf("Device tree strings 0x%x -> 0x%x\n",
2451 dt_string_start, dt_string_end);
2452 prom_printf("Device tree struct 0x%x -> 0x%x\n",
2453 dt_struct_start, dt_struct_end);
2454 }
2455
2456 #ifdef CONFIG_PPC_MAPLE
2457 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2458 * The values are bad, and it doesn't even have the right number of cells. */
2459 static void __init fixup_device_tree_maple(void)
2460 {
2461 phandle isa;
2462 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
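/* rloc is the OF PCI phys.hi cell for the replacement range:
 * 0x01002000 = I/O space (ss = 01), bus 0, device 4, function 0;
 * 0x01003000 below selects device 6 instead.
 */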
2463 u32 isa_ranges[6];
2464 char *name;
2465
2466 name = "/ht@0/isa@4";
2467 isa = call_prom("finddevice", 1, 1, ADDR(name));
2468 if (!PHANDLE_VALID(isa)) {
2469 name = "/ht@0/isa@6";
2470 isa = call_prom("finddevice", 1, 1, ADDR(name));
2471 rloc = 0x01003000; /* IO space; PCI device = 6 */
2472 }
2473 if (!PHANDLE_VALID(isa))
2474 return;
2475
2476 if (prom_getproplen(isa, "ranges") != 12)
2477 return;
2478 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2479 == PROM_ERROR)
2480 return;
2481
2482 if (isa_ranges[0] != 0x1 ||
2483 isa_ranges[1] != 0xf4000000 ||
2484 isa_ranges[2] != 0x00010000)
2485 return;
2486
2487 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2488
2489 isa_ranges[0] = 0x1;
2490 isa_ranges[1] = 0x0;
2491 isa_ranges[2] = rloc;
2492 isa_ranges[3] = 0x0;
2493 isa_ranges[4] = 0x0;
2494 isa_ranges[5] = 0x00010000;
2495 prom_setprop(isa, name, "ranges",
2496 isa_ranges, sizeof(isa_ranges));
2497 }
2498
2499 #define CPC925_MC_START 0xf8000000
2500 #define CPC925_MC_LENGTH 0x1000000
2501 /* The values for memory-controller don't have the right number of cells */
2502 static void __init fixup_device_tree_maple_memory_controller(void)
2503 {
2504 phandle mc;
2505 u32 mc_reg[4];
2506 char *name = "/hostbridge@f8000000";
2507 u32 ac, sc;
2508
2509 mc = call_prom("finddevice", 1, 1, ADDR(name));
2510 if (!PHANDLE_VALID(mc))
2511 return;
2512
2513 if (prom_getproplen(mc, "reg") != 8)
2514 return;
2515
2516 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2517 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2518 if ((ac != 2) || (sc != 2))
2519 return;
2520
2521 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2522 return;
2523
2524 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2525 return;
2526
2527 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2528
2529 mc_reg[0] = 0x0;
2530 mc_reg[1] = CPC925_MC_START;
2531 mc_reg[2] = 0x0;
2532 mc_reg[3] = CPC925_MC_LENGTH;
2533 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2534 }
2535 #else
2536 #define fixup_device_tree_maple()
2537 #define fixup_device_tree_maple_memory_controller()
2538 #endif
2539
2540 #ifdef CONFIG_PPC_CHRP
2541 /*
2542 * Pegasos and BriQ lack the "ranges" property in the isa node
2543 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2544 * Pegasos has the IDE configured in legacy mode, but advertised as native
2545 */
2546 static void __init fixup_device_tree_chrp(void)
2547 {
2548 phandle ph;
2549 u32 prop[6];
2550 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2551 char *name;
2552 int rc;
2553
2554 name = "/pci@80000000/isa@c";
2555 ph = call_prom("finddevice", 1, 1, ADDR(name));
2556 if (!PHANDLE_VALID(ph)) {
2557 name = "/pci@ff500000/isa@6";
2558 ph = call_prom("finddevice", 1, 1, ADDR(name));
2559 rloc = 0x01003000; /* IO space; PCI device = 6 */
2560 }
2561 if (PHANDLE_VALID(ph)) {
2562 rc = prom_getproplen(ph, "ranges");
2563 if (rc == 0 || rc == PROM_ERROR) {
2564 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2565
2566 prop[0] = 0x1;
2567 prop[1] = 0x0;
2568 prop[2] = rloc;
2569 prop[3] = 0x0;
2570 prop[4] = 0x0;
2571 prop[5] = 0x00010000;
2572 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2573 }
2574 }
2575
2576 name = "/pci@80000000/ide@C,1";
2577 ph = call_prom("finddevice", 1, 1, ADDR(name));
2578 if (PHANDLE_VALID(ph)) {
2579 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2580 prop[0] = 14;
2581 prop[1] = 0x0;
2582 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2583 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2584 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2585 if (rc == sizeof(u32)) {
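/* Clear programming-interface bits 0 and 2 (primary/secondary
 * channel native mode) so the class code matches the controller's
 * actual legacy-mode setup.
 */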
2586 prop[0] &= ~0x5;
2587 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2588 }
2589 }
2590 }
2591 #else
2592 #define fixup_device_tree_chrp()
2593 #endif
2594
2595 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2596 static void __init fixup_device_tree_pmac(void)
2597 {
2598 phandle u3, i2c, mpic;
2599 u32 u3_rev;
2600 u32 interrupts[2];
2601 u32 parent;
2602
2603 /* Some G5s have a missing interrupt definition, fix it up here */
2604 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2605 if (!PHANDLE_VALID(u3))
2606 return;
2607 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2608 if (!PHANDLE_VALID(i2c))
2609 return;
2610 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2611 if (!PHANDLE_VALID(mpic))
2612 return;
2613
2614 /* check if proper rev of u3 */
2615 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2616 == PROM_ERROR)
2617 return;
2618 if (u3_rev < 0x35 || u3_rev > 0x39)
2619 return;
2620 /* does it need fixup ? */
2621 if (prom_getproplen(i2c, "interrupts") > 0)
2622 return;
2623
2624 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2625
2626 /* interrupt on this revision of u3 is number 0 and level */
2627 interrupts[0] = 0;
2628 interrupts[1] = 1;
2629 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2630 &interrupts, sizeof(interrupts));
2631 parent = (u32)mpic;
2632 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2633 &parent, sizeof(parent));
2634 }
2635 #else
2636 #define fixup_device_tree_pmac()
2637 #endif
2638
2639 #ifdef CONFIG_PPC_EFIKA
2640 /*
2641 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2642 * to talk to the phy. If the phy-handle property is missing, then this
2643 * function is called to add the appropriate nodes and link it to the
2644 * ethernet node.
2645 */
2646 static void __init fixup_device_tree_efika_add_phy(void)
2647 {
2648 u32 node;
2649 char prop[64];
2650 int rv;
2651
2652 /* Check if /builtin/ethernet exists - bail if it doesn't */
2653 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2654 if (!PHANDLE_VALID(node))
2655 return;
2656
2657 /* Check if the phy-handle property exists - bail if it does */
2658 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2659 if (!rv)
2660 return;
2661
2662 /*
2663 * At this point the ethernet device doesn't have a phy described.
2664 * Now we need to add the missing phy node and linkage
2665 */
2666
2667 /* Check for an MDIO bus node - if missing then create one */
2668 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2669 if (!PHANDLE_VALID(node)) {
2670 prom_printf("Adding Ethernet MDIO node\n");
2671 call_prom("interpret", 1, 1,
2672 " s\" /builtin\" find-device"
2673 " new-device"
2674 " 1 encode-int s\" #address-cells\" property"
2675 " 0 encode-int s\" #size-cells\" property"
2676 " s\" mdio\" device-name"
2677 " s\" fsl,mpc5200b-mdio\" encode-string"
2678 " s\" compatible\" property"
2679 " 0xf0003000 0x400 reg"
2680 " 0x2 encode-int"
2681 " 0x5 encode-int encode+"
2682 " 0x3 encode-int encode+"
2683 " s\" interrupts\" property"
2684 " finish-device");
2685 }
2686
2687 /* Check for a PHY device node - if missing then create one and
2688 * give its phandle to the ethernet node */
2689 node = call_prom("finddevice", 1, 1,
2690 ADDR("/builtin/mdio/ethernet-phy"));
2691 if (!PHANDLE_VALID(node)) {
2692 prom_printf("Adding Ethernet PHY node\n");
2693 call_prom("interpret", 1, 1,
2694 " s\" /builtin/mdio\" find-device"
2695 " new-device"
2696 " s\" ethernet-phy\" device-name"
2697 " 0x10 encode-int s\" reg\" property"
2698 " my-self"
2699 " ihandle>phandle"
2700 " finish-device"
2701 " s\" /builtin/ethernet\" find-device"
2702 " encode-int"
2703 " s\" phy-handle\" property"
2704 " device-end");
2705 }
2706 }
2707
2708 static void __init fixup_device_tree_efika(void)
2709 {
2710 int sound_irq[3] = { 2, 2, 0 };
2711 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2712 3,4,0, 3,5,0, 3,6,0, 3,7,0,
2713 3,8,0, 3,9,0, 3,10,0, 3,11,0,
2714 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2715 u32 node;
2716 char prop[64];
2717 int rv, len;
2718
2719 /* Check if we're really running on an EFIKA */
2720 node = call_prom("finddevice", 1, 1, ADDR("/"));
2721 if (!PHANDLE_VALID(node))
2722 return;
2723
2724 rv = prom_getprop(node, "model", prop, sizeof(prop));
2725 if (rv == PROM_ERROR)
2726 return;
2727 if (strcmp(prop, "EFIKA5K2"))
2728 return;
2729
2730 prom_printf("Applying EFIKA device tree fixups\n");
2731
2732 /* Claiming to be 'chrp' is death */
2733 node = call_prom("finddevice", 1, 1, ADDR("/"));
2734 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
2735 if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
2736 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
2737
2738 /* CODEGEN,description is exposed in /proc/cpuinfo so
2739 fix that too */
2740 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
2741 if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
2742 prom_setprop(node, "/", "CODEGEN,description",
2743 "Efika 5200B PowerPC System",
2744 sizeof("Efika 5200B PowerPC System"));
2745
2746 /* Fixup bestcomm interrupts property */
2747 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
2748 if (PHANDLE_VALID(node)) {
2749 len = prom_getproplen(node, "interrupts");
2750 if (len == 12) {
2751 prom_printf("Fixing bestcomm interrupts property\n");
2752 prom_setprop(node, "/builtin/bestcom", "interrupts",
2753 bcomm_irq, sizeof(bcomm_irq));
2754 }
2755 }
2756
2757 /* Fixup sound interrupts property */
2758 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
2759 if (PHANDLE_VALID(node)) {
2760 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
2761 if (rv == PROM_ERROR) {
2762 prom_printf("Adding sound interrupts property\n");
2763 prom_setprop(node, "/builtin/sound", "interrupts",
2764 sound_irq, sizeof(sound_irq));
2765 }
2766 }
2767
2768 /* Make sure ethernet phy-handle property exists */
2769 fixup_device_tree_efika_add_phy();
2770 }
2771 #else
2772 #define fixup_device_tree_efika()
2773 #endif
2774
2775 static void __init fixup_device_tree(void)
2776 {
2777 fixup_device_tree_maple();
2778 fixup_device_tree_maple_memory_controller();
2779 fixup_device_tree_chrp();
2780 fixup_device_tree_pmac();
2781 fixup_device_tree_efika();
2782 }
2783
2784 static void __init prom_find_boot_cpu(void)
2785 {
2786 __be32 rval;
2787 ihandle prom_cpu;
2788 phandle cpu_pkg;
2789
2790 rval = 0;
2791 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
2792 return;
2793 prom_cpu = be32_to_cpu(rval);
2794
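/* /chosen "cpu" is the ihandle of the boot CPU's open instance;
 * convert it to the package (phandle) so its "reg" property, the
 * hardware CPU id, can be read.
 */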
2795 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2796
2797 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
2798 prom.cpu = be32_to_cpu(rval);
2799
2800 prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
2801 }
2802
2803 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
2804 {
2805 #ifdef CONFIG_BLK_DEV_INITRD
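/* r3/r4 carry the initrd start and size handed over by the boot
 * loader; r4 == 0xdeadbeef is treated as "no initrd" since some
 * loaders leave that pattern behind.
 */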
2806 if (r3 && r4 && r4 != 0xdeadbeef) {
2807 __be64 val;
2808
2809 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
2810 prom_initrd_end = prom_initrd_start + r4;
2811
2812 val = cpu_to_be64(prom_initrd_start);
2813 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
2814 &val, sizeof(val));
2815 val = cpu_to_be64(prom_initrd_end);
2816 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
2817 &val, sizeof(val));
2818
2819 reserve_mem(prom_initrd_start,
2820 prom_initrd_end - prom_initrd_start);
2821
2822 prom_debug("initrd_start=0x%x\n", prom_initrd_start);
2823 prom_debug("initrd_end=0x%x\n", prom_initrd_end);
2824 }
2825 #endif /* CONFIG_BLK_DEV_INITRD */
2826 }
2827
2828 #ifdef CONFIG_PPC64
2829 #ifdef CONFIG_RELOCATABLE
2830 static void reloc_toc(void)
2831 {
2832 }
2833
2834 static void unreloc_toc(void)
2835 {
2836 }
2837 #else
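/*
 * Without CONFIG_RELOCATABLE the TOC entries still hold link-time
 * addresses, while prom_init() runs wherever the kernel was loaded.
 * reloc_toc() adds the load offset to every entry in the prom_init
 * TOC section so pointer accesses work here; unreloc_toc() undoes
 * the adjustment before we leave.
 */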
2838 static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
2839 {
2840 unsigned long i;
2841 unsigned long *toc_entry;
2842
2843 /* Get the start of the TOC by using r2 directly. */
2844 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
2845
2846 for (i = 0; i < nr_entries; i++) {
2847 *toc_entry = *toc_entry + offset;
2848 toc_entry++;
2849 }
2850 }
2851
2852 static void reloc_toc(void)
2853 {
2854 unsigned long offset = reloc_offset();
2855 unsigned long nr_entries =
2856 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2857
2858 __reloc_toc(offset, nr_entries);
2859
2860 mb();
2861 }
2862
2863 static void unreloc_toc(void)
2864 {
2865 unsigned long offset = reloc_offset();
2866 unsigned long nr_entries =
2867 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2868
2869 mb();
2870
2871 __reloc_toc(-offset, nr_entries);
2872 }
2873 #endif
2874 #endif
2875
2876 /*
2877 * We enter here early on, when the Open Firmware prom is still
2878 * handling exceptions and the MMU hash table for us.
2879 */
2880
2881 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2882 unsigned long pp,
2883 unsigned long r6, unsigned long r7,
2884 unsigned long kbase)
2885 {
2886 unsigned long hdr;
2887
2888 #ifdef CONFIG_PPC32
2889 unsigned long offset = reloc_offset();
2890 reloc_got2(offset);
2891 #else
2892 reloc_toc();
2893 #endif
2894
2895 /*
2896 * First zero the BSS
2897 */
2898 memset(&__bss_start, 0, __bss_stop - __bss_start);
2899
2900 /*
2901 * Init interface to Open Firmware, get some node references,
2902 * like /chosen
2903 */
2904 prom_init_client_services(pp);
2905
2906 /*
2907 * See if this OF is old enough that we need to do explicit maps
2908 * and other workarounds
2909 */
2910 prom_find_mmu();
2911
2912 /*
2913 * Init prom stdout device
2914 */
2915 prom_init_stdout();
2916
2917 prom_printf("Preparing to boot %s", linux_banner);
2918
2919 /*
2920 * Get default machine type. At this point, we do not differentiate
2921 * between pSeries SMP and pSeries LPAR
2922 */
2923 of_platform = prom_find_machine_type();
2924 prom_printf("Detected machine type: %x\n", of_platform);
2925
2926 #ifndef CONFIG_NONSTATIC_KERNEL
2927 /* Bail if this is a kdump kernel. */
2928 if (PHYSICAL_START > 0)
2929 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
2930 #endif
2931
2932 /*
2933 * Check for an initrd
2934 */
2935 prom_check_initrd(r3, r4);
2936
2937 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
2938 /*
2939 * On pSeries, inform the firmware about our capabilities
2940 */
2941 if (of_platform == PLATFORM_PSERIES ||
2942 of_platform == PLATFORM_PSERIES_LPAR)
2943 prom_send_capabilities();
2944 #endif
2945
2946 /*
2947 * Copy the CPU hold code
2948 */
2949 if (of_platform != PLATFORM_POWERMAC)
2950 copy_and_flush(0, kbase, 0x100, 0);
2951
2952 /*
2953 * Do early parsing of command line
2954 */
2955 early_cmdline_parse();
2956
2957 /*
2958 * Initialize memory management within prom_init
2959 */
2960 prom_init_mem();
2961
2962 /*
2963 * Determine which cpu is actually running right _now_
2964 */
2965 prom_find_boot_cpu();
2966
2967 /*
2968 * Initialize display devices
2969 */
2970 prom_check_displays();
2971
2972 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
2973 /*
2974 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
2975 * that uses the allocator; we need to make sure we get the top of memory
2976 * available for us here...
2977 */
2978 if (of_platform == PLATFORM_PSERIES)
2979 prom_initialize_tce_table();
2980 #endif
2981
2982 /*
2983 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
2984 * have a usable RTAS implementation.
2985 */
2986 if (of_platform != PLATFORM_POWERMAC &&
2987 of_platform != PLATFORM_OPAL)
2988 prom_instantiate_rtas();
2989
2990 #ifdef CONFIG_PPC_POWERNV
2991 #ifdef __BIG_ENDIAN__
2992 /* Detect HAL and try instantiating it and doing takeover */
2993 if (of_platform == PLATFORM_PSERIES_LPAR) {
2994 prom_query_opal();
2995 if (of_platform == PLATFORM_OPAL) {
2996 prom_opal_hold_cpus();
2997 prom_opal_takeover();
2998 }
2999 } else
3000 #endif /* __BIG_ENDIAN__ */
3001 if (of_platform == PLATFORM_OPAL)
3002 prom_instantiate_opal();
3003 #endif /* CONFIG_PPC_POWERNV */
3004
3005 #ifdef CONFIG_PPC64
3006 /* instantiate sml */
3007 prom_instantiate_sml();
3008 #endif
3009
3010 /*
3011 * On non-powermacs, put all CPUs in spin-loops.
3012 *
3013 * PowerMacs use a different mechanism to spin CPUs
3014 */
3015 if (of_platform != PLATFORM_POWERMAC &&
3016 of_platform != PLATFORM_OPAL)
3017 prom_hold_cpus();
3018
3019 /*
3020 * Fill in some infos for use by the kernel later on
3021 */
3022 if (prom_memory_limit) {
3023 __be64 val = cpu_to_be64(prom_memory_limit);
3024 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3025 &val, sizeof(val));
3026 }
3027 #ifdef CONFIG_PPC64
3028 if (prom_iommu_off)
3029 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3030 NULL, 0);
3031
3032 if (prom_iommu_force_on)
3033 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3034 NULL, 0);
3035
3036 if (prom_tce_alloc_start) {
3037 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3038 &prom_tce_alloc_start,
3039 sizeof(prom_tce_alloc_start));
3040 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3041 &prom_tce_alloc_end,
3042 sizeof(prom_tce_alloc_end));
3043 }
3044 #endif
3045
3046 /*
3047 * Fixup any known bugs in the device-tree
3048 */
3049 fixup_device_tree();
3050
3051 /*
3052 * Now finally create the flattened device-tree
3053 */
3054 prom_printf("copying OF device tree...\n");
3055 flatten_device_tree();
3056
3057 /*
3058 * Close stdin in case it is USB and still active on IBM machines.
3059 * Unfortunately, quiesce crashes on some powermacs if we have
3060 * closed stdin already (in particular the powerbook 101). It
3061 * appears that the OPAL version of OFW doesn't like it either.
3062 */
3063 if (of_platform != PLATFORM_POWERMAC &&
3064 of_platform != PLATFORM_OPAL)
3065 prom_close_stdin();
3066
3067 /*
3068 * Call OF "quiesce" method to shut down pending DMA's from
3069 * devices etc...
3070 */
3071 prom_printf("Calling quiesce...\n");
3072 call_prom("quiesce", 0, 0);
3073
3074 /*
3075 * And finally, call the kernel passing it the flattened device
3076 * tree and NULL as r5, thus triggering the new entry point which
3077 * is common to us and kexec
3078 */
3079 hdr = dt_header_start;
3080
3081 /* Don't print anything after quiesce under OPAL, it crashes OFW */
3082 if (of_platform != PLATFORM_OPAL) {
3083 prom_printf("returning from prom_init\n");
3084 prom_debug("->dt_header_start=0x%x\n", hdr);
3085 }
3086
3087 #ifdef CONFIG_PPC32
3088 reloc_got2(-offset);
3089 #else
3090 unreloc_toc();
3091 #endif
3092
3093 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
3094 /* OPAL early debug gets the OPAL base & entry in r8 and r9 */
3095 __start(hdr, kbase, 0, 0, 0,
3096 prom_opal_base, prom_opal_entry);
3097 #else
3098 __start(hdr, kbase, 0, 0, 0, 0, 0);
3099 #endif
3100
3101 return 0;
3102 }