1 /*
2 * Procedures for interfacing to Open Firmware.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 #undef DEBUG_PROM
17
18 /* we cannot use FORTIFY as it brings in new symbols */
19 #define __NO_FORTIFY
20
21 #include <stdarg.h>
22 #include <linux/kernel.h>
23 #include <linux/string.h>
24 #include <linux/init.h>
25 #include <linux/threads.h>
26 #include <linux/spinlock.h>
27 #include <linux/types.h>
28 #include <linux/pci.h>
29 #include <linux/proc_fs.h>
30 #include <linux/delay.h>
31 #include <linux/initrd.h>
32 #include <linux/bitops.h>
33 #include <asm/prom.h>
34 #include <asm/rtas.h>
35 #include <asm/page.h>
36 #include <asm/processor.h>
37 #include <asm/irq.h>
38 #include <asm/io.h>
39 #include <asm/smp.h>
40 #include <asm/mmu.h>
41 #include <asm/pgtable.h>
42 #include <asm/iommu.h>
43 #include <asm/btext.h>
44 #include <asm/sections.h>
45 #include <asm/machdep.h>
46 #include <asm/opal.h>
47 #include <asm/asm-prototypes.h>
48
49 #include <linux/linux_logo.h>
50
51 /*
52 * Eventually bump that one up
53 */
54 #define DEVTREE_CHUNK_SIZE 0x100000
55
56 /*
57 * This is the size of the local memory reserve map that gets copied
58 * into the boot params passed to the kernel. That size is totally
59 * flexible as the kernel just reads the list until it encounters an
60 * entry with size 0, so it can be changed without breaking binary
61 * compatibility
62 */
63 #define MEM_RESERVE_MAP_SIZE 8
64
65 /*
66 * prom_init() is called very early on, before the kernel text
67 * and data have been mapped to KERNELBASE. At this point the code
68 * is running at whatever address it has been loaded at.
69 * On ppc32 we compile with -mrelocatable, which means that references
70 * to extern and static variables get relocated automatically.
71 * ppc64 objects are always relocatable, we just need to relocate the
72 * TOC.
73 *
74 * Because OF may have mapped I/O devices into the area starting at
75 * KERNELBASE, particularly on CHRP machines, we can't safely call
76 * OF once the kernel has been mapped to KERNELBASE. Therefore all
77 * OF calls must be done within prom_init().
78 *
79 * ADDR is used in calls to call_prom. The 4th and following
80 * arguments to call_prom should be 32-bit values.
81 * On ppc64, 64 bit values are truncated to 32 bits (and
82 * fortunately don't get interpreted as two arguments).
83 */
84 #define ADDR(x) (u32)(unsigned long)(x)
85
86 #ifdef CONFIG_PPC64
87 #define OF_WORKAROUNDS 0
88 #else
89 #define OF_WORKAROUNDS of_workarounds
90 int of_workarounds;
91 #endif
92
93 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
94 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
95
96 #define PROM_BUG() do { \
97 prom_printf("kernel BUG at %s line 0x%x!\n", \
98 __FILE__, __LINE__); \
99 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
100 } while (0)
101
102 #ifdef DEBUG_PROM
103 #define prom_debug(x...) prom_printf(x)
104 #else
105 #define prom_debug(x...) do { } while (0)
106 #endif
107
108
109 typedef u32 prom_arg_t;
110
111 struct prom_args {
112 __be32 service;
113 __be32 nargs;
114 __be32 nret;
115 __be32 args[10];
116 };
117
118 struct prom_t {
119 ihandle root;
120 phandle chosen;
121 int cpu;
122 ihandle stdout;
123 ihandle mmumap;
124 ihandle memory;
125 };
126
127 struct mem_map_entry {
128 __be64 base;
129 __be64 size;
130 };
131
132 typedef __be32 cell_t;
133
134 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
135 unsigned long r6, unsigned long r7, unsigned long r8,
136 unsigned long r9);
137
138 #ifdef CONFIG_PPC64
139 extern int enter_prom(struct prom_args *args, unsigned long entry);
140 #else
141 static inline int enter_prom(struct prom_args *args, unsigned long entry)
142 {
143 return ((int (*)(struct prom_args *))entry)(args);
144 }
145 #endif
146
147 extern void copy_and_flush(unsigned long dest, unsigned long src,
148 unsigned long size, unsigned long offset);
149
150 /* prom structure */
151 static struct prom_t __initdata prom;
152
153 static unsigned long prom_entry __initdata;
154
155 #define PROM_SCRATCH_SIZE 256
156
157 static char __initdata of_stdout_device[256];
158 static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
159
160 static unsigned long __initdata dt_header_start;
161 static unsigned long __initdata dt_struct_start, dt_struct_end;
162 static unsigned long __initdata dt_string_start, dt_string_end;
163
164 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
165
166 #ifdef CONFIG_PPC64
167 static int __initdata prom_iommu_force_on;
168 static int __initdata prom_iommu_off;
169 static unsigned long __initdata prom_tce_alloc_start;
170 static unsigned long __initdata prom_tce_alloc_end;
171 #endif
172
173 static bool prom_radix_disable __initdata = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
174
175 struct platform_support {
176 bool hash_mmu;
177 bool radix_mmu;
178 bool radix_gtse;
179 bool xive;
180 };
181
182 /* Platform codes are now obsolete in the kernel. They are only used within
183 * this file and will ultimately go away too. Feel free to change them if you
184 * need to; they are not shared with anything outside of this file anymore.
185 */
186 #define PLATFORM_PSERIES 0x0100
187 #define PLATFORM_PSERIES_LPAR 0x0101
188 #define PLATFORM_LPAR 0x0001
189 #define PLATFORM_POWERMAC 0x0400
190 #define PLATFORM_GENERIC 0x0500
191 #define PLATFORM_OPAL 0x0600
192
193 static int __initdata of_platform;
194
195 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
196
197 static unsigned long __initdata prom_memory_limit;
198
199 static unsigned long __initdata alloc_top;
200 static unsigned long __initdata alloc_top_high;
201 static unsigned long __initdata alloc_bottom;
202 static unsigned long __initdata rmo_top;
203 static unsigned long __initdata ram_top;
204
205 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
206 static int __initdata mem_reserve_cnt;
207
208 static cell_t __initdata regbuf[1024];
209
210 static bool rtas_has_query_cpu_stopped;
211
212
213 /*
214 * Error results ... some OF calls will return "-1" on error, some
215 * will return 0, some will return either. To simplify, here are
216 * macros to use with any ihandle or phandle return value to check if
217 * it is valid
218 */
219
220 #define PROM_ERROR (-1u)
221 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
222 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
223
224
225 /* This is the one and *ONLY* place where we actually call open
226 * firmware.
227 */
228
229 static int __init call_prom(const char *service, int nargs, int nret, ...)
230 {
231 int i;
232 struct prom_args args;
233 va_list list;
234
235 args.service = cpu_to_be32(ADDR(service));
236 args.nargs = cpu_to_be32(nargs);
237 args.nret = cpu_to_be32(nret);
238
239 va_start(list, nret);
240 for (i = 0; i < nargs; i++)
241 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
242 va_end(list);
243
244 for (i = 0; i < nret; i++)
245 args.args[nargs+i] = 0;
246
247 if (enter_prom(&args, prom_entry) < 0)
248 return PROM_ERROR;
249
250 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
251 }
252
253 static int __init call_prom_ret(const char *service, int nargs, int nret,
254 prom_arg_t *rets, ...)
255 {
256 int i;
257 struct prom_args args;
258 va_list list;
259
260 args.service = cpu_to_be32(ADDR(service));
261 args.nargs = cpu_to_be32(nargs);
262 args.nret = cpu_to_be32(nret);
263
264 va_start(list, rets);
265 for (i = 0; i < nargs; i++)
266 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
267 va_end(list);
268
269 for (i = 0; i < nret; i++)
270 args.args[nargs+i] = 0;
271
272 if (enter_prom(&args, prom_entry) < 0)
273 return PROM_ERROR;
274
275 if (rets != NULL)
276 for (i = 1; i < nret; ++i)
277 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
278
279 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
280 }
281
282
283 static void __init prom_print(const char *msg)
284 {
285 const char *p, *q;
286
287 if (prom.stdout == 0)
288 return;
289
290 for (p = msg; *p != 0; p = q) {
291 for (q = p; *q != 0 && *q != '\n'; ++q)
292 ;
293 if (q > p)
294 call_prom("write", 3, 1, prom.stdout, p, q - p);
295 if (*q == 0)
296 break;
297 ++q;
298 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
299 }
300 }
301
302
303 /*
304 * Both prom_print_hex & prom_print_dec take an unsigned long as input so that
305 * we do not need __udivdi3 or __umoddi3 on 32-bit.
306 */
307 static void __init prom_print_hex(unsigned long val)
308 {
309 int i, nibbles = sizeof(val)*2;
310 char buf[sizeof(val)*2+1];
311
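/* build the string from the least-significant nibble up; digits above 9 become 'a'-'f' */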
312 for (i = nibbles-1; i >= 0; i--) {
313 buf[i] = (val & 0xf) + '0';
314 if (buf[i] > '9')
315 buf[i] += ('a'-'0'-10);
316 val >>= 4;
317 }
318 buf[nibbles] = '\0';
319 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
320 }
321
322 /* max number of decimal digits in an unsigned long */
323 #define UL_DIGITS 21
324 static void __init prom_print_dec(unsigned long val)
325 {
326 int i, size;
327 char buf[UL_DIGITS+1];
328
329 for (i = UL_DIGITS-1; i >= 0; i--) {
330 buf[i] = (val % 10) + '0';
331 val = val/10;
332 if (val == 0)
333 break;
334 }
335 /* shift stuff down */
336 size = UL_DIGITS - i;
337 call_prom("write", 3, 1, prom.stdout, buf+i, size);
338 }
339
340 __printf(1, 2)
341 static void __init prom_printf(const char *format, ...)
342 {
343 const char *p, *q, *s;
344 va_list args;
345 unsigned long v;
346 long vs;
347 int n = 0;
348
349 va_start(args, format);
350 for (p = format; *p != 0; p = q) {
351 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
352 ;
353 if (q > p)
354 call_prom("write", 3, 1, prom.stdout, p, q - p);
355 if (*q == 0)
356 break;
357 if (*q == '\n') {
358 ++q;
359 call_prom("write", 3, 1, prom.stdout,
360 ADDR("\r\n"), 2);
361 continue;
362 }
363 ++q;
364 if (*q == 0)
365 break;
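/* count 'l' length modifiers: n selects int (0), long (1) or long long (2+) below */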
366 while (*q == 'l') {
367 ++q;
368 ++n;
369 }
370 switch (*q) {
371 case 's':
372 ++q;
373 s = va_arg(args, const char *);
374 prom_print(s);
375 break;
376 case 'x':
377 ++q;
378 switch (n) {
379 case 0:
380 v = va_arg(args, unsigned int);
381 break;
382 case 1:
383 v = va_arg(args, unsigned long);
384 break;
385 case 2:
386 default:
387 v = va_arg(args, unsigned long long);
388 break;
389 }
390 prom_print_hex(v);
391 break;
392 case 'u':
393 ++q;
394 switch (n) {
395 case 0:
396 v = va_arg(args, unsigned int);
397 break;
398 case 1:
399 v = va_arg(args, unsigned long);
400 break;
401 case 2:
402 default:
403 v = va_arg(args, unsigned long long);
404 break;
405 }
406 prom_print_dec(v);
407 break;
408 case 'd':
409 ++q;
410 switch (n) {
411 case 0:
412 vs = va_arg(args, int);
413 break;
414 case 1:
415 vs = va_arg(args, long);
416 break;
417 case 2:
418 default:
419 vs = va_arg(args, long long);
420 break;
421 }
422 if (vs < 0) {
423 prom_print("-");
424 vs = -vs;
425 }
426 prom_print_dec(vs);
427 break;
428 }
429 }
430 va_end(args);
431 }
432
433
434 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
435 unsigned long align)
436 {
437
438 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
439 /*
440 * Old OF requires we claim physical and virtual separately
441 * and then map explicitly (assuming virtual mode)
442 */
443 int ret;
444 prom_arg_t result;
445
446 ret = call_prom_ret("call-method", 5, 2, &result,
447 ADDR("claim"), prom.memory,
448 align, size, virt);
449 if (ret != 0 || result == -1)
450 return -1;
451 ret = call_prom_ret("call-method", 5, 2, &result,
452 ADDR("claim"), prom.mmumap,
453 align, size, virt);
454 if (ret != 0) {
455 call_prom("call-method", 4, 1, ADDR("release"),
456 prom.memory, size, virt);
457 return -1;
458 }
459 /* the 0x12 is M (coherence) + PP == read/write */
460 call_prom("call-method", 6, 1,
461 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
462 return virt;
463 }
464 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
465 (prom_arg_t)align);
466 }
467
468 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
469 {
470 prom_print(reason);
471 /* Do not call exit because it clears the screen on pmac;
472 * it also causes some sort of double-fault on early pmacs */
473 if (of_platform == PLATFORM_POWERMAC)
474 asm("trap\n");
475
476 /* ToDo: should put up an SRC here on pSeries */
477 call_prom("exit", 0, 0);
478
479 for (;;) /* should never get here */
480 ;
481 }
482
483
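/* Depth-first walk of the device tree: try a child first, then a peer, then climb until a parent has a peer. */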
484 static int __init prom_next_node(phandle *nodep)
485 {
486 phandle node;
487
488 if ((node = *nodep) != 0
489 && (*nodep = call_prom("child", 1, 1, node)) != 0)
490 return 1;
491 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
492 return 1;
493 for (;;) {
494 if ((node = call_prom("parent", 1, 1, node)) == 0)
495 return 0;
496 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
497 return 1;
498 }
499 }
500
501 static inline int prom_getprop(phandle node, const char *pname,
502 void *value, size_t valuelen)
503 {
504 return call_prom("getprop", 4, 1, node, ADDR(pname),
505 (u32)(unsigned long) value, (u32) valuelen);
506 }
507
508 static inline int prom_getproplen(phandle node, const char *pname)
509 {
510 return call_prom("getproplen", 2, 1, node, ADDR(pname));
511 }
512
513 static void add_string(char **str, const char *q)
514 {
515 char *p = *str;
516
517 while (*q)
518 *p++ = *q++;
519 *p++ = ' ';
520 *str = p;
521 }
522
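/* Format x as lowercase hex; note the result points into a static buffer. */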
523 static char *tohex(unsigned int x)
524 {
525 static char digits[] = "0123456789abcdef";
526 static char result[9];
527 int i;
528
529 result[8] = 0;
530 i = 8;
531 do {
532 --i;
533 result[i] = digits[x & 0xf];
534 x >>= 4;
535 } while (x != 0 && i > 0);
536 return &result[i];
537 }
538
539 static int __init prom_setprop(phandle node, const char *nodename,
540 const char *pname, void *value, size_t valuelen)
541 {
542 char cmd[256], *p;
543
544 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
545 return call_prom("setprop", 4, 1, node, ADDR(pname),
546 (u32)(unsigned long) value, (u32) valuelen);
547
548 /* gah... setprop doesn't work on longtrail, have to use interpret */
549 p = cmd;
550 add_string(&p, "dev");
551 add_string(&p, nodename);
552 add_string(&p, tohex((u32)(unsigned long) value));
553 add_string(&p, tohex(valuelen));
554 add_string(&p, tohex(ADDR(pname)));
555 add_string(&p, tohex(strlen(pname)));
556 add_string(&p, "property");
557 *p = 0;
558 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
559 }
560
561 /* We can't use the standard versions because of relocation headaches. */
562 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \
563 || ('a' <= (c) && (c) <= 'f') \
564 || ('A' <= (c) && (c) <= 'F'))
565
566 #define isdigit(c) ('0' <= (c) && (c) <= '9')
567 #define islower(c) ('a' <= (c) && (c) <= 'z')
568 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
569
570 static unsigned long prom_strtoul(const char *cp, const char **endp)
571 {
572 unsigned long result = 0, base = 10, value;
573
574 if (*cp == '0') {
575 base = 8;
576 cp++;
577 if (toupper(*cp) == 'X') {
578 cp++;
579 base = 16;
580 }
581 }
582
583 while (isxdigit(*cp) &&
584 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
585 result = result * base + value;
586 cp++;
587 }
588
589 if (endp)
590 *endp = cp;
591
592 return result;
593 }
594
595 static unsigned long prom_memparse(const char *ptr, const char **retptr)
596 {
597 unsigned long ret = prom_strtoul(ptr, retptr);
598 int shift = 0;
599
600 /*
601 * We can't use a switch here because GCC *may* generate a
602 * jump table which won't work, because we're not running at
603 * the address we're linked at.
604 */
605 if ('G' == **retptr || 'g' == **retptr)
606 shift = 30;
607
608 if ('M' == **retptr || 'm' == **retptr)
609 shift = 20;
610
611 if ('K' == **retptr || 'k' == **retptr)
612 shift = 10;
613
614 if (shift) {
615 ret <<= shift;
616 (*retptr)++;
617 }
618
619 return ret;
620 }
621
622 /*
623 * Early parsing of the command line passed to the kernel, used for
624 * "mem=x" and the options that affect the iommu
625 */
626 static void __init early_cmdline_parse(void)
627 {
628 const char *opt;
629
630 char *p;
631 int l __maybe_unused = 0;
632
633 prom_cmd_line[0] = 0;
634 p = prom_cmd_line;
635 if ((long)prom.chosen > 0)
636 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
637 #ifdef CONFIG_CMDLINE
638 if (l <= 0 || p[0] == '\0') /* dbl check */
639 strlcpy(prom_cmd_line,
640 CONFIG_CMDLINE, sizeof(prom_cmd_line));
641 #endif /* CONFIG_CMDLINE */
642 prom_printf("command line: %s\n", prom_cmd_line);
643
644 #ifdef CONFIG_PPC64
645 opt = strstr(prom_cmd_line, "iommu=");
646 if (opt) {
647 prom_printf("iommu opt is: %s\n", opt);
648 opt += 6;
649 while (*opt && *opt == ' ')
650 opt++;
651 if (!strncmp(opt, "off", 3))
652 prom_iommu_off = 1;
653 else if (!strncmp(opt, "force", 5))
654 prom_iommu_force_on = 1;
655 }
656 #endif
657 opt = strstr(prom_cmd_line, "mem=");
658 if (opt) {
659 opt += 4;
660 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
661 #ifdef CONFIG_PPC64
662 /* Align to 16 MB == size of ppc64 large page */
663 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
664 #endif
665 }
666
667 opt = strstr(prom_cmd_line, "disable_radix");
668 if (opt) {
669 opt += 13;
670 if (*opt && *opt == '=') {
671 bool val;
672
673 if (kstrtobool(++opt, &val))
674 prom_radix_disable = false;
675 else
676 prom_radix_disable = val;
677 } else
678 prom_radix_disable = true;
679 }
680 if (prom_radix_disable)
681 prom_debug("Radix disabled from cmdline\n");
682 }
683
684 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
685 /*
686 * The architecture vector has an array of PVR mask/value pairs,
687 * followed by # option vectors - 1, followed by the option vectors.
688 *
689 * See prom.h for the definition of the bits specified in the
690 * architecture vector.
691 */
692
693 /* Firmware expects the value to be n - 1, where n is the # of vectors */
694 #define NUM_VECTORS(n) ((n) - 1)
695
696 /*
697 * Firmware expects 1 + n - 2, where n is the length of the option vector in
698 * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
699 */
700 #define VECTOR_LENGTH(n) (1 + (n) - 2)
701
702 struct option_vector1 {
703 u8 byte1;
704 u8 arch_versions;
705 u8 arch_versions3;
706 } __packed;
707
708 struct option_vector2 {
709 u8 byte1;
710 __be16 reserved;
711 __be32 real_base;
712 __be32 real_size;
713 __be32 virt_base;
714 __be32 virt_size;
715 __be32 load_base;
716 __be32 min_rma;
717 __be32 min_load;
718 u8 min_rma_percent;
719 u8 max_pft_size;
720 } __packed;
721
722 struct option_vector3 {
723 u8 byte1;
724 u8 byte2;
725 } __packed;
726
727 struct option_vector4 {
728 u8 byte1;
729 u8 min_vp_cap;
730 } __packed;
731
732 struct option_vector5 {
733 u8 byte1;
734 u8 byte2;
735 u8 byte3;
736 u8 cmo;
737 u8 associativity;
738 u8 bin_opts;
739 u8 micro_checkpoint;
740 u8 reserved0;
741 __be32 max_cpus;
742 __be16 papr_level;
743 __be16 reserved1;
744 u8 platform_facilities;
745 u8 reserved2;
746 __be16 reserved3;
747 u8 subprocessors;
748 u8 byte22;
749 u8 intarch;
750 u8 mmu;
751 u8 hash_ext;
752 u8 radix_ext;
753 } __packed;
754
755 struct option_vector6 {
756 u8 reserved;
757 u8 secondary_pteg;
758 u8 os_name;
759 } __packed;
760
761 struct ibm_arch_vec {
762 struct { u32 mask, val; } pvrs[12];
763
764 u8 num_vectors;
765
766 u8 vec1_len;
767 struct option_vector1 vec1;
768
769 u8 vec2_len;
770 struct option_vector2 vec2;
771
772 u8 vec3_len;
773 struct option_vector3 vec3;
774
775 u8 vec4_len;
776 struct option_vector4 vec4;
777
778 u8 vec5_len;
779 struct option_vector5 vec5;
780
781 u8 vec6_len;
782 struct option_vector6 vec6;
783 } __packed;
784
785 struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
786 .pvrs = {
787 {
788 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
789 .val = cpu_to_be32(0x003a0000),
790 },
791 {
792 .mask = cpu_to_be32(0xffff0000), /* POWER6 */
793 .val = cpu_to_be32(0x003e0000),
794 },
795 {
796 .mask = cpu_to_be32(0xffff0000), /* POWER7 */
797 .val = cpu_to_be32(0x003f0000),
798 },
799 {
800 .mask = cpu_to_be32(0xffff0000), /* POWER8E */
801 .val = cpu_to_be32(0x004b0000),
802 },
803 {
804 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
805 .val = cpu_to_be32(0x004c0000),
806 },
807 {
808 .mask = cpu_to_be32(0xffff0000), /* POWER8 */
809 .val = cpu_to_be32(0x004d0000),
810 },
811 {
812 .mask = cpu_to_be32(0xffff0000), /* POWER9 */
813 .val = cpu_to_be32(0x004e0000),
814 },
815 {
816 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
817 .val = cpu_to_be32(0x0f000005),
818 },
819 {
820 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
821 .val = cpu_to_be32(0x0f000004),
822 },
823 {
824 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
825 .val = cpu_to_be32(0x0f000003),
826 },
827 {
828 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
829 .val = cpu_to_be32(0x0f000002),
830 },
831 {
832 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
833 .val = cpu_to_be32(0x0f000001),
834 },
835 },
836
837 .num_vectors = NUM_VECTORS(6),
838
839 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
840 .vec1 = {
841 .byte1 = 0,
842 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
843 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
844 .arch_versions3 = OV1_PPC_3_00,
845 },
846
847 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
848 /* option vector 2: Open Firmware options supported */
849 .vec2 = {
850 .byte1 = OV2_REAL_MODE,
851 .reserved = 0,
852 .real_base = cpu_to_be32(0xffffffff),
853 .real_size = cpu_to_be32(0xffffffff),
854 .virt_base = cpu_to_be32(0xffffffff),
855 .virt_size = cpu_to_be32(0xffffffff),
856 .load_base = cpu_to_be32(0xffffffff),
857 .min_rma = cpu_to_be32(512), /* 512MB min RMA */
858 .min_load = cpu_to_be32(0xffffffff), /* full client load */
859 .min_rma_percent = 0, /* min RMA percentage of total RAM */
860 .max_pft_size = 48, /* max log_2(hash table size) */
861 },
862
863 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
864 /* option vector 3: processor options supported */
865 .vec3 = {
866 .byte1 = 0, /* don't ignore, don't halt */
867 .byte2 = OV3_FP | OV3_VMX | OV3_DFP,
868 },
869
870 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
871 /* option vector 4: IBM PAPR implementation */
872 .vec4 = {
873 .byte1 = 0, /* don't halt */
874 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
875 },
876
877 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
878 /* option vector 5: PAPR/OF options */
879 .vec5 = {
880 .byte1 = 0, /* don't ignore, don't halt */
881 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
882 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
883 #ifdef CONFIG_PCI_MSI
884 /* PCIe/MSI support. Without MSI full PCIe is not supported */
885 OV5_FEAT(OV5_MSI),
886 #else
887 0,
888 #endif
889 .byte3 = 0,
890 .cmo =
891 #ifdef CONFIG_PPC_SMLPAR
892 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
893 #else
894 0,
895 #endif
896 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
897 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
898 .micro_checkpoint = 0,
899 .reserved0 = 0,
900 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
901 .papr_level = 0,
902 .reserved1 = 0,
903 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
904 .reserved2 = 0,
905 .reserved3 = 0,
906 .subprocessors = 1,
907 .byte22 = OV5_FEAT(OV5_DRMEM_V2),
908 .intarch = 0,
909 .mmu = 0,
910 .hash_ext = 0,
911 .radix_ext = 0,
912 },
913
914 /* option vector 6: IBM PAPR hints */
915 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
916 .vec6 = {
917 .reserved = 0,
918 .secondary_pteg = 0,
919 .os_name = OV6_LINUX,
920 },
921 };
922
923 /* Old method - ELF header with PT_NOTE sections only works on BE */
924 #ifdef __BIG_ENDIAN__
925 static struct fake_elf {
926 Elf32_Ehdr elfhdr;
927 Elf32_Phdr phdr[2];
928 struct chrpnote {
929 u32 namesz;
930 u32 descsz;
931 u32 type;
932 char name[8]; /* "PowerPC" */
933 struct chrpdesc {
934 u32 real_mode;
935 u32 real_base;
936 u32 real_size;
937 u32 virt_base;
938 u32 virt_size;
939 u32 load_base;
940 } chrpdesc;
941 } chrpnote;
942 struct rpanote {
943 u32 namesz;
944 u32 descsz;
945 u32 type;
946 char name[24]; /* "IBM,RPA-Client-Config" */
947 struct rpadesc {
948 u32 lpar_affinity;
949 u32 min_rmo_size;
950 u32 min_rmo_percent;
951 u32 max_pft_size;
952 u32 splpar;
953 u32 min_load;
954 u32 new_mem_def;
955 u32 ignore_me;
956 } rpadesc;
957 } rpanote;
958 } fake_elf = {
959 .elfhdr = {
960 .e_ident = { 0x7f, 'E', 'L', 'F',
961 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
962 .e_type = ET_EXEC, /* yeah right */
963 .e_machine = EM_PPC,
964 .e_version = EV_CURRENT,
965 .e_phoff = offsetof(struct fake_elf, phdr),
966 .e_phentsize = sizeof(Elf32_Phdr),
967 .e_phnum = 2
968 },
969 .phdr = {
970 [0] = {
971 .p_type = PT_NOTE,
972 .p_offset = offsetof(struct fake_elf, chrpnote),
973 .p_filesz = sizeof(struct chrpnote)
974 }, [1] = {
975 .p_type = PT_NOTE,
976 .p_offset = offsetof(struct fake_elf, rpanote),
977 .p_filesz = sizeof(struct rpanote)
978 }
979 },
980 .chrpnote = {
981 .namesz = sizeof("PowerPC"),
982 .descsz = sizeof(struct chrpdesc),
983 .type = 0x1275,
984 .name = "PowerPC",
985 .chrpdesc = {
986 .real_mode = ~0U, /* ~0 means "don't care" */
987 .real_base = ~0U,
988 .real_size = ~0U,
989 .virt_base = ~0U,
990 .virt_size = ~0U,
991 .load_base = ~0U
992 },
993 },
994 .rpanote = {
995 .namesz = sizeof("IBM,RPA-Client-Config"),
996 .descsz = sizeof(struct rpadesc),
997 .type = 0x12759999,
998 .name = "IBM,RPA-Client-Config",
999 .rpadesc = {
1000 .lpar_affinity = 0,
1001 .min_rmo_size = 64, /* in megabytes */
1002 .min_rmo_percent = 0,
1003 .max_pft_size = 48, /* 2^48 bytes max PFT size */
1004 .splpar = 1,
1005 .min_load = ~0U,
1006 .new_mem_def = 0
1007 }
1008 }
1009 };
1010 #endif /* __BIG_ENDIAN__ */
1011
1012 static int __init prom_count_smt_threads(void)
1013 {
1014 phandle node;
1015 char type[64];
1016 unsigned int plen;
1017
1018 /* Pick up the first CPU node we can find */
1019 for (node = 0; prom_next_node(&node); ) {
1020 type[0] = 0;
1021 prom_getprop(node, "device_type", type, sizeof(type));
1022
1023 if (strcmp(type, "cpu"))
1024 continue;
1025 /*
1026 * There is an entry for each smt thread, each entry being
1027 * 4 bytes long. All cpus should have the same number of
1028 * smt threads, so return after finding the first.
1029 */
1030 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1031 if (plen == PROM_ERROR)
1032 break;
1033 plen >>= 2;
1034 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1035
1036 /* Sanity check */
1037 if (plen < 1 || plen > 64) {
1038 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1039 (unsigned long)plen);
1040 return 1;
1041 }
1042 return plen;
1043 }
1044 prom_debug("No threads found, assuming 1 per core\n");
1045
1046 return 1;
1047
1048 }
1049
1050 static void __init prom_parse_mmu_model(u8 val,
1051 struct platform_support *support)
1052 {
1053 switch (val) {
1054 case OV5_FEAT(OV5_MMU_DYNAMIC):
1055 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1056 prom_debug("MMU - either supported\n");
1057 support->radix_mmu = !prom_radix_disable;
1058 support->hash_mmu = true;
1059 break;
1060 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1061 prom_debug("MMU - radix only\n");
1062 if (prom_radix_disable) {
1063 /*
1064 * If we __have__ to do radix, we're better off ignoring
1065 * the command line rather than not booting.
1066 */
1067 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1068 }
1069 support->radix_mmu = true;
1070 break;
1071 case OV5_FEAT(OV5_MMU_HASH):
1072 prom_debug("MMU - hash only\n");
1073 support->hash_mmu = true;
1074 break;
1075 default:
1076 prom_debug("Unknown mmu support option: 0x%x\n", val);
1077 break;
1078 }
1079 }
1080
1081 static void __init prom_parse_xive_model(u8 val,
1082 struct platform_support *support)
1083 {
1084 switch (val) {
1085 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1086 prom_debug("XIVE - either mode supported\n");
1087 support->xive = true;
1088 break;
1089 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1090 prom_debug("XIVE - exploitation mode supported\n");
1091 support->xive = true;
1092 break;
1093 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1094 prom_debug("XIVE - legacy mode supported\n");
1095 break;
1096 default:
1097 prom_debug("Unknown xive support option: 0x%x\n", val);
1098 break;
1099 }
1100 }
1101
1102 static void __init prom_parse_platform_support(u8 index, u8 val,
1103 struct platform_support *support)
1104 {
1105 switch (index) {
1106 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1107 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1108 break;
1109 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1110 if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
1111 prom_debug("Radix - GTSE supported\n");
1112 support->radix_gtse = true;
1113 }
1114 break;
1115 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1116 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1117 support);
1118 break;
1119 }
1120 }
1121
1122 static void __init prom_check_platform_support(void)
1123 {
1124 struct platform_support supported = {
1125 .hash_mmu = false,
1126 .radix_mmu = false,
1127 .radix_gtse = false,
1128 .xive = false
1129 };
1130 int prop_len = prom_getproplen(prom.chosen,
1131 "ibm,arch-vec-5-platform-support");
1132 if (prop_len > 1) {
1133 int i;
1134 u8 vec[prop_len];
1135 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1136 prop_len);
1137 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
1138 &vec, sizeof(vec));
1139 for (i = 0; i < prop_len; i += 2) {
1140 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
1141 , vec[i]
1142 , vec[i + 1]);
1143 prom_parse_platform_support(vec[i], vec[i + 1],
1144 &supported);
1145 }
1146 }
1147
1148 if (supported.radix_mmu && supported.radix_gtse &&
1149 IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1150 /* Radix preferred - but we require GTSE for now */
1151 prom_debug("Asking for radix with GTSE\n");
1152 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1153 ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
1154 } else if (supported.hash_mmu) {
1155 /* Default to hash mmu (if we can) */
1156 prom_debug("Asking for hash\n");
1157 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1158 } else {
1159 /* We're probably on a legacy hypervisor */
1160 prom_debug("Assuming legacy hash support\n");
1161 }
1162
1163 if (supported.xive) {
1164 prom_debug("Asking for XIVE\n");
1165 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1166 }
1167 }
1168
1169 static void __init prom_send_capabilities(void)
1170 {
1171 ihandle root;
1172 prom_arg_t ret;
1173 u32 cores;
1174
1175 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1176 prom_check_platform_support();
1177
1178 root = call_prom("open", 1, 1, ADDR("/"));
1179 if (root != 0) {
1180 /* We need to tell the FW about the number of cores we support.
1181 *
1182 * To do that, we count the number of threads on the first core
1183 * (we assume this is the same for all cores) and use it to
1184 * divide NR_CPUS.
1185 */
1186
1187 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1188 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
1189 cores, NR_CPUS);
1190
1191 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1192
1193 /* try calling the ibm,client-architecture-support method */
1194 prom_printf("Calling ibm,client-architecture-support...");
1195 if (call_prom_ret("call-method", 3, 2, &ret,
1196 ADDR("ibm,client-architecture-support"),
1197 root,
1198 ADDR(&ibm_architecture_vec)) == 0) {
1199 /* the call exists... */
1200 if (ret)
1201 prom_printf("\nWARNING: ibm,client-architecture"
1202 "-support call FAILED!\n");
1203 call_prom("close", 1, 0, root);
1204 prom_printf(" done\n");
1205 return;
1206 }
1207 call_prom("close", 1, 0, root);
1208 prom_printf(" not implemented\n");
1209 }
1210
1211 #ifdef __BIG_ENDIAN__
1212 {
1213 ihandle elfloader;
1214
1215 /* no ibm,client-architecture-support call, try the old way */
1216 elfloader = call_prom("open", 1, 1,
1217 ADDR("/packages/elf-loader"));
1218 if (elfloader == 0) {
1219 prom_printf("couldn't open /packages/elf-loader\n");
1220 return;
1221 }
1222 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1223 elfloader, ADDR(&fake_elf));
1224 call_prom("close", 1, 0, elfloader);
1225 }
1226 #endif /* __BIG_ENDIAN__ */
1227 }
1228 #endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1229
1230 /*
1231 * Memory allocation strategy... our layout is normally:
1232 *
1233 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
1234 * rare cases, initrd might end up being before the kernel though.
1235 * We assume this won't overwrite the final kernel at 0; we have no
1236 * provision to handle that in this version, but it should hopefully
1237 * never happen.
1238 *
1239 * alloc_top is set to the top of RMO, eventually shrunk down if the
1240 * TCEs overlap
1241 *
1242 * alloc_bottom is set to the top of kernel/initrd
1243 *
1244 * From there, allocations are done this way: rtas is allocated
1245 * topmost, and the device-tree is allocated from the bottom. We try
1246 * to grow the device-tree allocation as we progress. If we can't,
1247 * then we fail, we don't currently have a facility to restart
1248 * elsewhere, but that shouldn't be necessary.
1249 *
1250 * Note that calls to reserve_mem have to be done explicitly, memory
1251 * allocated with either alloc_up or alloc_down isn't automatically
1252 * reserved.
1253 */
1254
1255
1256 /*
1257 * Allocates memory in the RMO upward from the kernel/initrd
1258 *
1259 * When align is 0, this is a special case: it means allocate in place
1260 * at the current location of alloc_bottom or fail (that is, basically
1261 * extending the previous allocation). Used for the device-tree flattening.
1262 */
1263 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1264 {
1265 unsigned long base = alloc_bottom;
1266 unsigned long addr = 0;
1267
1268 if (align)
1269 base = _ALIGN_UP(base, align);
1270 prom_debug("%s(%lx, %lx)\n", __func__, size, align);
1271 if (ram_top == 0)
1272 prom_panic("alloc_up() called with mem not initialized\n");
1273
1274 if (align)
1275 base = _ALIGN_UP(alloc_bottom, align);
1276 else
1277 base = alloc_bottom;
1278
1279 for(; (base + size) <= alloc_top;
1280 base = _ALIGN_UP(base + 0x100000, align)) {
1281 prom_debug(" trying: 0x%lx\n\r", base);
1282 addr = (unsigned long)prom_claim(base, size, 0);
1283 if (addr != PROM_ERROR && addr != 0)
1284 break;
1285 addr = 0;
1286 if (align == 0)
1287 break;
1288 }
1289 if (addr == 0)
1290 return 0;
1291 alloc_bottom = addr + size;
1292
1293 prom_debug(" -> %lx\n", addr);
1294 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1295 prom_debug(" alloc_top : %lx\n", alloc_top);
1296 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1297 prom_debug(" rmo_top : %lx\n", rmo_top);
1298 prom_debug(" ram_top : %lx\n", ram_top);
1299
1300 return addr;
1301 }
1302
1303 /*
1304 * Allocates memory downward, either from top of RMO, or if highmem
1305 * is set, from the top of RAM. Note that this one doesn't handle
1306 * failures. It does claim memory if highmem is not set.
1307 */
1308 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1309 int highmem)
1310 {
1311 unsigned long base, addr = 0;
1312
1313 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
1314 highmem ? "(high)" : "(low)");
1315 if (ram_top == 0)
1316 prom_panic("alloc_down() called with mem not initialized\n");
1317
1318 if (highmem) {
1319 /* Carve out storage for the TCE table. */
1320 addr = _ALIGN_DOWN(alloc_top_high - size, align);
1321 if (addr <= alloc_bottom)
1322 return 0;
1323 /* Will we bump into the RMO? If yes, check that we
1324 * didn't overlap existing allocations there; if we did,
1325 * we are dead, we must be the first in town!
1326 */
1327 if (addr < rmo_top) {
1328 /* Good, we are first */
1329 if (alloc_top == rmo_top)
1330 alloc_top = rmo_top = addr;
1331 else
1332 return 0;
1333 }
1334 alloc_top_high = addr;
1335 goto bail;
1336 }
1337
1338 base = _ALIGN_DOWN(alloc_top - size, align);
1339 for (; base > alloc_bottom;
1340 base = _ALIGN_DOWN(base - 0x100000, align)) {
1341 prom_debug(" trying: 0x%lx\n\r", base);
1342 addr = (unsigned long)prom_claim(base, size, 0);
1343 if (addr != PROM_ERROR && addr != 0)
1344 break;
1345 addr = 0;
1346 }
1347 if (addr == 0)
1348 return 0;
1349 alloc_top = addr;
1350
1351 bail:
1352 prom_debug(" -> %lx\n", addr);
1353 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1354 prom_debug(" alloc_top : %lx\n", alloc_top);
1355 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1356 prom_debug(" rmo_top : %lx\n", rmo_top);
1357 prom_debug(" ram_top : %lx\n", ram_top);
1358
1359 return addr;
1360 }
1361
1362 /*
1363 * Parse a "reg" cell
1364 */
1365 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1366 {
1367 cell_t *p = *cellp;
1368 unsigned long r = 0;
1369
1370 /* Ignore more than 2 cells */
1371 while (s > sizeof(unsigned long) / 4) {
1372 p++;
1373 s--;
1374 }
1375 r = be32_to_cpu(*p++);
1376 #ifdef CONFIG_PPC64
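/* on ppc64, fold a second 32-bit cell in to form the full 64-bit value */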
1377 if (s > 1) {
1378 r <<= 32;
1379 r |= be32_to_cpu(*(p++));
1380 }
1381 #endif
1382 *cellp = p;
1383 return r;
1384 }
1385
1386 /*
1387 * Very dumb function for adding to the memory reserve list, but
1388 * we don't need anything smarter at this point
1389 *
1390 * XXX Eventually check for collisions. They should NEVER happen.
1391 * If problems seem to show up, it would be a good start to track
1392 * them down.
1393 */
1394 static void __init reserve_mem(u64 base, u64 size)
1395 {
1396 u64 top = base + size;
1397 unsigned long cnt = mem_reserve_cnt;
1398
1399 if (size == 0)
1400 return;
1401
1402 /* We need to always keep one empty entry so that we
1403 * have our terminator with "size" set to 0 since we are
1404 * dumb and just copy this entire array to the boot params
1405 */
1406 base = _ALIGN_DOWN(base, PAGE_SIZE);
1407 top = _ALIGN_UP(top, PAGE_SIZE);
1408 size = top - base;
1409
1410 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1411 prom_panic("Memory reserve map exhausted !\n");
1412 mem_reserve_map[cnt].base = cpu_to_be64(base);
1413 mem_reserve_map[cnt].size = cpu_to_be64(size);
1414 mem_reserve_cnt = cnt + 1;
1415 }
1416
1417 /*
1418 * Initialize memory allocation mechanism, parse "memory" nodes and
1419 * obtain that way the top of memory and RMO to set up our local allocator
1420 */
1421 static void __init prom_init_mem(void)
1422 {
1423 phandle node;
1424 #ifdef DEBUG_PROM
1425 char *path;
1426 #endif
1427 char type[64];
1428 unsigned int plen;
1429 cell_t *p, *endp;
1430 __be32 val;
1431 u32 rac, rsc;
1432
1433 /*
1434 * We iterate the memory nodes to find
1435 * 1) top of RMO (first node)
1436 * 2) top of memory
1437 */
1438 val = cpu_to_be32(2);
1439 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1440 rac = be32_to_cpu(val);
1441 val = cpu_to_be32(1);
1442 prom_getprop(prom.root, "#size-cells", &val, sizeof(val));
1443 rsc = be32_to_cpu(val);
1444 prom_debug("root_addr_cells: %x\n", rac);
1445 prom_debug("root_size_cells: %x\n", rsc);
1446
1447 prom_debug("scanning memory:\n");
1448 #ifdef DEBUG_PROM
1449 path = prom_scratch;
1450 #endif
1451
1452 for (node = 0; prom_next_node(&node); ) {
1453 type[0] = 0;
1454 prom_getprop(node, "device_type", type, sizeof(type));
1455
1456 if (type[0] == 0) {
1457 /*
1458 * CHRP Longtrail machines have no device_type
1459 * on the memory node, so check the name instead...
1460 */
1461 prom_getprop(node, "name", type, sizeof(type));
1462 }
1463 if (strcmp(type, "memory"))
1464 continue;
1465
1466 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1467 if (plen > sizeof(regbuf)) {
1468 prom_printf("memory node too large for buffer !\n");
1469 plen = sizeof(regbuf);
1470 }
1471 p = regbuf;
1472 endp = p + (plen / sizeof(cell_t));
1473
1474 #ifdef DEBUG_PROM
1475 memset(path, 0, PROM_SCRATCH_SIZE);
1476 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1477 prom_debug(" node %s :\n", path);
1478 #endif /* DEBUG_PROM */
1479
1480 while ((endp - p) >= (rac + rsc)) {
1481 unsigned long base, size;
1482
1483 base = prom_next_cell(rac, &p);
1484 size = prom_next_cell(rsc, &p);
1485
1486 if (size == 0)
1487 continue;
1488 prom_debug(" %lx %lx\n", base, size);
1489 if (base == 0 && (of_platform & PLATFORM_LPAR))
1490 rmo_top = size;
1491 if ((base + size) > ram_top)
1492 ram_top = base + size;
1493 }
1494 }
1495
1496 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1497
1498 /*
1499 * If prom_memory_limit is set we reduce the upper limits *except* for
1500 * alloc_top_high. This must be the real top of RAM so we can put
1501 * TCE's up there.
1502 */
1503
1504 alloc_top_high = ram_top;
1505
1506 if (prom_memory_limit) {
1507 if (prom_memory_limit <= alloc_bottom) {
1508 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
1509 prom_memory_limit);
1510 prom_memory_limit = 0;
1511 } else if (prom_memory_limit >= ram_top) {
1512 prom_printf("Ignoring mem=%lx >= ram_top.\n",
1513 prom_memory_limit);
1514 prom_memory_limit = 0;
1515 } else {
1516 ram_top = prom_memory_limit;
1517 rmo_top = min(rmo_top, prom_memory_limit);
1518 }
1519 }
1520
1521 /*
1522 * Setup our top alloc point, that is top of RMO or top of
1523 * segment 0 when running non-LPAR.
1524 * Some RS64 machines have buggy firmware where claims up at
1525 * 1GB fail. Cap at 768MB as a workaround.
1526 * Since 768MB is plenty of room, and we need to cap to something
1527 * reasonable on 32-bit, cap at 768MB on all machines.
1528 */
1529 if (!rmo_top)
1530 rmo_top = ram_top;
1531 rmo_top = min(0x30000000ul, rmo_top);
1532 alloc_top = rmo_top;
1533 alloc_top_high = ram_top;
1534
1535 /*
1536 * Check if we have an initrd after the kernel but still inside
1537 * the RMO. If we do move our bottom point to after it.
1538 */
1539 if (prom_initrd_start &&
1540 prom_initrd_start < rmo_top &&
1541 prom_initrd_end > alloc_bottom)
1542 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1543
1544 prom_printf("memory layout at init:\n");
1545 prom_printf(" memory_limit : %lx (16 MB aligned)\n",
1546 prom_memory_limit);
1547 prom_printf(" alloc_bottom : %lx\n", alloc_bottom);
1548 prom_printf(" alloc_top : %lx\n", alloc_top);
1549 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high);
1550 prom_printf(" rmo_top : %lx\n", rmo_top);
1551 prom_printf(" ram_top : %lx\n", ram_top);
1552 }
1553
1554 static void __init prom_close_stdin(void)
1555 {
1556 __be32 val;
1557 ihandle stdin;
1558
1559 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1560 stdin = be32_to_cpu(val);
1561 call_prom("close", 1, 0, stdin);
1562 }
1563 }
1564
1565 #ifdef CONFIG_PPC_POWERNV
1566
1567 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1568 static u64 __initdata prom_opal_base;
1569 static u64 __initdata prom_opal_entry;
1570 #endif
1571
1572 /*
1573 * Allocate room for and instantiate OPAL
1574 */
1575 static void __init prom_instantiate_opal(void)
1576 {
1577 phandle opal_node;
1578 ihandle opal_inst;
1579 u64 base, entry;
1580 u64 size = 0, align = 0x10000;
1581 __be64 val64;
1582 u32 rets[2];
1583
1584 prom_debug("prom_instantiate_opal: start...\n");
1585
1586 opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1587 prom_debug("opal_node: %x\n", opal_node);
1588 if (!PHANDLE_VALID(opal_node))
1589 return;
1590
1591 val64 = 0;
1592 prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
1593 size = be64_to_cpu(val64);
1594 if (size == 0)
1595 return;
1596 val64 = 0;
1597 prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64));
1598 align = be64_to_cpu(val64);
1599
1600 base = alloc_down(size, align, 0);
1601 if (base == 0) {
1602 prom_printf("OPAL allocation failed !\n");
1603 return;
1604 }
1605
1606 opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
1607 if (!IHANDLE_VALID(opal_inst)) {
1608 prom_printf("opening opal package failed (%x)\n", opal_inst);
1609 return;
1610 }
1611
1612 prom_printf("instantiating opal at 0x%llx...", base);
1613
1614 if (call_prom_ret("call-method", 4, 3, rets,
1615 ADDR("load-opal-runtime"),
1616 opal_inst,
1617 base >> 32, base & 0xffffffff) != 0
1618 || (rets[0] == 0 && rets[1] == 0)) {
1619 prom_printf(" failed\n");
1620 return;
1621 }
1622 entry = (((u64)rets[0]) << 32) | rets[1];
1623
1624 prom_printf(" done\n");
1625
1626 reserve_mem(base, size);
1627
1628 prom_debug("opal base = 0x%llx\n", base);
1629 prom_debug("opal align = 0x%llx\n", align);
1630 prom_debug("opal entry = 0x%llx\n", entry);
1631 prom_debug("opal size = 0x%llx\n", size);
1632
1633 prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
1634 &base, sizeof(base));
1635 prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
1636 &entry, sizeof(entry));
1637
1638 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1639 prom_opal_base = base;
1640 prom_opal_entry = entry;
1641 #endif
1642 prom_debug("prom_instantiate_opal: end...\n");
1643 }
1644
1645 #endif /* CONFIG_PPC_POWERNV */
1646
1647 /*
1648 * Allocate room for and instantiate RTAS
1649 */
1650 static void __init prom_instantiate_rtas(void)
1651 {
1652 phandle rtas_node;
1653 ihandle rtas_inst;
1654 u32 base, entry = 0;
1655 __be32 val;
1656 u32 size = 0;
1657
1658 prom_debug("prom_instantiate_rtas: start...\n");
1659
1660 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1661 prom_debug("rtas_node: %x\n", rtas_node);
1662 if (!PHANDLE_VALID(rtas_node))
1663 return;
1664
1665 val = 0;
1666 prom_getprop(rtas_node, "rtas-size", &val, sizeof(val));
1667 size = be32_to_cpu(val);
1668 if (size == 0)
1669 return;
1670
1671 base = alloc_down(size, PAGE_SIZE, 0);
1672 if (base == 0)
1673 prom_panic("Could not allocate memory for RTAS\n");
1674
1675 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1676 if (!IHANDLE_VALID(rtas_inst)) {
1677 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1678 return;
1679 }
1680
1681 prom_printf("instantiating rtas at 0x%x...", base);
1682
1683 if (call_prom_ret("call-method", 3, 2, &entry,
1684 ADDR("instantiate-rtas"),
1685 rtas_inst, base) != 0
1686 || entry == 0) {
1687 prom_printf(" failed\n");
1688 return;
1689 }
1690 prom_printf(" done\n");
1691
1692 reserve_mem(base, size);
1693
1694 val = cpu_to_be32(base);
1695 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1696 &val, sizeof(val));
1697 val = cpu_to_be32(entry);
1698 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1699 &val, sizeof(val));
1700
1701 /* Check if it supports "query-cpu-stopped-state" */
1702 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1703 &val, sizeof(val)) != PROM_ERROR)
1704 rtas_has_query_cpu_stopped = true;
1705
1706 prom_debug("rtas base = 0x%x\n", base);
1707 prom_debug("rtas entry = 0x%x\n", entry);
1708 prom_debug("rtas size = 0x%x\n", size);
1709
1710 prom_debug("prom_instantiate_rtas: end...\n");
1711 }
1712
1713 #ifdef CONFIG_PPC64
1714 /*
1715 * Allocate room for and instantiate Stored Measurement Log (SML)
1716 */
1717 static void __init prom_instantiate_sml(void)
1718 {
1719 phandle ibmvtpm_node;
1720 ihandle ibmvtpm_inst;
1721 u32 entry = 0, size = 0, succ = 0;
1722 u64 base;
1723 __be32 val;
1724
1725 prom_debug("prom_instantiate_sml: start...\n");
1726
1727 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1728 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1729 if (!PHANDLE_VALID(ibmvtpm_node))
1730 return;
1731
1732 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1733 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1734 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1735 return;
1736 }
1737
1738 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1739 &val, sizeof(val)) != PROM_ERROR) {
1740 if (call_prom_ret("call-method", 2, 2, &succ,
1741 ADDR("reformat-sml-to-efi-alignment"),
1742 ibmvtpm_inst) != 0 || succ == 0) {
1743 prom_printf("Reformat SML to EFI alignment failed\n");
1744 return;
1745 }
1746
1747 if (call_prom_ret("call-method", 2, 2, &size,
1748 ADDR("sml-get-allocated-size"),
1749 ibmvtpm_inst) != 0 || size == 0) {
1750 prom_printf("SML get allocated size failed\n");
1751 return;
1752 }
1753 } else {
1754 if (call_prom_ret("call-method", 2, 2, &size,
1755 ADDR("sml-get-handover-size"),
1756 ibmvtpm_inst) != 0 || size == 0) {
1757 prom_printf("SML get handover size failed\n");
1758 return;
1759 }
1760 }
1761
1762 base = alloc_down(size, PAGE_SIZE, 0);
1763 if (base == 0)
1764 prom_panic("Could not allocate memory for sml\n");
1765
1766 prom_printf("instantiating sml at 0x%llx...", base);
1767
1768 memset((void *)base, 0, size);
1769
1770 if (call_prom_ret("call-method", 4, 2, &entry,
1771 ADDR("sml-handover"),
1772 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1773 prom_printf("SML handover failed\n");
1774 return;
1775 }
1776 prom_printf(" done\n");
1777
1778 reserve_mem(base, size);
1779
1780 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1781 &base, sizeof(base));
1782 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1783 &size, sizeof(size));
1784
1785 prom_debug("sml base = 0x%llx\n", base);
1786 prom_debug("sml size = 0x%x\n", size);
1787
1788 prom_debug("prom_instantiate_sml: end...\n");
1789 }
1790
1791 /*
1792 * Allocate room for and initialize TCE tables
1793 */
1794 #ifdef __BIG_ENDIAN__
1795 static void __init prom_initialize_tce_table(void)
1796 {
1797 phandle node;
1798 ihandle phb_node;
1799 char compatible[64], type[64], model[64];
1800 char *path = prom_scratch;
1801 u64 base, align;
1802 u32 minalign, minsize;
1803 u64 tce_entry, *tce_entryp;
1804 u64 local_alloc_top, local_alloc_bottom;
1805 u64 i;
1806
1807 if (prom_iommu_off)
1808 return;
1809
1810 prom_debug("starting prom_initialize_tce_table\n");
1811
1812 /* Cache current top of allocs so we reserve a single block */
1813 local_alloc_top = alloc_top_high;
1814 local_alloc_bottom = local_alloc_top;
1815
1816 /* Search all nodes looking for PHBs. */
1817 for (node = 0; prom_next_node(&node); ) {
1818 compatible[0] = 0;
1819 type[0] = 0;
1820 model[0] = 0;
1821 prom_getprop(node, "compatible",
1822 compatible, sizeof(compatible));
1823 prom_getprop(node, "device_type", type, sizeof(type));
1824 prom_getprop(node, "model", model, sizeof(model));
1825
1826 if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1827 continue;
1828
1829 /* Keep the old logic intact to avoid regression. */
1830 if (compatible[0] != 0) {
1831 if ((strstr(compatible, "python") == NULL) &&
1832 (strstr(compatible, "Speedwagon") == NULL) &&
1833 (strstr(compatible, "Winnipeg") == NULL))
1834 continue;
1835 } else if (model[0] != 0) {
1836 if ((strstr(model, "ython") == NULL) &&
1837 (strstr(model, "peedwagon") == NULL) &&
1838 (strstr(model, "innipeg") == NULL))
1839 continue;
1840 }
1841
1842 if (prom_getprop(node, "tce-table-minalign", &minalign,
1843 sizeof(minalign)) == PROM_ERROR)
1844 minalign = 0;
1845 if (prom_getprop(node, "tce-table-minsize", &minsize,
1846 sizeof(minsize)) == PROM_ERROR)
1847 minsize = 4UL << 20;
1848
1849 /*
1850 * Even though we read what OF wants, we just set the table
1851 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1852 * By doing this, we avoid the pitfalls of trying to DMA to
1853 * MMIO space and the DMA alias hole.
1854 */
1855 minsize = 4UL << 20;
1856
1857 /* Align to the greater of the align or size */
1858 align = max(minalign, minsize);
1859 base = alloc_down(minsize, align, 1);
1860 if (base == 0)
1861 prom_panic("ERROR, cannot find space for TCE table.\n");
1862 if (base < local_alloc_bottom)
1863 local_alloc_bottom = base;
1864
1865 /* It seems OF doesn't null-terminate the path :-( */
1866 memset(path, 0, PROM_SCRATCH_SIZE);
1867 /* Call OF to setup the TCE hardware */
1868 if (call_prom("package-to-path", 3, 1, node,
1869 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1870 prom_printf("package-to-path failed\n");
1871 }
1872
1873 /* Save away the TCE table attributes for later use. */
1874 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1875 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1876
1877 prom_debug("TCE table: %s\n", path);
1878 prom_debug("\tnode = 0x%x\n", node);
1879 prom_debug("\tbase = 0x%llx\n", base);
1880 prom_debug("\tsize = 0x%x\n", minsize);
1881
1882 /* Initialize the table to have a one-to-one mapping
1883 * over the allocated size.
1884 */
1885 tce_entryp = (u64 *)base;
1886 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1887 tce_entry = (i << PAGE_SHIFT);
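/* 0x3 sets the TCE read and write permission bits */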
1888 tce_entry |= 0x3;
1889 *tce_entryp = tce_entry;
1890 }
1891
1892 prom_printf("opening PHB %s", path);
1893 phb_node = call_prom("open", 1, 1, path);
1894 if (phb_node == 0)
1895 prom_printf("... failed\n");
1896 else
1897 prom_printf("... done\n");
1898
1899 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1900 phb_node, -1, minsize,
1901 (u32) base, (u32) (base >> 32));
1902 call_prom("close", 1, 0, phb_node);
1903 }
1904
1905 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1906
1907 /* These are only really needed if there is a memory limit in
1908 * effect, but we don't know so export them always. */
1909 prom_tce_alloc_start = local_alloc_bottom;
1910 prom_tce_alloc_end = local_alloc_top;
1911
1912 /* Flag the first invalid entry */
1913 prom_debug("ending prom_initialize_tce_table\n");
1914 }
1915 #endif /* __BIG_ENDIAN__ */
1916 #endif /* CONFIG_PPC64 */
1917
1918 /*
1919 * With CHRP SMP we need to use the OF to start the other processors.
1920 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1921 * so we have to put the processors into a holding pattern controlled
1922 * by the kernel (not OF) before we destroy the OF.
1923 *
1924 * This uses a chunk of low memory, puts some holding pattern
1925 * code there and sends the other processors off to there until
1926 * smp_boot_cpus tells them to do something. The holding pattern
1927 * checks that address until its cpu # is there, when it is that
1928 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1929 * of setting those values.
1930 *
1931 * We also use physical address 0x4 here to tell when a cpu
1932 * is in its holding pattern code.
1933 *
1934 * -- Cort
1935 */
1936 /*
1937 * We want to reference the copy of __secondary_hold_* in the
1938 * 0 - 0x100 address range
1939 */
1940 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
1941
1942 static void __init prom_hold_cpus(void)
1943 {
1944 unsigned long i;
1945 phandle node;
1946 char type[64];
1947 unsigned long *spinloop
1948 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1949 unsigned long *acknowledge
1950 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1951 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1952
1953 /*
1954 * On pseries, if RTAS supports "query-cpu-stopped-state",
1955 * we skip this stage; the CPUs will be started by the
1956 * kernel using RTAS.
1957 */
1958 if ((of_platform == PLATFORM_PSERIES ||
1959 of_platform == PLATFORM_PSERIES_LPAR) &&
1960 rtas_has_query_cpu_stopped) {
1961 prom_printf("prom_hold_cpus: skipped\n");
1962 return;
1963 }
1964
1965 prom_debug("prom_hold_cpus: start...\n");
1966 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
1967 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
1968 prom_debug(" 1) acknowledge = 0x%lx\n",
1969 (unsigned long)acknowledge);
1970 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
1971 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);
1972
1973 /* Set the common spinloop variable, so all of the secondary cpus
1974 * will block when they are awakened from their OF spinloop.
1975 * This must occur for both SMP and non-SMP kernels, since OF will
1976 * be trashed when we move the kernel.
1977 */
1978 *spinloop = 0;
1979
1980 /* look for cpus */
1981 for (node = 0; prom_next_node(&node); ) {
1982 unsigned int cpu_no;
1983 __be32 reg;
1984
1985 type[0] = 0;
1986 prom_getprop(node, "device_type", type, sizeof(type));
1987 if (strcmp(type, "cpu") != 0)
1988 continue;
1989
1990 /* Skip non-configured cpus. */
1991 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1992 if (strcmp(type, "okay") != 0)
1993 continue;
1994
1995 reg = cpu_to_be32(-1); /* make sparse happy */
1996 prom_getprop(node, "reg", &reg, sizeof(reg));
1997 cpu_no = be32_to_cpu(reg);
1998
1999 prom_debug("cpu hw idx = %u\n", cpu_no);
2000
2001 /* Init the acknowledge var which will be reset by
2002 * the secondary cpu when it awakens from its OF
2003 * spinloop.
2004 */
2005 *acknowledge = (unsigned long)-1;
2006
2007 if (cpu_no != prom.cpu) {
2008 /* Primary Thread of non-boot cpu or any thread */
2009 prom_printf("starting cpu hw idx %u... ", cpu_no);
2010 call_prom("start-cpu", 3, 0, node,
2011 secondary_hold, cpu_no);
2012
2013 for (i = 0; (i < 100000000) &&
2014 (*acknowledge == ((unsigned long)-1)); i++ )
2015 mb();
2016
2017 if (*acknowledge == cpu_no)
2018 prom_printf("done\n");
2019 else
2020 prom_printf("failed: %lx\n", *acknowledge);
2021 }
2022 #ifdef CONFIG_SMP
2023 else
2024 prom_printf("boot cpu hw idx %u\n", cpu_no);
2025 #endif /* CONFIG_SMP */
2026 }
2027
2028 prom_debug("prom_hold_cpus: end...\n");
2029 }
2030
2031
2032 static void __init prom_init_client_services(unsigned long pp)
2033 {
2034 /* Get a handle to the prom entry point before anything else */
2035 prom_entry = pp;
2036
2037 /* get a handle for the stdout device */
2038 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2039 if (!PHANDLE_VALID(prom.chosen))
2040 prom_panic("cannot find chosen"); /* msg won't be printed :( */
2041
2042 /* get device tree root */
2043 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2044 if (!PHANDLE_VALID(prom.root))
2045 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2046
2047 prom.mmumap = 0;
2048 }
2049
2050 #ifdef CONFIG_PPC32
2051 /*
2052 * For really old powermacs, we need to map things we claim.
2053 * For that, we need the ihandle of the mmu.
2054 * Also, on the longtrail, we need to work around other bugs.
2055 */
2056 static void __init prom_find_mmu(void)
2057 {
2058 phandle oprom;
2059 char version[64];
2060
2061 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2062 if (!PHANDLE_VALID(oprom))
2063 return;
2064 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2065 return;
2066 version[sizeof(version) - 1] = 0;
2067 /* XXX might need to add other versions here */
2068 if (strcmp(version, "Open Firmware, 1.0.5") == 0)
2069 of_workarounds = OF_WA_CLAIM;
2070 else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
2071 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2072 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2073 } else
2074 return;
2075 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2076 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2077 sizeof(prom.mmumap));
2078 prom.mmumap = be32_to_cpu(prom.mmumap);
2079 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2080 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
2081 }
2082 #else
2083 #define prom_find_mmu()
2084 #endif
2085
2086 static void __init prom_init_stdout(void)
2087 {
2088 char *path = of_stdout_device;
2089 char type[16];
2090 phandle stdout_node;
2091 __be32 val;
2092
2093 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2094 prom_panic("cannot find stdout");
2095
2096 prom.stdout = be32_to_cpu(val);
2097
2098 /* Get the full OF pathname of the stdout device */
2099 memset(path, 0, 256);
2100 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2101 prom_printf("OF stdout device is: %s\n", of_stdout_device);
2102 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2103 path, strlen(path) + 1);
2104
2105 /* instance-to-package fails on PA-Semi */
2106 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2107 if (stdout_node != PROM_ERROR) {
2108 val = cpu_to_be32(stdout_node);
2109
2110 /* If it's a display, note it */
2111 memset(type, 0, sizeof(type));
2112 prom_getprop(stdout_node, "device_type", type, sizeof(type));
2113 if (strcmp(type, "display") == 0)
2114 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2115 }
2116 }
2117
2118 static int __init prom_find_machine_type(void)
2119 {
2120 char compat[256];
2121 int len, i = 0;
2122 #ifdef CONFIG_PPC64
2123 phandle rtas;
2124 int x;
2125 #endif
2126
2127 /* Look for a PowerMac or a Cell */
2128 len = prom_getprop(prom.root, "compatible",
2129 compat, sizeof(compat)-1);
2130 if (len > 0) {
2131 compat[len] = 0;
2132 while (i < len) {
2133 char *p = &compat[i];
2134 int sl = strlen(p);
2135 if (sl == 0)
2136 break;
2137 if (strstr(p, "Power Macintosh") ||
2138 strstr(p, "MacRISC"))
2139 return PLATFORM_POWERMAC;
2140 #ifdef CONFIG_PPC64
2141 /* We must make sure we don't detect the IBM Cell
2142 * blades as pSeries due to some firmware issues,
2143 * so we do it here.
2144 */
2145 if (strstr(p, "IBM,CBEA") ||
2146 strstr(p, "IBM,CPBW-1.0"))
2147 return PLATFORM_GENERIC;
2148 #endif /* CONFIG_PPC64 */
2149 i += sl + 1;
2150 }
2151 }
2152 #ifdef CONFIG_PPC64
2153 /* Try to detect OPAL */
2154 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
2155 return PLATFORM_OPAL;
2156
2157 /* Try to figure out if it's an IBM pSeries or any other
2158 * PAPR compliant platform. We assume it is if :
2159 * - /device_type is "chrp" (please, do NOT use that for future
2160 * non-IBM designs!)
2161 * - it has /rtas
2162 */
2163 len = prom_getprop(prom.root, "device_type",
2164 compat, sizeof(compat)-1);
2165 if (len <= 0)
2166 return PLATFORM_GENERIC;
2167 if (strcmp(compat, "chrp"))
2168 return PLATFORM_GENERIC;
2169
2170 /* Default to pSeries. We need to know if we are running LPAR */
2171 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2172 if (!PHANDLE_VALID(rtas))
2173 return PLATFORM_GENERIC;
2174 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2175 if (x != PROM_ERROR) {
2176 prom_debug("Hypertas detected, assuming LPAR !\n");
2177 return PLATFORM_PSERIES_LPAR;
2178 }
2179 return PLATFORM_PSERIES;
2180 #else
2181 return PLATFORM_GENERIC;
2182 #endif
2183 }
2184
2185 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2186 {
2187 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2188 }
2189
2190 /*
2191 * If we have a display that we don't know how to drive,
2192 * we will want to try to execute OF's open method for it
2193 * later. However, OF will probably fall over if we do that
2194 * after we've taken over the MMU.
2195 * So we check whether we will need to open the display,
2196 * and if so, open it now.
2197 */
2198 static void __init prom_check_displays(void)
2199 {
2200 char type[16], *path;
2201 phandle node;
2202 ihandle ih;
2203 int i;
2204
2205 static unsigned char default_colors[] = {
2206 0x00, 0x00, 0x00,
2207 0x00, 0x00, 0xaa,
2208 0x00, 0xaa, 0x00,
2209 0x00, 0xaa, 0xaa,
2210 0xaa, 0x00, 0x00,
2211 0xaa, 0x00, 0xaa,
2212 0xaa, 0xaa, 0x00,
2213 0xaa, 0xaa, 0xaa,
2214 0x55, 0x55, 0x55,
2215 0x55, 0x55, 0xff,
2216 0x55, 0xff, 0x55,
2217 0x55, 0xff, 0xff,
2218 0xff, 0x55, 0x55,
2219 0xff, 0x55, 0xff,
2220 0xff, 0xff, 0x55,
2221 0xff, 0xff, 0xff
2222 };
2223 const unsigned char *clut;
2224
2225 prom_debug("Looking for displays\n");
2226 for (node = 0; prom_next_node(&node); ) {
2227 memset(type, 0, sizeof(type));
2228 prom_getprop(node, "device_type", type, sizeof(type));
2229 if (strcmp(type, "display") != 0)
2230 continue;
2231
2232 /* It seems OF doesn't null-terminate the path :-( */
2233 path = prom_scratch;
2234 memset(path, 0, PROM_SCRATCH_SIZE);
2235
2236 /*
2237 * leave some room at the end of the path for appending extra
2238 * arguments
2239 */
2240 if (call_prom("package-to-path", 3, 1, node, path,
2241 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
2242 continue;
2243 prom_printf("found display : %s, opening... ", path);
2244
2245 ih = call_prom("open", 1, 1, path);
2246 if (ih == 0) {
2247 prom_printf("failed\n");
2248 continue;
2249 }
2250
2251 /* Success */
2252 prom_printf("done\n");
2253 prom_setprop(node, path, "linux,opened", NULL, 0);
2254
2255 /* Setup a usable color table when the appropriate
2256 * method is available. Should update this to set-colors */
2257 clut = default_colors;
2258 for (i = 0; i < 16; i++, clut += 3)
2259 if (prom_set_color(ih, i, clut[0], clut[1],
2260 clut[2]) != 0)
2261 break;
2262
2263 #ifdef CONFIG_LOGO_LINUX_CLUT224
2264 clut = PTRRELOC(logo_linux_clut224.clut);
2265 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2266 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2267 clut[2]) != 0)
2268 break;
2269 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2270
2271 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2272 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2273 PROM_ERROR) {
2274 u32 width, height, pitch, addr;
2275
2276 prom_printf("Setting btext !\n");
2277 prom_getprop(node, "width", &width, 4);
2278 prom_getprop(node, "height", &height, 4);
2279 prom_getprop(node, "linebytes", &pitch, 4);
2280 prom_getprop(node, "address", &addr, 4);
2281 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2282 width, height, pitch, addr);
2283 btext_setup_display(width, height, 8, pitch, addr);
2284 }
2285 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2286 }
2287 }
2288
2289
2290 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2291 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2292 unsigned long needed, unsigned long align)
2293 {
2294 void *ret;
2295
2296 *mem_start = _ALIGN(*mem_start, align);
2297 while ((*mem_start + needed) > *mem_end) {
2298 unsigned long room, chunk;
2299
2300 prom_debug("Chunk exhausted, claiming more at %lx...\n",
2301 alloc_bottom);
2302 room = alloc_top - alloc_bottom;
2303 if (room > DEVTREE_CHUNK_SIZE)
2304 room = DEVTREE_CHUNK_SIZE;
2305 if (room < PAGE_SIZE)
2306 prom_panic("No memory for flatten_device_tree "
2307 "(no room)\n");
2308 chunk = alloc_up(room, 0);
2309 if (chunk == 0)
2310 prom_panic("No memory for flatten_device_tree "
2311 "(claim failed)\n");
2312 *mem_end = chunk + room;
2313 }
2314
2315 ret = (void *)*mem_start;
2316 *mem_start += needed;
2317
2318 return ret;
2319 }
2320
2321 #define dt_push_token(token, mem_start, mem_end) do { \
2322 void *room = make_room(mem_start, mem_end, 4, 4); \
2323 *(__be32 *)room = cpu_to_be32(token); \
2324 } while(0)
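/* Flattened-tree tokens are 32-bit big-endian cells on a 4-byte
 * boundary, hence the make_room(..., 4, 4) + cpu_to_be32() above.
 */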
2325
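/* Returns the byte offset of str within the string block, or 0 if it
 * isn't there yet. Offset 0 can safely mean "not found" because the
 * block starts with a 4-byte hole, so no real string lives at 0.
 */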
2326 static unsigned long __init dt_find_string(char *str)
2327 {
2328 char *s, *os;
2329
2330 s = os = (char *)dt_string_start;
2331 s += 4;
2332 while (s < (char *)dt_string_end) {
2333 if (strcmp(s, str) == 0)
2334 return s - os;
2335 s += strlen(s) + 1;
2336 }
2337 return 0;
2338 }
2339
2340 /*
2341 * The Open Firmware 1275 specification states properties must be 31 bytes or
2342 * less, however not all firmwares obey this. Make it 64 bytes to be safe.
2343 */
2344 #define MAX_PROPERTY_NAME 64
2345
2346 static void __init scan_dt_build_strings(phandle node,
2347 unsigned long *mem_start,
2348 unsigned long *mem_end)
2349 {
2350 char *prev_name, *namep, *sstart;
2351 unsigned long soff;
2352 phandle child;
2353
2354 sstart = (char *)dt_string_start;
2355
2356 /* get and store all property names */
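/* Note on the allocation pattern below: MAX_PROPERTY_NAME bytes are
 * reserved up front for each name via make_room(); if the name turns
 * out to be "name", a duplicate already in the string block, or there
 * are no more properties, *mem_start is simply rolled back so only
 * genuinely new names consume space.
 */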
2357 prev_name = "";
2358 for (;;) {
2359 /* 64 is max len of name including nul. */
2360 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2361 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2362 /* No more properties: unwind alloc */
2363 *mem_start = (unsigned long)namep;
2364 break;
2365 }
2366
2367 /* skip "name" */
2368 if (strcmp(namep, "name") == 0) {
2369 *mem_start = (unsigned long)namep;
2370 prev_name = "name";
2371 continue;
2372 }
2373 /* get/create string entry */
2374 soff = dt_find_string(namep);
2375 if (soff != 0) {
2376 *mem_start = (unsigned long)namep;
2377 namep = sstart + soff;
2378 } else {
2379 /* Trim off some if we can */
2380 *mem_start = (unsigned long)namep + strlen(namep) + 1;
2381 dt_string_end = *mem_start;
2382 }
2383 prev_name = namep;
2384 }
2385
2386 /* do all our children */
2387 child = call_prom("child", 1, 1, node);
2388 while (child != 0) {
2389 scan_dt_build_strings(child, mem_start, mem_end);
2390 child = call_prom("peer", 1, 1, child);
2391 }
2392 }
2393
2394 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2395 unsigned long *mem_end)
2396 {
2397 phandle child;
2398 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2399 unsigned long soff;
2400 unsigned char *valp;
2401 static char pname[MAX_PROPERTY_NAME];
2402 int l, room, has_phandle = 0;
2403
2404 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2405
2406 /* get the node's full name */
2407 namep = (char *)*mem_start;
2408 room = *mem_end - *mem_start;
2409 if (room > 255)
2410 room = 255;
2411 l = call_prom("package-to-path", 3, 1, node, namep, room);
2412 if (l >= 0) {
2413 /* Didn't fit? Get more room. */
2414 if (l >= room) {
2415 if (l >= *mem_end - *mem_start)
2416 namep = make_room(mem_start, mem_end, l+1, 1);
2417 call_prom("package-to-path", 3, 1, node, namep, l);
2418 }
2419 namep[l] = '\0';
2420
2421 /* Fixup an Apple bug where they have bogus \0 chars in the
2422 * middle of the path in some properties, and extract
2423 * the unit name (everything after the last '/').
2424 */
2425 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2426 if (*p == '/')
2427 lp = namep;
2428 else if (*p != 0)
2429 *lp++ = *p;
2430 }
2431 *lp = 0;
2432 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
2433 }
2434
2435 /* get it again for debugging */
2436 path = prom_scratch;
2437 memset(path, 0, PROM_SCRATCH_SIZE);
2438 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2439
2440 /* get and store all properties */
2441 prev_name = "";
2442 sstart = (char *)dt_string_start;
2443 for (;;) {
2444 if (call_prom("nextprop", 3, 1, node, prev_name,
2445 pname) != 1)
2446 break;
2447
2448 /* skip "name" */
2449 if (strcmp(pname, "name") == 0) {
2450 prev_name = "name";
2451 continue;
2452 }
2453
2454 /* find string offset */
2455 soff = dt_find_string(pname);
2456 if (soff == 0) {
2457 prom_printf("WARNING: Can't find string index for"
2458 " <%s>, node %s\n", pname, path);
2459 break;
2460 }
2461 prev_name = sstart + soff;
2462
2463 /* get length */
2464 l = call_prom("getproplen", 2, 1, node, pname);
2465
2466 /* sanity checks */
2467 if (l == PROM_ERROR)
2468 continue;
2469
2470 /* push property head */
2471 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2472 dt_push_token(l, mem_start, mem_end);
2473 dt_push_token(soff, mem_start, mem_end);
2474
2475 /* push property content */
2476 valp = make_room(mem_start, mem_end, l, 4);
2477 call_prom("getprop", 4, 1, node, pname, valp, l);
2478 *mem_start = _ALIGN(*mem_start, 4);
2479
2480 if (!strcmp(pname, "phandle"))
2481 has_phandle = 1;
2482 }
2483
2484 /* Add a "linux,phandle" property if no "phandle" property already
2485 * existed (can happen with OPAL)
2486 */
2487 if (!has_phandle) {
2488 soff = dt_find_string("linux,phandle");
2489 if (soff == 0)
2490 prom_printf("WARNING: Can't find string index for"
2491 " <linux-phandle> node %s\n", path);
2492 else {
2493 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2494 dt_push_token(4, mem_start, mem_end);
2495 dt_push_token(soff, mem_start, mem_end);
2496 valp = make_room(mem_start, mem_end, 4, 4);
2497 *(__be32 *)valp = cpu_to_be32(node);
2498 }
2499 }
2500
2501 /* do all our children */
2502 child = call_prom("child", 1, 1, node);
2503 while (child != 0) {
2504 scan_dt_build_struct(child, mem_start, mem_end);
2505 child = call_prom("peer", 1, 1, child);
2506 }
2507
2508 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2509 }
2510
2511 static void __init flatten_device_tree(void)
2512 {
2513 phandle root;
2514 unsigned long mem_start, mem_end, room;
2515 struct boot_param_header *hdr;
2516 char *namep;
2517 u64 *rsvmap;
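/*
 * Layout of the blob built below, in order: boot_param_header, the
 * memory reserve map, a page-aligned strings block holding all
 * property names (built in a first pass by scan_dt_build_strings),
 * then a page-aligned structure block of node/property tokens that
 * reference those names by offset (second pass, scan_dt_build_struct).
 */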
2518
2519 /*
2520 * Check how much room we have between alloc top & bottom (+/- a
2521 * few pages), crop to 1MB, as this is our "chunk" size
2522 */
2523 room = alloc_top - alloc_bottom - 0x4000;
2524 if (room > DEVTREE_CHUNK_SIZE)
2525 room = DEVTREE_CHUNK_SIZE;
2526 prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
2527
2528 /* Now try to claim that */
2529 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2530 if (mem_start == 0)
2531 prom_panic("Can't allocate initial device-tree chunk\n");
2532 mem_end = mem_start + room;
2533
2534 /* Get root of tree */
2535 root = call_prom("peer", 1, 1, (phandle)0);
2536 if (root == (phandle)0)
2537 prom_panic ("couldn't get device tree root\n");
2538
2539 /* Build header and make room for mem rsv map */
2540 mem_start = _ALIGN(mem_start, 4);
2541 hdr = make_room(&mem_start, &mem_end,
2542 sizeof(struct boot_param_header), 4);
2543 dt_header_start = (unsigned long)hdr;
2544 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2545
2546 /* Start of strings */
2547 mem_start = PAGE_ALIGN(mem_start);
2548 dt_string_start = mem_start;
2549 mem_start += 4; /* hole */
2550
2551 /* Add "linux,phandle" in there, we'll need it */
2552 namep = make_room(&mem_start, &mem_end, 16, 1);
2553 strcpy(namep, "linux,phandle");
2554 mem_start = (unsigned long)namep + strlen(namep) + 1;
2555
2556 /* Build string array */
2557 prom_printf("Building dt strings...\n");
2558 scan_dt_build_strings(root, &mem_start, &mem_end);
2559 dt_string_end = mem_start;
2560
2561 /* Build structure */
2562 mem_start = PAGE_ALIGN(mem_start);
2563 dt_struct_start = mem_start;
2564 prom_printf("Building dt structure...\n");
2565 scan_dt_build_struct(root, &mem_start, &mem_end);
2566 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2567 dt_struct_end = PAGE_ALIGN(mem_start);
2568
2569 /* Finish header */
2570 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2571 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2572 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2573 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2574 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2575 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2576 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2577 hdr->version = cpu_to_be32(OF_DT_VERSION);
2578 /* Version 16 is not backward compatible */
2579 hdr->last_comp_version = cpu_to_be32(0x10);
2580
2581 /* Copy the reserve map in */
2582 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2583
2584 #ifdef DEBUG_PROM
2585 {
2586 int i;
2587 prom_printf("reserved memory map:\n");
2588 for (i = 0; i < mem_reserve_cnt; i++)
2589 prom_printf(" %llx - %llx\n",
2590 be64_to_cpu(mem_reserve_map[i].base),
2591 be64_to_cpu(mem_reserve_map[i].size));
2592 }
2593 #endif
2594 /* Bump mem_reserve_cnt to cause further reservations to fail
2595 * since it's too late.
2596 */
2597 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2598
2599 prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
2600 dt_string_start, dt_string_end);
2601 prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
2602 dt_struct_start, dt_struct_end);
2603 }
2604
2605 #ifdef CONFIG_PPC_MAPLE
2606 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2607 * The values are bad, and it doesn't even have the right number of cells. */
2608 static void __init fixup_device_tree_maple(void)
2609 {
2610 phandle isa;
2611 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2612 u32 isa_ranges[6];
2613 char *name;
2614
2615 name = "/ht@0/isa@4";
2616 isa = call_prom("finddevice", 1, 1, ADDR(name));
2617 if (!PHANDLE_VALID(isa)) {
2618 name = "/ht@0/isa@6";
2619 isa = call_prom("finddevice", 1, 1, ADDR(name));
2620 rloc = 0x01003000; /* IO space; PCI device = 6 */
2621 }
2622 if (!PHANDLE_VALID(isa))
2623 return;
2624
2625 if (prom_getproplen(isa, "ranges") != 12)
2626 return;
2627 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2628 == PROM_ERROR)
2629 return;
2630
2631 if (isa_ranges[0] != 0x1 ||
2632 isa_ranges[1] != 0xf4000000 ||
2633 isa_ranges[2] != 0x00010000)
2634 return;
2635
2636 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2637
2638 isa_ranges[0] = 0x1;
2639 isa_ranges[1] = 0x0;
2640 isa_ranges[2] = rloc;
2641 isa_ranges[3] = 0x0;
2642 isa_ranges[4] = 0x0;
2643 isa_ranges[5] = 0x00010000;
2644 prom_setprop(isa, name, "ranges",
2645 isa_ranges, sizeof(isa_ranges));
2646 }
2647
2648 #define CPC925_MC_START 0xf8000000
2649 #define CPC925_MC_LENGTH 0x1000000
2650 /* The values for memory-controller don't have the right number of cells */
2651 static void __init fixup_device_tree_maple_memory_controller(void)
2652 {
2653 phandle mc;
2654 u32 mc_reg[4];
2655 char *name = "/hostbridge@f8000000";
2656 u32 ac, sc;
2657
2658 mc = call_prom("finddevice", 1, 1, ADDR(name));
2659 if (!PHANDLE_VALID(mc))
2660 return;
2661
2662 if (prom_getproplen(mc, "reg") != 8)
2663 return;
2664
2665 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2666 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2667 if ((ac != 2) || (sc != 2))
2668 return;
2669
2670 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2671 return;
2672
2673 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2674 return;
2675
2676 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2677
2678 mc_reg[0] = 0x0;
2679 mc_reg[1] = CPC925_MC_START;
2680 mc_reg[2] = 0x0;
2681 mc_reg[3] = CPC925_MC_LENGTH;
2682 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2683 }
2684 #else
2685 #define fixup_device_tree_maple()
2686 #define fixup_device_tree_maple_memory_controller()
2687 #endif
2688
2689 #ifdef CONFIG_PPC_CHRP
2690 /*
2691 * Pegasos and BriQ lack the "ranges" property in the isa node
2692 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2693 * Pegasos has the IDE configured in legacy mode, but advertised as native
2694 */
2695 static void __init fixup_device_tree_chrp(void)
2696 {
2697 phandle ph;
2698 u32 prop[6];
2699 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2700 char *name;
2701 int rc;
2702
2703 name = "/pci@80000000/isa@c";
2704 ph = call_prom("finddevice", 1, 1, ADDR(name));
2705 if (!PHANDLE_VALID(ph)) {
2706 name = "/pci@ff500000/isa@6";
2707 ph = call_prom("finddevice", 1, 1, ADDR(name));
2708 rloc = 0x01003000; /* IO space; PCI device = 6 */
2709 }
2710 if (PHANDLE_VALID(ph)) {
2711 rc = prom_getproplen(ph, "ranges");
2712 if (rc == 0 || rc == PROM_ERROR) {
2713 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2714
2715 prop[0] = 0x1;
2716 prop[1] = 0x0;
2717 prop[2] = rloc;
2718 prop[3] = 0x0;
2719 prop[4] = 0x0;
2720 prop[5] = 0x00010000;
2721 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2722 }
2723 }
2724
2725 name = "/pci@80000000/ide@C,1";
2726 ph = call_prom("finddevice", 1, 1, ADDR(name));
2727 if (PHANDLE_VALID(ph)) {
2728 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2729 prop[0] = 14;
2730 prop[1] = 0x0;
2731 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2732 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2733 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2734 if (rc == sizeof(u32)) {
2735 prop[0] &= ~0x5;
2736 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2737 }
2738 }
2739 }
2740 #else
2741 #define fixup_device_tree_chrp()
2742 #endif
2743
2744 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2745 static void __init fixup_device_tree_pmac(void)
2746 {
2747 phandle u3, i2c, mpic;
2748 u32 u3_rev;
2749 u32 interrupts[2];
2750 u32 parent;
2751
2752 /* Some G5s have a missing interrupt definition, fix it up here */
2753 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2754 if (!PHANDLE_VALID(u3))
2755 return;
2756 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2757 if (!PHANDLE_VALID(i2c))
2758 return;
2759 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2760 if (!PHANDLE_VALID(mpic))
2761 return;
2762
2763 /* check if proper rev of u3 */
2764 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2765 == PROM_ERROR)
2766 return;
2767 if (u3_rev < 0x35 || u3_rev > 0x39)
2768 return;
2769 /* does it need fixup ? */
2770 if (prom_getproplen(i2c, "interrupts") > 0)
2771 return;
2772
2773 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2774
2775 /* interrupt on this revision of u3 is number 0 and level */
2776 interrupts[0] = 0;
2777 interrupts[1] = 1;
2778 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2779 &interrupts, sizeof(interrupts));
2780 parent = (u32)mpic;
2781 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2782 &parent, sizeof(parent));
2783 }
2784 #else
2785 #define fixup_device_tree_pmac()
2786 #endif
2787
2788 #ifdef CONFIG_PPC_EFIKA
2789 /*
2790 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2791 * to talk to the phy. If the phy-handle property is missing, then this
2792 * function is called to add the appropriate nodes and link it to the
2793 * ethernet node.
2794 */
2795 static void __init fixup_device_tree_efika_add_phy(void)
2796 {
2797 u32 node;
2798 char prop[64];
2799 int rv;
2800
2801 /* Check if /builtin/ethernet exists - bail if it doesn't */
2802 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2803 if (!PHANDLE_VALID(node))
2804 return;
2805
2806 /* Check if the phy-handle property exists - bail if it does */
2807 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2808 if (!rv)
2809 return;
2810
2811 /*
2812 * At this point the ethernet device doesn't have a phy described.
2813 * Now we need to add the missing phy node and linkage
2814 */
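/*
 * Both fixups below hand small Forth fragments to the firmware's
 * "interpret" service: the first creates a /builtin/mdio bus node
 * (with an fsl,mpc5200b-mdio compatible and an interrupts property),
 * the second creates an ethernet-phy child under it and stores that
 * node's phandle into the ethernet node's phy-handle property.
 */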
2815
2816 /* Check for an MDIO bus node - if missing then create one */
2817 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2818 if (!PHANDLE_VALID(node)) {
2819 prom_printf("Adding Ethernet MDIO node\n");
2820 call_prom("interpret", 1, 1,
2821 " s\" /builtin\" find-device"
2822 " new-device"
2823 " 1 encode-int s\" #address-cells\" property"
2824 " 0 encode-int s\" #size-cells\" property"
2825 " s\" mdio\" device-name"
2826 " s\" fsl,mpc5200b-mdio\" encode-string"
2827 " s\" compatible\" property"
2828 " 0xf0003000 0x400 reg"
2829 " 0x2 encode-int"
2830 " 0x5 encode-int encode+"
2831 " 0x3 encode-int encode+"
2832 " s\" interrupts\" property"
2833 " finish-device");
2834 }
2835
2836 /* Check for a PHY device node - if missing then create one and
2837 * give its phandle to the ethernet node */
2838 node = call_prom("finddevice", 1, 1,
2839 ADDR("/builtin/mdio/ethernet-phy"));
2840 if (!PHANDLE_VALID(node)) {
2841 prom_printf("Adding Ethernet PHY node\n");
2842 call_prom("interpret", 1, 1,
2843 " s\" /builtin/mdio\" find-device"
2844 " new-device"
2845 " s\" ethernet-phy\" device-name"
2846 " 0x10 encode-int s\" reg\" property"
2847 " my-self"
2848 " ihandle>phandle"
2849 " finish-device"
2850 " s\" /builtin/ethernet\" find-device"
2851 " encode-int"
2852 " s\" phy-handle\" property"
2853 " device-end");
2854 }
2855 }
2856
2857 static void __init fixup_device_tree_efika(void)
2858 {
2859 int sound_irq[3] = { 2, 2, 0 };
2860 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2861 3,4,0, 3,5,0, 3,6,0, 3,7,0,
2862 3,8,0, 3,9,0, 3,10,0, 3,11,0,
2863 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2864 u32 node;
2865 char prop[64];
2866 int rv, len;
2867
2868 /* Check if we're really running on an EFIKA */
2869 node = call_prom("finddevice", 1, 1, ADDR("/"));
2870 if (!PHANDLE_VALID(node))
2871 return;
2872
2873 rv = prom_getprop(node, "model", prop, sizeof(prop));
2874 if (rv == PROM_ERROR)
2875 return;
2876 if (strcmp(prop, "EFIKA5K2"))
2877 return;
2878
2879 prom_printf("Applying EFIKA device tree fixups\n");
2880
2881 /* Claiming to be 'chrp' is death */
2882 node = call_prom("finddevice", 1, 1, ADDR("/"));
2883 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
2884 if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
2885 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
2886
2887 /* CODEGEN,description is exposed in /proc/cpuinfo so
2888 fix that too */
2889 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
2890 if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
2891 prom_setprop(node, "/", "CODEGEN,description",
2892 "Efika 5200B PowerPC System",
2893 sizeof("Efika 5200B PowerPC System"));
2894
2895 /* Fixup bestcomm interrupts property */
2896 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
2897 if (PHANDLE_VALID(node)) {
2898 len = prom_getproplen(node, "interrupts");
2899 if (len == 12) {
2900 prom_printf("Fixing bestcomm interrupts property\n");
2901 prom_setprop(node, "/builtin/bestcom", "interrupts",
2902 bcomm_irq, sizeof(bcomm_irq));
2903 }
2904 }
2905
2906 /* Fixup sound interrupts property */
2907 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
2908 if (PHANDLE_VALID(node)) {
2909 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
2910 if (rv == PROM_ERROR) {
2911 prom_printf("Adding sound interrupts property\n");
2912 prom_setprop(node, "/builtin/sound", "interrupts",
2913 sound_irq, sizeof(sound_irq));
2914 }
2915 }
2916
2917 /* Make sure ethernet phy-handle property exists */
2918 fixup_device_tree_efika_add_phy();
2919 }
2920 #else
2921 #define fixup_device_tree_efika()
2922 #endif
2923
2924 #ifdef CONFIG_PPC_PASEMI_NEMO
2925 /*
2926 * CFE supplied on Nemo is broken in several ways; the biggest
2927 * problem is that it reassigns ISA interrupts to unused mpic ints.
2928 * Add an interrupt-controller property for the io-bridge to use
2929 * and correct the ints so we can attach them to an irq_domain
2930 */
2931 static void __init fixup_device_tree_pasemi(void)
2932 {
2933 u32 interrupts[2], parent, rval, val = 0;
2934 char *name, *pci_name;
2935 phandle iob, node;
2936
2937 /* Find the root pci node */
2938 name = "/pxp@0,e0000000";
2939 iob = call_prom("finddevice", 1, 1, ADDR(name));
2940 if (!PHANDLE_VALID(iob))
2941 return;
2942
2943 /* check if interrupt-controller node set yet */
2944 if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
2945 return;
2946
2947 prom_printf("adding interrupt-controller property for SB600...\n");
2948
2949 prom_setprop(iob, name, "interrupt-controller", &val, 0);
2950
2951 pci_name = "/pxp@0,e0000000/pci@11";
2952 node = call_prom("finddevice", 1, 1, ADDR(pci_name));
2953 parent = ADDR(iob);
2954
2955 for( ; prom_next_node(&node); ) {
2956 /* scan each node for one with an interrupt */
2957 if (!PHANDLE_VALID(node))
2958 continue;
2959
2960 rval = prom_getproplen(node, "interrupts");
2961 if (rval == 0 || rval == PROM_ERROR)
2962 continue;
2963
2964 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
2965 if ((interrupts[0] < 212) || (interrupts[0] > 222))
2966 continue;
2967
2968 /* found a node, update both interrupts and interrupt-parent */
2969 if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
2970 interrupts[0] -= 203;
2971 if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
2972 interrupts[0] -= 213;
2973 if (interrupts[0] == 221)
2974 interrupts[0] = 14;
2975 if (interrupts[0] == 222)
2976 interrupts[0] = 8;
2977
2978 prom_setprop(node, pci_name, "interrupts", interrupts,
2979 sizeof(interrupts));
2980 prom_setprop(node, pci_name, "interrupt-parent", &parent,
2981 sizeof(parent));
2982 }
2983
2984 /*
2985 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
2986 * so that generic isa-bridge code can add the SB600 and its on-board
2987 * peripherals.
2988 */
2989 name = "/pxp@0,e0000000/io-bridge@0";
2990 iob = call_prom("finddevice", 1, 1, ADDR(name));
2991 if (!PHANDLE_VALID(iob))
2992 return;
2993
2994 /* device_type is already set, just change it. */
2995
2996 prom_printf("Changing device_type of SB600 node...\n");
2997
2998 prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
2999 }
3000 #else /* !CONFIG_PPC_PASEMI_NEMO */
3001 static inline void fixup_device_tree_pasemi(void) { }
3002 #endif
3003
3004 static void __init fixup_device_tree(void)
3005 {
3006 fixup_device_tree_maple();
3007 fixup_device_tree_maple_memory_controller();
3008 fixup_device_tree_chrp();
3009 fixup_device_tree_pmac();
3010 fixup_device_tree_efika();
3011 fixup_device_tree_pasemi();
3012 }
3013
3014 static void __init prom_find_boot_cpu(void)
3015 {
3016 __be32 rval;
3017 ihandle prom_cpu;
3018 phandle cpu_pkg;
3019
3020 rval = 0;
3021 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
3022 return;
3023 prom_cpu = be32_to_cpu(rval);
3024
3025 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
3026
3027 if (!PHANDLE_VALID(cpu_pkg))
3028 return;
3029
3030 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3031 prom.cpu = be32_to_cpu(rval);
3032
3033 prom_debug("Booting CPU hw index = %d\n", prom.cpu);
3034 }
3035
3036 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3037 {
3038 #ifdef CONFIG_BLK_DEV_INITRD
3039 if (r3 && r4 && r4 != 0xdeadbeef) {
3040 __be64 val;
3041
3042 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3043 prom_initrd_end = prom_initrd_start + r4;
3044
3045 val = cpu_to_be64(prom_initrd_start);
3046 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
3047 &val, sizeof(val));
3048 val = cpu_to_be64(prom_initrd_end);
3049 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
3050 &val, sizeof(val));
3051
3052 reserve_mem(prom_initrd_start,
3053 prom_initrd_end - prom_initrd_start);
3054
3055 prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
3056 prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
3057 }
3058 #endif /* CONFIG_BLK_DEV_INITRD */
3059 }
3060
3061 #ifdef CONFIG_PPC64
3062 #ifdef CONFIG_RELOCATABLE
3063 static void reloc_toc(void)
3064 {
3065 }
3066
3067 static void unreloc_toc(void)
3068 {
3069 }
3070 #else
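/*
 * Without CONFIG_RELOCATABLE the 64-bit kernel is linked to run at a
 * fixed address, so prom_init must patch its own TOC entries by the
 * load offset while it runs from wherever it was loaded. Per the
 * ppc64 ELF ABI, r2 points 0x8000 into the TOC, hence the -0x8000
 * below to find the start of the entries.
 */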
3071 static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
3072 {
3073 unsigned long i;
3074 unsigned long *toc_entry;
3075
3076 /* Get the start of the TOC by using r2 directly. */
3077 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
3078
3079 for (i = 0; i < nr_entries; i++) {
3080 *toc_entry = *toc_entry + offset;
3081 toc_entry++;
3082 }
3083 }
3084
3085 static void reloc_toc(void)
3086 {
3087 unsigned long offset = reloc_offset();
3088 unsigned long nr_entries =
3089 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3090
3091 __reloc_toc(offset, nr_entries);
3092
3093 mb();
3094 }
3095
3096 static void unreloc_toc(void)
3097 {
3098 unsigned long offset = reloc_offset();
3099 unsigned long nr_entries =
3100 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3101
3102 mb();
3103
3104 __reloc_toc(-offset, nr_entries);
3105 }
3106 #endif
3107 #endif
3108
3109 /*
3110 * We enter here early on, when the Open Firmware prom is still
3111 * handling exceptions and managing the MMU hash table for us.
3112 */
3113
3114 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3115 unsigned long pp,
3116 unsigned long r6, unsigned long r7,
3117 unsigned long kbase)
3118 {
3119 unsigned long hdr;
3120
3121 #ifdef CONFIG_PPC32
3122 unsigned long offset = reloc_offset();
3123 reloc_got2(offset);
3124 #else
3125 reloc_toc();
3126 #endif
3127
3128 /*
3129 * First zero the BSS
3130 */
3131 memset(&__bss_start, 0, __bss_stop - __bss_start);
3132
3133 /*
3134 * Init interface to Open Firmware, get some node references,
3135 * like /chosen
3136 */
3137 prom_init_client_services(pp);
3138
3139 /*
3140 * See if this OF is old enough that we need to do explicit maps
3141 * and other workarounds
3142 */
3143 prom_find_mmu();
3144
3145 /*
3146 * Init prom stdout device
3147 */
3148 prom_init_stdout();
3149
3150 prom_printf("Preparing to boot %s", linux_banner);
3151
3152 /*
3153 * Get default machine type. At this point, we do not differentiate
3154 * between pSeries SMP and pSeries LPAR
3155 */
3156 of_platform = prom_find_machine_type();
3157 prom_printf("Detected machine type: %x\n", of_platform);
3158
3159 #ifndef CONFIG_NONSTATIC_KERNEL
3160 /* Bail if this is a kdump kernel. */
3161 if (PHYSICAL_START > 0)
3162 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3163 #endif
3164
3165 /*
3166 * Check for an initrd
3167 */
3168 prom_check_initrd(r3, r4);
3169
3170 /*
3171 * Do early parsing of command line
3172 */
3173 early_cmdline_parse();
3174
3175 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
3176 /*
3177 * On pSeries, inform the firmware about our capabilities
3178 */
3179 if (of_platform == PLATFORM_PSERIES ||
3180 of_platform == PLATFORM_PSERIES_LPAR)
3181 prom_send_capabilities();
3182 #endif
3183
3184 /*
3185 * Copy the CPU hold code
3186 */
3187 if (of_platform != PLATFORM_POWERMAC)
3188 copy_and_flush(0, kbase, 0x100, 0);
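/* copy_and_flush() above places the first 0x100 bytes of the kernel
 * image at physical address 0; that range holds the __secondary_hold
 * code that prom_hold_cpus() points the other CPUs at via LOW_ADDR().
 */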
3189
3190 /*
3191 * Initialize memory management within prom_init
3192 */
3193 prom_init_mem();
3194
3195 /*
3196 * Determine which cpu is actually running right _now_
3197 */
3198 prom_find_boot_cpu();
3199
3200 /*
3201 * Initialize display devices
3202 */
3203 prom_check_displays();
3204
3205 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3206 /*
3207 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3208 * that uses the allocator, since we need to make sure we get the top of memory
3209 * available for us here...
3210 */
3211 if (of_platform == PLATFORM_PSERIES)
3212 prom_initialize_tce_table();
3213 #endif
3214
3215 /*
3216 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3217 * have a usable RTAS implementation.
3218 */
3219 if (of_platform != PLATFORM_POWERMAC &&
3220 of_platform != PLATFORM_OPAL)
3221 prom_instantiate_rtas();
3222
3223 #ifdef CONFIG_PPC_POWERNV
3224 if (of_platform == PLATFORM_OPAL)
3225 prom_instantiate_opal();
3226 #endif /* CONFIG_PPC_POWERNV */
3227
3228 #ifdef CONFIG_PPC64
3229 /* instantiate sml */
3230 prom_instantiate_sml();
3231 #endif
3232
3233 /*
3234 * On non-powermacs, put all CPUs in spin-loops.
3235 *
3236 * PowerMacs use a different mechanism to spin CPUs
3237 *
3238 * (This must be done after instantiating RTAS)
3239 */
3240 if (of_platform != PLATFORM_POWERMAC &&
3241 of_platform != PLATFORM_OPAL)
3242 prom_hold_cpus();
3243
3244 /*
3245 * Fill in some infos for use by the kernel later on
3246 */
3247 if (prom_memory_limit) {
3248 __be64 val = cpu_to_be64(prom_memory_limit);
3249 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3250 &val, sizeof(val));
3251 }
3252 #ifdef CONFIG_PPC64
3253 if (prom_iommu_off)
3254 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3255 NULL, 0);
3256
3257 if (prom_iommu_force_on)
3258 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3259 NULL, 0);
3260
3261 if (prom_tce_alloc_start) {
3262 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3263 &prom_tce_alloc_start,
3264 sizeof(prom_tce_alloc_start));
3265 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3266 &prom_tce_alloc_end,
3267 sizeof(prom_tce_alloc_end));
3268 }
3269 #endif
3270
3271 /*
3272 * Fixup any known bugs in the device-tree
3273 */
3274 fixup_device_tree();
3275
3276 /*
3277 * Now finally create the flattened device-tree
3278 */
3279 prom_printf("copying OF device tree...\n");
3280 flatten_device_tree();
3281
3282 /*
3283 * in case stdin is USB and still active on IBM machines...
3284 * Unfortunately quiesce crashes on some powermacs if we have
3285 * closed stdin already (in particular the powerbook 101). It
3286 * appears that the OPAL version of OFW doesn't like it either.
3287 */
3288 if (of_platform != PLATFORM_POWERMAC &&
3289 of_platform != PLATFORM_OPAL)
3290 prom_close_stdin();
3291
3292 /*
3293 * Call OF "quiesce" method to shut down pending DMA's from
3294 * devices etc...
3295 */
3296 prom_printf("Quiescing Open Firmware ...\n");
3297 call_prom("quiesce", 0, 0);
3298
3299 /*
3300 * And finally, call the kernel passing it the flattened device
3301 * tree and NULL as r5, thus triggering the new entry point which
3302 * is common to us and kexec
3303 */
3304 hdr = dt_header_start;
3305
3306 /* Don't print anything after quiesce under OPAL, it crashes OFW */
3307 if (of_platform != PLATFORM_OPAL) {
3308 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3309 prom_debug("->dt_header_start=0x%lx\n", hdr);
3310 }
3311
3312 #ifdef CONFIG_PPC32
3313 reloc_got2(-offset);
3314 #else
3315 unreloc_toc();
3316 #endif
3317
3318 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
3319 /* OPAL early debug gets the OPAL base & entry in r8 and r9 */
3320 __start(hdr, kbase, 0, 0, 0,
3321 prom_opal_base, prom_opal_entry);
3322 #else
3323 __start(hdr, kbase, 0, 0, 0, 0, 0);
3324 #endif
3325
3326 return 0;
3327 }
3328