1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Procedures for interfacing to Open Firmware.
4  *
5  * Paul Mackerras	August 1996.
6  * Copyright (C) 1996-2005 Paul Mackerras.
7  *
8  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
9  *    {engebret|bergner}@us.ibm.com
10  */
11 
12 #undef DEBUG_PROM
13 
14 /* we cannot use FORTIFY as it brings in new symbols */
15 #define __NO_FORTIFY
16 
17 #include <stdarg.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <linux/init.h>
21 #include <linux/threads.h>
22 #include <linux/spinlock.h>
23 #include <linux/types.h>
24 #include <linux/pci.h>
25 #include <linux/proc_fs.h>
26 #include <linux/delay.h>
27 #include <linux/initrd.h>
28 #include <linux/bitops.h>
29 #include <linux/pgtable.h>
30 #include <asm/prom.h>
31 #include <asm/rtas.h>
32 #include <asm/page.h>
33 #include <asm/processor.h>
34 #include <asm/irq.h>
35 #include <asm/io.h>
36 #include <asm/smp.h>
37 #include <asm/mmu.h>
38 #include <asm/iommu.h>
39 #include <asm/btext.h>
40 #include <asm/sections.h>
41 #include <asm/machdep.h>
42 #include <asm/asm-prototypes.h>
43 #include <asm/ultravisor-api.h>
44 
45 #include <linux/linux_logo.h>
46 
47 /* All of prom_init bss lives here */
48 #define __prombss __section(".bss.prominit")
49 
50 /*
51  * Eventually bump that one up
52  */
53 #define DEVTREE_CHUNK_SIZE	0x100000
54 
55 /*
56  * This is the size of the local memory reserve map that gets copied
57  * into the boot params passed to the kernel. That size is totally
58  * flexible as the kernel just reads the list until it encounters an
59  * entry with size 0, so it can be changed without breaking binary
60  * compatibility
61  */
62 #define MEM_RESERVE_MAP_SIZE	8
63 
64 /*
65  * prom_init() is called very early on, before the kernel text
66  * and data have been mapped to KERNELBASE.  At this point the code
67  * is running at whatever address it has been loaded at.
68  * On ppc32 we compile with -mrelocatable, which means that references
69  * to extern and static variables get relocated automatically.
70  * ppc64 objects are always relocatable, we just need to relocate the
71  * TOC.
72  *
73  * Because OF may have mapped I/O devices into the area starting at
74  * KERNELBASE, particularly on CHRP machines, we can't safely call
75  * OF once the kernel has been mapped to KERNELBASE.  Therefore all
76  * OF calls must be done within prom_init().
77  *
78  * ADDR is used in calls to call_prom.  The 4th and following
79  * arguments to call_prom should be 32-bit values.
80  * On ppc64, 64 bit values are truncated to 32 bits (and
81  * fortunately don't get interpreted as two arguments).
82  */
83 #define ADDR(x)		(u32)(unsigned long)(x)
84 
85 #ifdef CONFIG_PPC64
86 #define OF_WORKAROUNDS	0
87 #else
88 #define OF_WORKAROUNDS	of_workarounds
89 static int of_workarounds __prombss;
90 #endif
91 
92 #define OF_WA_CLAIM	1	/* do phys/virt claim separately, then map */
93 #define OF_WA_LONGTRAIL	2	/* work around longtrail bugs */
94 
95 #define PROM_BUG() do {						\
96         prom_printf("kernel BUG at %s line 0x%x!\n",		\
97 		    __FILE__, __LINE__);			\
98 	__builtin_trap();					\
99 } while (0)
100 
101 #ifdef DEBUG_PROM
102 #define prom_debug(x...)	prom_printf(x)
103 #else
104 #define prom_debug(x...)	do { } while (0)
105 #endif
106 
107 
108 typedef u32 prom_arg_t;
109 
110 struct prom_args {
111         __be32 service;
112         __be32 nargs;
113         __be32 nret;
114         __be32 args[10];
115 };
116 
117 struct prom_t {
118 	ihandle root;
119 	phandle chosen;
120 	int cpu;
121 	ihandle stdout;
122 	ihandle mmumap;
123 	ihandle memory;
124 };
125 
126 struct mem_map_entry {
127 	__be64	base;
128 	__be64	size;
129 };
130 
131 typedef __be32 cell_t;
132 
133 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
134 		    unsigned long r6, unsigned long r7, unsigned long r8,
135 		    unsigned long r9);
136 
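/*
 * On ppc64 the firmware entry point must be reached through an assembly
 * helper (enter_prom); on ppc32 the entry address can simply be called as
 * a C function pointer.
 */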
137 #ifdef CONFIG_PPC64
138 extern int enter_prom(struct prom_args *args, unsigned long entry);
139 #else
140 static inline int enter_prom(struct prom_args *args, unsigned long entry)
141 {
142 	return ((int (*)(struct prom_args *))entry)(args);
143 }
144 #endif
145 
146 extern void copy_and_flush(unsigned long dest, unsigned long src,
147 			   unsigned long size, unsigned long offset);
148 
149 /* prom structure */
150 static struct prom_t __prombss prom;
151 
152 static unsigned long __prombss prom_entry;
153 
154 static char __prombss of_stdout_device[256];
155 static char __prombss prom_scratch[256];
156 
157 static unsigned long __prombss dt_header_start;
158 static unsigned long __prombss dt_struct_start, dt_struct_end;
159 static unsigned long __prombss dt_string_start, dt_string_end;
160 
161 static unsigned long __prombss prom_initrd_start, prom_initrd_end;
162 
163 #ifdef CONFIG_PPC64
164 static int __prombss prom_iommu_force_on;
165 static int __prombss prom_iommu_off;
166 static unsigned long __prombss prom_tce_alloc_start;
167 static unsigned long __prombss prom_tce_alloc_end;
168 #endif
169 
170 #ifdef CONFIG_PPC_PSERIES
171 static bool __prombss prom_radix_disable;
172 static bool __prombss prom_radix_gtse_disable;
173 static bool __prombss prom_xive_disable;
174 #endif
175 
176 #ifdef CONFIG_PPC_SVM
177 static bool __prombss prom_svm_enable;
178 #endif
179 
180 struct platform_support {
181 	bool hash_mmu;
182 	bool radix_mmu;
183 	bool radix_gtse;
184 	bool xive;
185 };
186 
187 /* Platform codes are now obsolete in the kernel. They are now only used within
188  * this file and will ultimately go away too. Feel free to change them if you
189  * need to; they are not shared with anything outside of this file anymore
190  */
191 #define PLATFORM_PSERIES	0x0100
192 #define PLATFORM_PSERIES_LPAR	0x0101
193 #define PLATFORM_LPAR		0x0001
194 #define PLATFORM_POWERMAC	0x0400
195 #define PLATFORM_GENERIC	0x0500
196 
197 static int __prombss of_platform;
198 
199 static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];
200 
201 static unsigned long __prombss prom_memory_limit;
202 
203 static unsigned long __prombss alloc_top;
204 static unsigned long __prombss alloc_top_high;
205 static unsigned long __prombss alloc_bottom;
206 static unsigned long __prombss rmo_top;
207 static unsigned long __prombss ram_top;
208 
209 static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
210 static int __prombss mem_reserve_cnt;
211 
212 static cell_t __prombss regbuf[1024];
213 
214 static bool  __prombss rtas_has_query_cpu_stopped;
215 
216 
217 /*
218  * Error results ... some OF calls will return "-1" on error, some
219  * will return 0, some will return either. To simplify, here are
220  * macros to use with any ihandle or phandle return value to check if
221  * it is valid
222  */
223 
224 #define PROM_ERROR		(-1u)
225 #define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)
226 #define IHANDLE_VALID(i)	((i) != 0 && (i) != PROM_ERROR)
227 
228 /* Copied from lib/string.c and lib/kstrtox.c */
229 
230 static int __init prom_strcmp(const char *cs, const char *ct)
231 {
232 	unsigned char c1, c2;
233 
234 	while (1) {
235 		c1 = *cs++;
236 		c2 = *ct++;
237 		if (c1 != c2)
238 			return c1 < c2 ? -1 : 1;
239 		if (!c1)
240 			break;
241 	}
242 	return 0;
243 }
244 
245 static char __init *prom_strcpy(char *dest, const char *src)
246 {
247 	char *tmp = dest;
248 
249 	while ((*dest++ = *src++) != '\0')
250 		/* nothing */;
251 	return tmp;
252 }
253 
254 static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
255 {
256 	unsigned char c1, c2;
257 
258 	while (count) {
259 		c1 = *cs++;
260 		c2 = *ct++;
261 		if (c1 != c2)
262 			return c1 < c2 ? -1 : 1;
263 		if (!c1)
264 			break;
265 		count--;
266 	}
267 	return 0;
268 }
269 
270 static size_t __init prom_strlen(const char *s)
271 {
272 	const char *sc;
273 
274 	for (sc = s; *sc != '\0'; ++sc)
275 		/* nothing */;
276 	return sc - s;
277 }
278 
279 static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
280 {
281 	const unsigned char *su1, *su2;
282 	int res = 0;
283 
284 	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
285 		if ((res = *su1 - *su2) != 0)
286 			break;
287 	return res;
288 }
289 
290 static char __init *prom_strstr(const char *s1, const char *s2)
291 {
292 	size_t l1, l2;
293 
294 	l2 = prom_strlen(s2);
295 	if (!l2)
296 		return (char *)s1;
297 	l1 = prom_strlen(s1);
298 	while (l1 >= l2) {
299 		l1--;
300 		if (!prom_memcmp(s1, s2, l2))
301 			return (char *)s1;
302 		s1++;
303 	}
304 	return NULL;
305 }
306 
307 static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
308 {
309 	size_t dsize = prom_strlen(dest);
310 	size_t len = prom_strlen(src);
311 	size_t res = dsize + len;
312 
313 	/* This would be a bug */
314 	if (dsize >= count)
315 		return count;
316 
317 	dest += dsize;
318 	count -= dsize;
319 	if (len >= count)
320 		len = count-1;
321 	memcpy(dest, src, len);
322 	dest[len] = 0;
323 	return res;
324 
325 }
326 
327 #ifdef CONFIG_PPC_PSERIES
328 static int __init prom_strtobool(const char *s, bool *res)
329 {
330 	if (!s)
331 		return -EINVAL;
332 
333 	switch (s[0]) {
334 	case 'y':
335 	case 'Y':
336 	case '1':
337 		*res = true;
338 		return 0;
339 	case 'n':
340 	case 'N':
341 	case '0':
342 		*res = false;
343 		return 0;
344 	case 'o':
345 	case 'O':
346 		switch (s[1]) {
347 		case 'n':
348 		case 'N':
349 			*res = true;
350 			return 0;
351 		case 'f':
352 		case 'F':
353 			*res = false;
354 			return 0;
355 		default:
356 			break;
357 		}
358 	default:
359 		break;
360 	}
361 
362 	return -EINVAL;
363 }
364 #endif
365 
366 /* This is the one and *ONLY* place where we actually call open
367  * firmware.
368  */
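/*
 * For example, prom_print() below issues
 *	call_prom("write", 3, 1, prom.stdout, p, q - p);
 * i.e. the OF "write" service with three input arguments and one result.
 */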
369 
370 static int __init call_prom(const char *service, int nargs, int nret, ...)
371 {
372 	int i;
373 	struct prom_args args;
374 	va_list list;
375 
376 	args.service = cpu_to_be32(ADDR(service));
377 	args.nargs = cpu_to_be32(nargs);
378 	args.nret = cpu_to_be32(nret);
379 
380 	va_start(list, nret);
381 	for (i = 0; i < nargs; i++)
382 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
383 	va_end(list);
384 
385 	for (i = 0; i < nret; i++)
386 		args.args[nargs+i] = 0;
387 
388 	if (enter_prom(&args, prom_entry) < 0)
389 		return PROM_ERROR;
390 
391 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
392 }
393 
394 static int __init call_prom_ret(const char *service, int nargs, int nret,
395 				prom_arg_t *rets, ...)
396 {
397 	int i;
398 	struct prom_args args;
399 	va_list list;
400 
401 	args.service = cpu_to_be32(ADDR(service));
402 	args.nargs = cpu_to_be32(nargs);
403 	args.nret = cpu_to_be32(nret);
404 
405 	va_start(list, rets);
406 	for (i = 0; i < nargs; i++)
407 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
408 	va_end(list);
409 
410 	for (i = 0; i < nret; i++)
411 		args.args[nargs+i] = 0;
412 
413 	if (enter_prom(&args, prom_entry) < 0)
414 		return PROM_ERROR;
415 
416 	if (rets != NULL)
417 		for (i = 1; i < nret; ++i)
418 			rets[i-1] = be32_to_cpu(args.args[nargs+i]);
419 
420 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
421 }
422 
423 
424 static void __init prom_print(const char *msg)
425 {
426 	const char *p, *q;
427 
428 	if (prom.stdout == 0)
429 		return;
430 
431 	for (p = msg; *p != 0; p = q) {
432 		for (q = p; *q != 0 && *q != '\n'; ++q)
433 			;
434 		if (q > p)
435 			call_prom("write", 3, 1, prom.stdout, p, q - p);
436 		if (*q == 0)
437 			break;
438 		++q;
439 		call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
440 	}
441 }
442 
443 
444 /*
445  * Both prom_print_hex & prom_print_dec take an unsigned long as input so that
446  * we do not need __udivdi3 or __umoddi3 on 32-bit.
447  */
448 static void __init prom_print_hex(unsigned long val)
449 {
450 	int i, nibbles = sizeof(val)*2;
451 	char buf[sizeof(val)*2+1];
452 
453 	for (i = nibbles-1;  i >= 0;  i--) {
454 		buf[i] = (val & 0xf) + '0';
455 		if (buf[i] > '9')
456 			buf[i] += ('a'-'0'-10);
457 		val >>= 4;
458 	}
459 	buf[nibbles] = '\0';
460 	call_prom("write", 3, 1, prom.stdout, buf, nibbles);
461 }
462 
463 /* max number of decimal digits in an unsigned long */
464 #define UL_DIGITS 21
465 static void __init prom_print_dec(unsigned long val)
466 {
467 	int i, size;
468 	char buf[UL_DIGITS+1];
469 
470 	for (i = UL_DIGITS-1; i >= 0;  i--) {
471 		buf[i] = (val % 10) + '0';
472 		val = val/10;
473 		if (val == 0)
474 			break;
475 	}
476 	/* shift stuff down */
477 	size = UL_DIGITS - i;
478 	call_prom("write", 3, 1, prom.stdout, buf+i, size);
479 }
480 
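/*
 * Minimal printf: only %s, %x, %u and %d are handled, each optionally
 * prefixed by one or two 'l' length modifiers.
 */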
481 __printf(1, 2)
482 static void __init prom_printf(const char *format, ...)
483 {
484 	const char *p, *q, *s;
485 	va_list args;
486 	unsigned long v;
487 	long vs;
488 	int n = 0;
489 
490 	va_start(args, format);
491 	for (p = format; *p != 0; p = q) {
492 		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
493 			;
494 		if (q > p)
495 			call_prom("write", 3, 1, prom.stdout, p, q - p);
496 		if (*q == 0)
497 			break;
498 		if (*q == '\n') {
499 			++q;
500 			call_prom("write", 3, 1, prom.stdout,
501 				  ADDR("\r\n"), 2);
502 			continue;
503 		}
504 		++q;
505 		if (*q == 0)
506 			break;
507 		while (*q == 'l') {
508 			++q;
509 			++n;
510 		}
511 		switch (*q) {
512 		case 's':
513 			++q;
514 			s = va_arg(args, const char *);
515 			prom_print(s);
516 			break;
517 		case 'x':
518 			++q;
519 			switch (n) {
520 			case 0:
521 				v = va_arg(args, unsigned int);
522 				break;
523 			case 1:
524 				v = va_arg(args, unsigned long);
525 				break;
526 			case 2:
527 			default:
528 				v = va_arg(args, unsigned long long);
529 				break;
530 			}
531 			prom_print_hex(v);
532 			break;
533 		case 'u':
534 			++q;
535 			switch (n) {
536 			case 0:
537 				v = va_arg(args, unsigned int);
538 				break;
539 			case 1:
540 				v = va_arg(args, unsigned long);
541 				break;
542 			case 2:
543 			default:
544 				v = va_arg(args, unsigned long long);
545 				break;
546 			}
547 			prom_print_dec(v);
548 			break;
549 		case 'd':
550 			++q;
551 			switch (n) {
552 			case 0:
553 				vs = va_arg(args, int);
554 				break;
555 			case 1:
556 				vs = va_arg(args, long);
557 				break;
558 			case 2:
559 			default:
560 				vs = va_arg(args, long long);
561 				break;
562 			}
563 			if (vs < 0) {
564 				prom_print("-");
565 				vs = -vs;
566 			}
567 			prom_print_dec(vs);
568 			break;
569 		}
570 	}
571 	va_end(args);
572 }
573 
574 
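/*
 * Claim a range of memory from Open Firmware.  With OF_WA_CLAIM the
 * physical and virtual claims and the MMU mapping are issued as separate
 * "call-method" invocations; otherwise a single "claim" call is enough.
 */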
575 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
576 				unsigned long align)
577 {
578 
579 	if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
580 		/*
581 		 * Old OF requires we claim physical and virtual separately
582 		 * and then map explicitly (assuming virtual mode)
583 		 */
584 		int ret;
585 		prom_arg_t result;
586 
587 		ret = call_prom_ret("call-method", 5, 2, &result,
588 				    ADDR("claim"), prom.memory,
589 				    align, size, virt);
590 		if (ret != 0 || result == -1)
591 			return -1;
592 		ret = call_prom_ret("call-method", 5, 2, &result,
593 				    ADDR("claim"), prom.mmumap,
594 				    align, size, virt);
595 		if (ret != 0) {
596 			call_prom("call-method", 4, 1, ADDR("release"),
597 				  prom.memory, size, virt);
598 			return -1;
599 		}
600 		/* the 0x12 is M (coherence) + PP == read/write */
601 		call_prom("call-method", 6, 1,
602 			  ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
603 		return virt;
604 	}
605 	return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
606 			 (prom_arg_t)align);
607 }
608 
609 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
610 {
611 	prom_print(reason);
612 	/* Do not call exit because it clears the screen on pmac;
613 	 * it also causes some sort of double-fault on early pmacs */
614 	if (of_platform == PLATFORM_POWERMAC)
615 		asm("trap\n");
616 
617 	/* ToDo: should put up an SRC here on pSeries */
618 	call_prom("exit", 0, 0);
619 
620 	for (;;)			/* should never get here */
621 		;
622 }
623 
624 
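/*
 * Walk the device tree depth-first: try the node's first child, then its
 * next peer, then the next peer of each ancestor.  Returns 0 once the
 * whole tree has been visited.
 */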
625 static int __init prom_next_node(phandle *nodep)
626 {
627 	phandle node;
628 
629 	if ((node = *nodep) != 0
630 	    && (*nodep = call_prom("child", 1, 1, node)) != 0)
631 		return 1;
632 	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
633 		return 1;
634 	for (;;) {
635 		if ((node = call_prom("parent", 1, 1, node)) == 0)
636 			return 0;
637 		if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
638 			return 1;
639 	}
640 }
641 
642 static inline int __init prom_getprop(phandle node, const char *pname,
643 				      void *value, size_t valuelen)
644 {
645 	return call_prom("getprop", 4, 1, node, ADDR(pname),
646 			 (u32)(unsigned long) value, (u32) valuelen);
647 }
648 
649 static inline int __init prom_getproplen(phandle node, const char *pname)
650 {
651 	return call_prom("getproplen", 2, 1, node, ADDR(pname));
652 }
653 
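/*
 * add_string() and tohex() below build the Forth command used by the
 * longtrail workaround in prom_setprop().
 */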
654 static void add_string(char **str, const char *q)
655 {
656 	char *p = *str;
657 
658 	while (*q)
659 		*p++ = *q++;
660 	*p++ = ' ';
661 	*str = p;
662 }
663 
664 static char *tohex(unsigned int x)
665 {
666 	static const char digits[] __initconst = "0123456789abcdef";
667 	static char result[9] __prombss;
668 	int i;
669 
670 	result[8] = 0;
671 	i = 8;
672 	do {
673 		--i;
674 		result[i] = digits[x & 0xf];
675 		x >>= 4;
676 	} while (x != 0 && i > 0);
677 	return &result[i];
678 }
679 
680 static int __init prom_setprop(phandle node, const char *nodename,
681 			       const char *pname, void *value, size_t valuelen)
682 {
683 	char cmd[256], *p;
684 
685 	if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
686 		return call_prom("setprop", 4, 1, node, ADDR(pname),
687 				 (u32)(unsigned long) value, (u32) valuelen);
688 
689 	/* gah... setprop doesn't work on longtrail, have to use interpret */
690 	p = cmd;
691 	add_string(&p, "dev");
692 	add_string(&p, nodename);
693 	add_string(&p, tohex((u32)(unsigned long) value));
694 	add_string(&p, tohex(valuelen));
695 	add_string(&p, tohex(ADDR(pname)));
696 	add_string(&p, tohex(prom_strlen(pname)));
697 	add_string(&p, "property");
698 	*p = 0;
699 	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
700 }
701 
702 /* We can't use the standard versions because of relocation headaches. */
703 #define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
704 			 || ('a' <= (c) && (c) <= 'f') \
705 			 || ('A' <= (c) && (c) <= 'F'))
706 
707 #define isdigit(c)	('0' <= (c) && (c) <= '9')
708 #define islower(c)	('a' <= (c) && (c) <= 'z')
709 #define toupper(c)	(islower(c) ? ((c) - 'a' + 'A') : (c))
710 
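/*
 * Minimal strtoul: handles decimal, octal (leading 0) and hex (leading 0x),
 * which is enough for the "mem=" parsing done by prom_memparse() below.
 */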
711 static unsigned long prom_strtoul(const char *cp, const char **endp)
712 {
713 	unsigned long result = 0, base = 10, value;
714 
715 	if (*cp == '0') {
716 		base = 8;
717 		cp++;
718 		if (toupper(*cp) == 'X') {
719 			cp++;
720 			base = 16;
721 		}
722 	}
723 
724 	while (isxdigit(*cp) &&
725 	       (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
726 		result = result * base + value;
727 		cp++;
728 	}
729 
730 	if (endp)
731 		*endp = cp;
732 
733 	return result;
734 }
735 
736 static unsigned long prom_memparse(const char *ptr, const char **retptr)
737 {
738 	unsigned long ret = prom_strtoul(ptr, retptr);
739 	int shift = 0;
740 
741 	/*
742 	 * We can't use a switch here because GCC *may* generate a
743 	 * jump table which won't work, because we're not running at
744 	 * the address we're linked at.
745 	 */
746 	if ('G' == **retptr || 'g' == **retptr)
747 		shift = 30;
748 
749 	if ('M' == **retptr || 'm' == **retptr)
750 		shift = 20;
751 
752 	if ('K' == **retptr || 'k' == **retptr)
753 		shift = 10;
754 
755 	if (shift) {
756 		ret <<= shift;
757 		(*retptr)++;
758 	}
759 
760 	return ret;
761 }
762 
763 /*
764  * Early parsing of the command line passed to the kernel, used for
765  * "mem=x" and the options that affect the iommu
766  */
767 static void __init early_cmdline_parse(void)
768 {
769 	const char *opt;
770 
771 	char *p;
772 	int l = 0;
773 
774 	prom_cmd_line[0] = 0;
775 	p = prom_cmd_line;
776 
777 	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
778 		l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
779 
780 	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
781 		prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
782 			     sizeof(prom_cmd_line));
783 
784 	prom_printf("command line: %s\n", prom_cmd_line);
785 
786 #ifdef CONFIG_PPC64
787 	opt = prom_strstr(prom_cmd_line, "iommu=");
788 	if (opt) {
789 		prom_printf("iommu opt is: %s\n", opt);
790 		opt += 6;
791 		while (*opt && *opt == ' ')
792 			opt++;
793 		if (!prom_strncmp(opt, "off", 3))
794 			prom_iommu_off = 1;
795 		else if (!prom_strncmp(opt, "force", 5))
796 			prom_iommu_force_on = 1;
797 	}
798 #endif
799 	opt = prom_strstr(prom_cmd_line, "mem=");
800 	if (opt) {
801 		opt += 4;
802 		prom_memory_limit = prom_memparse(opt, (const char **)&opt);
803 #ifdef CONFIG_PPC64
804 		/* Align to 16 MB == size of ppc64 large page */
805 		prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
806 #endif
807 	}
808 
809 #ifdef CONFIG_PPC_PSERIES
810 	prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
811 	opt = prom_strstr(prom_cmd_line, "disable_radix");
812 	if (opt) {
813 		opt += 13;
814 		if (*opt && *opt == '=') {
815 			bool val;
816 
817 			if (prom_strtobool(++opt, &val))
818 				prom_radix_disable = false;
819 			else
820 				prom_radix_disable = val;
821 		} else
822 			prom_radix_disable = true;
823 	}
824 	if (prom_radix_disable)
825 		prom_debug("Radix disabled from cmdline\n");
826 
827 	opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
828 	if (opt) {
829 		prom_radix_gtse_disable = true;
830 		prom_debug("Radix GTSE disabled from cmdline\n");
831 	}
832 
833 	opt = prom_strstr(prom_cmd_line, "xive=off");
834 	if (opt) {
835 		prom_xive_disable = true;
836 		prom_debug("XIVE disabled from cmdline\n");
837 	}
838 #endif /* CONFIG_PPC_PSERIES */
839 
840 #ifdef CONFIG_PPC_SVM
841 	opt = prom_strstr(prom_cmd_line, "svm=");
842 	if (opt) {
843 		bool val;
844 
845 		opt += sizeof("svm=") - 1;
846 		if (!prom_strtobool(opt, &val))
847 			prom_svm_enable = val;
848 	}
849 #endif /* CONFIG_PPC_SVM */
850 }
851 
852 #ifdef CONFIG_PPC_PSERIES
853 /*
854  * The architecture vector has an array of PVR mask/value pairs,
855  * followed by # option vectors - 1, followed by the option vectors.
856  *
857  * See prom.h for the definition of the bits specified in the
858  * architecture vector.
859  */
860 
861 /* Firmware expects the value to be n - 1, where n is the # of vectors */
862 #define NUM_VECTORS(n)		((n) - 1)
863 
864 /*
865  * Firmware expects 1 + n - 2, where n is the length of the option vector in
866  * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
867  */
868 #define VECTOR_LENGTH(n)	(1 + (n) - 2)
869 
870 struct option_vector1 {
871 	u8 byte1;
872 	u8 arch_versions;
873 	u8 arch_versions3;
874 } __packed;
875 
876 struct option_vector2 {
877 	u8 byte1;
878 	__be16 reserved;
879 	__be32 real_base;
880 	__be32 real_size;
881 	__be32 virt_base;
882 	__be32 virt_size;
883 	__be32 load_base;
884 	__be32 min_rma;
885 	__be32 min_load;
886 	u8 min_rma_percent;
887 	u8 max_pft_size;
888 } __packed;
889 
890 struct option_vector3 {
891 	u8 byte1;
892 	u8 byte2;
893 } __packed;
894 
895 struct option_vector4 {
896 	u8 byte1;
897 	u8 min_vp_cap;
898 } __packed;
899 
900 struct option_vector5 {
901 	u8 byte1;
902 	u8 byte2;
903 	u8 byte3;
904 	u8 cmo;
905 	u8 associativity;
906 	u8 bin_opts;
907 	u8 micro_checkpoint;
908 	u8 reserved0;
909 	__be32 max_cpus;
910 	__be16 papr_level;
911 	__be16 reserved1;
912 	u8 platform_facilities;
913 	u8 reserved2;
914 	__be16 reserved3;
915 	u8 subprocessors;
916 	u8 byte22;
917 	u8 intarch;
918 	u8 mmu;
919 	u8 hash_ext;
920 	u8 radix_ext;
921 } __packed;
922 
923 struct option_vector6 {
924 	u8 reserved;
925 	u8 secondary_pteg;
926 	u8 os_name;
927 } __packed;
928 
929 struct ibm_arch_vec {
930 	struct { u32 mask, val; } pvrs[14];
931 
932 	u8 num_vectors;
933 
934 	u8 vec1_len;
935 	struct option_vector1 vec1;
936 
937 	u8 vec2_len;
938 	struct option_vector2 vec2;
939 
940 	u8 vec3_len;
941 	struct option_vector3 vec3;
942 
943 	u8 vec4_len;
944 	struct option_vector4 vec4;
945 
946 	u8 vec5_len;
947 	struct option_vector5 vec5;
948 
949 	u8 vec6_len;
950 	struct option_vector6 vec6;
951 } __packed;
952 
953 static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
954 	.pvrs = {
955 		{
956 			.mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
957 			.val  = cpu_to_be32(0x003a0000),
958 		},
959 		{
960 			.mask = cpu_to_be32(0xffff0000), /* POWER6 */
961 			.val  = cpu_to_be32(0x003e0000),
962 		},
963 		{
964 			.mask = cpu_to_be32(0xffff0000), /* POWER7 */
965 			.val  = cpu_to_be32(0x003f0000),
966 		},
967 		{
968 			.mask = cpu_to_be32(0xffff0000), /* POWER8E */
969 			.val  = cpu_to_be32(0x004b0000),
970 		},
971 		{
972 			.mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
973 			.val  = cpu_to_be32(0x004c0000),
974 		},
975 		{
976 			.mask = cpu_to_be32(0xffff0000), /* POWER8 */
977 			.val  = cpu_to_be32(0x004d0000),
978 		},
979 		{
980 			.mask = cpu_to_be32(0xffff0000), /* POWER9 */
981 			.val  = cpu_to_be32(0x004e0000),
982 		},
983 		{
984 			.mask = cpu_to_be32(0xffff0000), /* POWER10 */
985 			.val  = cpu_to_be32(0x00800000),
986 		},
987 		{
988 			.mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
989 			.val  = cpu_to_be32(0x0f000006),
990 		},
991 		{
992 			.mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
993 			.val  = cpu_to_be32(0x0f000005),
994 		},
995 		{
996 			.mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
997 			.val  = cpu_to_be32(0x0f000004),
998 		},
999 		{
1000 			.mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
1001 			.val  = cpu_to_be32(0x0f000003),
1002 		},
1003 		{
1004 			.mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
1005 			.val  = cpu_to_be32(0x0f000002),
1006 		},
1007 		{
1008 			.mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
1009 			.val  = cpu_to_be32(0x0f000001),
1010 		},
1011 	},
1012 
1013 	.num_vectors = NUM_VECTORS(6),
1014 
1015 	.vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
1016 	.vec1 = {
1017 		.byte1 = 0,
1018 		.arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
1019 				 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
1020 		.arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
1021 	},
1022 
1023 	.vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
1024 	/* option vector 2: Open Firmware options supported */
1025 	.vec2 = {
1026 		.byte1 = OV2_REAL_MODE,
1027 		.reserved = 0,
1028 		.real_base = cpu_to_be32(0xffffffff),
1029 		.real_size = cpu_to_be32(0xffffffff),
1030 		.virt_base = cpu_to_be32(0xffffffff),
1031 		.virt_size = cpu_to_be32(0xffffffff),
1032 		.load_base = cpu_to_be32(0xffffffff),
1033 		.min_rma = cpu_to_be32(512),		/* 512MB min RMA */
1034 		.min_load = cpu_to_be32(0xffffffff),	/* full client load */
1035 		.min_rma_percent = 0,	/* min RMA percentage of total RAM */
1036 		.max_pft_size = 48,	/* max log_2(hash table size) */
1037 	},
1038 
1039 	.vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
1040 	/* option vector 3: processor options supported */
1041 	.vec3 = {
1042 		.byte1 = 0,			/* don't ignore, don't halt */
1043 		.byte2 = OV3_FP | OV3_VMX | OV3_DFP,
1044 	},
1045 
1046 	.vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
1047 	/* option vector 4: IBM PAPR implementation */
1048 	.vec4 = {
1049 		.byte1 = 0,			/* don't halt */
1050 		.min_vp_cap = OV4_MIN_ENT_CAP,	/* minimum VP entitled capacity */
1051 	},
1052 
1053 	.vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
1054 	/* option vector 5: PAPR/OF options */
1055 	.vec5 = {
1056 		.byte1 = 0,				/* don't ignore, don't halt */
1057 		.byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
1058 		OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
1059 #ifdef CONFIG_PCI_MSI
1060 		/* PCIe/MSI support.  Without MSI full PCIe is not supported */
1061 		OV5_FEAT(OV5_MSI),
1062 #else
1063 		0,
1064 #endif
1065 		.byte3 = 0,
1066 		.cmo =
1067 #ifdef CONFIG_PPC_SMLPAR
1068 		OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
1069 #else
1070 		0,
1071 #endif
1072 		.associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
1073 		.bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
1074 		.micro_checkpoint = 0,
1075 		.reserved0 = 0,
1076 		.max_cpus = cpu_to_be32(NR_CPUS),	/* number of cores supported */
1077 		.papr_level = 0,
1078 		.reserved1 = 0,
1079 		.platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
1080 		.reserved2 = 0,
1081 		.reserved3 = 0,
1082 		.subprocessors = 1,
1083 		.byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
1084 		.intarch = 0,
1085 		.mmu = 0,
1086 		.hash_ext = 0,
1087 		.radix_ext = 0,
1088 	},
1089 
1090 	/* option vector 6: IBM PAPR hints */
1091 	.vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
1092 	.vec6 = {
1093 		.reserved = 0,
1094 		.secondary_pteg = 0,
1095 		.os_name = OV6_LINUX,
1096 	},
1097 };
1098 
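/*
 * Working copy: prom_check_platform_support() copies the template above in
 * here and patches the vec5 MMU/interrupt fields before the vector is
 * passed to ibm,client-architecture-support.
 */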
1099 static struct ibm_arch_vec __prombss ibm_architecture_vec  ____cacheline_aligned;
1100 
1101 /* Old method - ELF header with PT_NOTE sections only works on BE */
1102 #ifdef __BIG_ENDIAN__
1103 static const struct fake_elf {
1104 	Elf32_Ehdr	elfhdr;
1105 	Elf32_Phdr	phdr[2];
1106 	struct chrpnote {
1107 		u32	namesz;
1108 		u32	descsz;
1109 		u32	type;
1110 		char	name[8];	/* "PowerPC" */
1111 		struct chrpdesc {
1112 			u32	real_mode;
1113 			u32	real_base;
1114 			u32	real_size;
1115 			u32	virt_base;
1116 			u32	virt_size;
1117 			u32	load_base;
1118 		} chrpdesc;
1119 	} chrpnote;
1120 	struct rpanote {
1121 		u32	namesz;
1122 		u32	descsz;
1123 		u32	type;
1124 		char	name[24];	/* "IBM,RPA-Client-Config" */
1125 		struct rpadesc {
1126 			u32	lpar_affinity;
1127 			u32	min_rmo_size;
1128 			u32	min_rmo_percent;
1129 			u32	max_pft_size;
1130 			u32	splpar;
1131 			u32	min_load;
1132 			u32	new_mem_def;
1133 			u32	ignore_me;
1134 		} rpadesc;
1135 	} rpanote;
1136 } fake_elf __initconst = {
1137 	.elfhdr = {
1138 		.e_ident = { 0x7f, 'E', 'L', 'F',
1139 			     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
1140 		.e_type = ET_EXEC,	/* yeah right */
1141 		.e_machine = EM_PPC,
1142 		.e_version = EV_CURRENT,
1143 		.e_phoff = offsetof(struct fake_elf, phdr),
1144 		.e_phentsize = sizeof(Elf32_Phdr),
1145 		.e_phnum = 2
1146 	},
1147 	.phdr = {
1148 		[0] = {
1149 			.p_type = PT_NOTE,
1150 			.p_offset = offsetof(struct fake_elf, chrpnote),
1151 			.p_filesz = sizeof(struct chrpnote)
1152 		}, [1] = {
1153 			.p_type = PT_NOTE,
1154 			.p_offset = offsetof(struct fake_elf, rpanote),
1155 			.p_filesz = sizeof(struct rpanote)
1156 		}
1157 	},
1158 	.chrpnote = {
1159 		.namesz = sizeof("PowerPC"),
1160 		.descsz = sizeof(struct chrpdesc),
1161 		.type = 0x1275,
1162 		.name = "PowerPC",
1163 		.chrpdesc = {
1164 			.real_mode = ~0U,	/* ~0 means "don't care" */
1165 			.real_base = ~0U,
1166 			.real_size = ~0U,
1167 			.virt_base = ~0U,
1168 			.virt_size = ~0U,
1169 			.load_base = ~0U
1170 		},
1171 	},
1172 	.rpanote = {
1173 		.namesz = sizeof("IBM,RPA-Client-Config"),
1174 		.descsz = sizeof(struct rpadesc),
1175 		.type = 0x12759999,
1176 		.name = "IBM,RPA-Client-Config",
1177 		.rpadesc = {
1178 			.lpar_affinity = 0,
1179 			.min_rmo_size = 64,	/* in megabytes */
1180 			.min_rmo_percent = 0,
1181 			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
1182 			.splpar = 1,
1183 			.min_load = ~0U,
1184 			.new_mem_def = 0
1185 		}
1186 	}
1187 };
1188 #endif /* __BIG_ENDIAN__ */
1189 
1190 static int __init prom_count_smt_threads(void)
1191 {
1192 	phandle node;
1193 	char type[64];
1194 	unsigned int plen;
1195 
1196 	/* Pick up the first CPU node we can find */
1197 	for (node = 0; prom_next_node(&node); ) {
1198 		type[0] = 0;
1199 		prom_getprop(node, "device_type", type, sizeof(type));
1200 
1201 		if (prom_strcmp(type, "cpu"))
1202 			continue;
1203 		/*
1204 		 * There is an entry for each smt thread, each entry being
1205 		 * 4 bytes long.  All cpus should have the same number of
1206 		 * smt threads, so return after finding the first.
1207 		 */
1208 		plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1209 		if (plen == PROM_ERROR)
1210 			break;
1211 		plen >>= 2;
1212 		prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1213 
1214 		/* Sanity check */
1215 		if (plen < 1 || plen > 64) {
1216 			prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1217 				    (unsigned long)plen);
1218 			return 1;
1219 		}
1220 		return plen;
1221 	}
1222 	prom_debug("No threads found, assuming 1 per core\n");
1223 
1224 	return 1;
1225 
1226 }
1227 
1228 static void __init prom_parse_mmu_model(u8 val,
1229 					struct platform_support *support)
1230 {
1231 	switch (val) {
1232 	case OV5_FEAT(OV5_MMU_DYNAMIC):
1233 	case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1234 		prom_debug("MMU - either supported\n");
1235 		support->radix_mmu = !prom_radix_disable;
1236 		support->hash_mmu = true;
1237 		break;
1238 	case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1239 		prom_debug("MMU - radix only\n");
1240 		if (prom_radix_disable) {
1241 			/*
1242 			 * If we __have__ to do radix, we're better off ignoring
1243 			 * the command line rather than not booting.
1244 			 */
1245 			prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1246 		}
1247 		support->radix_mmu = true;
1248 		break;
1249 	case OV5_FEAT(OV5_MMU_HASH):
1250 		prom_debug("MMU - hash only\n");
1251 		support->hash_mmu = true;
1252 		break;
1253 	default:
1254 		prom_debug("Unknown mmu support option: 0x%x\n", val);
1255 		break;
1256 	}
1257 }
1258 
1259 static void __init prom_parse_xive_model(u8 val,
1260 					 struct platform_support *support)
1261 {
1262 	switch (val) {
1263 	case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1264 		prom_debug("XIVE - either mode supported\n");
1265 		support->xive = !prom_xive_disable;
1266 		break;
1267 	case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1268 		prom_debug("XIVE - exploitation mode supported\n");
1269 		if (prom_xive_disable) {
1270 			/*
1271 			 * If we __have__ to do XIVE, we're better off ignoring
1272 			 * the command line rather than not booting.
1273 			 */
1274 			prom_printf("WARNING: Ignoring cmdline option xive=off\n");
1275 		}
1276 		support->xive = true;
1277 		break;
1278 	case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1279 		prom_debug("XIVE - legacy mode supported\n");
1280 		break;
1281 	default:
1282 		prom_debug("Unknown xive support option: 0x%x\n", val);
1283 		break;
1284 	}
1285 }
1286 
1287 static void __init prom_parse_platform_support(u8 index, u8 val,
1288 					       struct platform_support *support)
1289 {
1290 	switch (index) {
1291 	case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1292 		prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1293 		break;
1294 	case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1295 		if (val & OV5_FEAT(OV5_RADIX_GTSE))
1296 			support->radix_gtse = !prom_radix_gtse_disable;
1297 		break;
1298 	case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1299 		prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1300 				      support);
1301 		break;
1302 	}
1303 }
1304 
1305 static void __init prom_check_platform_support(void)
1306 {
1307 	struct platform_support supported = {
1308 		.hash_mmu = false,
1309 		.radix_mmu = false,
1310 		.radix_gtse = false,
1311 		.xive = false
1312 	};
1313 	int prop_len = prom_getproplen(prom.chosen,
1314 				       "ibm,arch-vec-5-platform-support");
1315 
1316 	/*
1317 	 * First copy the architecture vec template
1318 	 *
1319 	 * use memcpy() instead of *vec = *vec_template so that GCC replaces it
1320 	 * by __memcpy() when KASAN is active
1321 	 */
1322 	memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
1323 	       sizeof(ibm_architecture_vec));
1324 
1325 	if (prop_len > 1) {
1326 		int i;
1327 		u8 vec[8];
1328 		prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1329 			   prop_len);
1330 		if (prop_len > sizeof(vec))
1331 			prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
1332 				    prop_len);
1333 		prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
1334 			     &vec, sizeof(vec));
1335 		for (i = 0; i < sizeof(vec); i += 2) {
1336 			prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
1337 								  , vec[i]
1338 								  , vec[i + 1]);
1339 			prom_parse_platform_support(vec[i], vec[i + 1],
1340 						    &supported);
1341 		}
1342 	}
1343 
1344 	if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1345 		/* Radix preferred - Check if GTSE is also supported */
1346 		prom_debug("Asking for radix\n");
1347 		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1348 		if (supported.radix_gtse)
1349 			ibm_architecture_vec.vec5.radix_ext =
1350 					OV5_FEAT(OV5_RADIX_GTSE);
1351 		else
1352 			prom_debug("Radix GTSE isn't supported\n");
1353 	} else if (supported.hash_mmu) {
1354 		/* Default to hash mmu (if we can) */
1355 		prom_debug("Asking for hash\n");
1356 		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1357 	} else {
1358 		/* We're probably on a legacy hypervisor */
1359 		prom_debug("Assuming legacy hash support\n");
1360 	}
1361 
1362 	if (supported.xive) {
1363 		prom_debug("Asking for XIVE\n");
1364 		ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1365 	}
1366 }
1367 
1368 static void __init prom_send_capabilities(void)
1369 {
1370 	ihandle root;
1371 	prom_arg_t ret;
1372 	u32 cores;
1373 
1374 	/* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1375 	prom_check_platform_support();
1376 
1377 	root = call_prom("open", 1, 1, ADDR("/"));
1378 	if (root != 0) {
1379 		/* We need to tell the FW about the number of cores we support.
1380 		 *
1381 		 * To do that, we count the number of threads on the first core
1382 		 * (we assume this is the same for all cores) and use it to
1383 		 * divide NR_CPUS.
1384 		 */
1385 
1386 		cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1387 		prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
1388 			    cores, NR_CPUS);
1389 
1390 		ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1391 
1392 		/* try calling the ibm,client-architecture-support method */
1393 		prom_printf("Calling ibm,client-architecture-support...");
1394 		if (call_prom_ret("call-method", 3, 2, &ret,
1395 				  ADDR("ibm,client-architecture-support"),
1396 				  root,
1397 				  ADDR(&ibm_architecture_vec)) == 0) {
1398 			/* the call exists... */
1399 			if (ret)
1400 				prom_printf("\nWARNING: ibm,client-architecture"
1401 					    "-support call FAILED!\n");
1402 			call_prom("close", 1, 0, root);
1403 			prom_printf(" done\n");
1404 			return;
1405 		}
1406 		call_prom("close", 1, 0, root);
1407 		prom_printf(" not implemented\n");
1408 	}
1409 
1410 #ifdef __BIG_ENDIAN__
1411 	{
1412 		ihandle elfloader;
1413 
1414 		/* no ibm,client-architecture-support call, try the old way */
1415 		elfloader = call_prom("open", 1, 1,
1416 				      ADDR("/packages/elf-loader"));
1417 		if (elfloader == 0) {
1418 			prom_printf("couldn't open /packages/elf-loader\n");
1419 			return;
1420 		}
1421 		call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1422 			  elfloader, ADDR(&fake_elf));
1423 		call_prom("close", 1, 0, elfloader);
1424 	}
1425 #endif /* __BIG_ENDIAN__ */
1426 }
1427 #endif /* CONFIG_PPC_PSERIES */
1428 
1429 /*
1430  * Memory allocation strategy... our layout is normally:
1431  *
1432  *  at 14Mb or more we have vmlinux, then a gap and initrd.  In some
1433  *  rare cases, initrd might end up being before the kernel though.
1434  *  We assume this won't overwrite the final kernel at 0; we have no
1435  *  provision to handle that in this version, but it should hopefully
1436  *  never happen.
1437  *
1438  *  alloc_top is set to the top of RMO, eventually shrink down if the
1439  *  TCEs overlap
1440  *
1441  *  alloc_bottom is set to the top of kernel/initrd
1442  *
1443  *  from there, allocations are done this way: rtas is allocated
1444  *  topmost, and the device-tree is allocated from the bottom. We try
1445  *  to grow the device-tree allocation as we progress. If we can't,
1446  *  then we fail, we don't currently have a facility to restart
1447  *  elsewhere, but that shouldn't be necessary.
1448  *
1449  *  Note that calls to reserve_mem have to be done explicitly, memory
1450  *  allocated with either alloc_up or alloc_down isn't automatically
1451  *  reserved.
1452  */
1453 
1454 
1455 /*
1456  * Allocates memory in the RMO upward from the kernel/initrd
1457  *
1458  * When align is 0, this is a special case: it means to allocate in place
1459  * at the current location of alloc_bottom or fail (that is basically
1460  * extending the previous allocation). Used for the device-tree flattening
1461  */
1462 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1463 {
1464 	unsigned long base = alloc_bottom;
1465 	unsigned long addr = 0;
1466 
1467 	if (align)
1468 		base = ALIGN(base, align);
1469 	prom_debug("%s(%lx, %lx)\n", __func__, size, align);
1470 	if (ram_top == 0)
1471 		prom_panic("alloc_up() called with mem not initialized\n");
1472 
1473 	if (align)
1474 		base = ALIGN(alloc_bottom, align);
1475 	else
1476 		base = alloc_bottom;
1477 
1478 	for(; (base + size) <= alloc_top;
1479 	    base = ALIGN(base + 0x100000, align)) {
1480 		prom_debug("    trying: 0x%lx\n\r", base);
1481 		addr = (unsigned long)prom_claim(base, size, 0);
1482 		if (addr != PROM_ERROR && addr != 0)
1483 			break;
1484 		addr = 0;
1485 		if (align == 0)
1486 			break;
1487 	}
1488 	if (addr == 0)
1489 		return 0;
1490 	alloc_bottom = addr + size;
1491 
1492 	prom_debug(" -> %lx\n", addr);
1493 	prom_debug("  alloc_bottom : %lx\n", alloc_bottom);
1494 	prom_debug("  alloc_top    : %lx\n", alloc_top);
1495 	prom_debug("  alloc_top_hi : %lx\n", alloc_top_high);
1496 	prom_debug("  rmo_top      : %lx\n", rmo_top);
1497 	prom_debug("  ram_top      : %lx\n", ram_top);
1498 
1499 	return addr;
1500 }
1501 
1502 /*
1503  * Allocates memory downward, either from top of RMO, or if highmem
1504  * is set, from the top of RAM.  Note that this one doesn't handle
1505  * failures.  It does claim memory if highmem is not set.
1506  */
1507 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1508 				       int highmem)
1509 {
1510 	unsigned long base, addr = 0;
1511 
1512 	prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
1513 		   highmem ? "(high)" : "(low)");
1514 	if (ram_top == 0)
1515 		prom_panic("alloc_down() called with mem not initialized\n");
1516 
1517 	if (highmem) {
1518 		/* Carve out storage for the TCE table. */
1519 		addr = ALIGN_DOWN(alloc_top_high - size, align);
1520 		if (addr <= alloc_bottom)
1521 			return 0;
1522 		/* Will we bump into the RMO? If yes, check that we
1523 		 * didn't overlap existing allocations there; if we did,
1524 		 * we are dead, we must be the first in town!
1525 		 */
1526 		if (addr < rmo_top) {
1527 			/* Good, we are first */
1528 			if (alloc_top == rmo_top)
1529 				alloc_top = rmo_top = addr;
1530 			else
1531 				return 0;
1532 		}
1533 		alloc_top_high = addr;
1534 		goto bail;
1535 	}
1536 
1537 	base = ALIGN_DOWN(alloc_top - size, align);
1538 	for (; base > alloc_bottom;
1539 	     base = ALIGN_DOWN(base - 0x100000, align))  {
1540 		prom_debug("    trying: 0x%lx\n\r", base);
1541 		addr = (unsigned long)prom_claim(base, size, 0);
1542 		if (addr != PROM_ERROR && addr != 0)
1543 			break;
1544 		addr = 0;
1545 	}
1546 	if (addr == 0)
1547 		return 0;
1548 	alloc_top = addr;
1549 
1550  bail:
1551 	prom_debug(" -> %lx\n", addr);
1552 	prom_debug("  alloc_bottom : %lx\n", alloc_bottom);
1553 	prom_debug("  alloc_top    : %lx\n", alloc_top);
1554 	prom_debug("  alloc_top_hi : %lx\n", alloc_top_high);
1555 	prom_debug("  rmo_top      : %lx\n", rmo_top);
1556 	prom_debug("  ram_top      : %lx\n", ram_top);
1557 
1558 	return addr;
1559 }
1560 
1561 /*
1562  * Parse a "reg" cell
1563  */
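/* Values wider than an unsigned long keep only their least-significant cells. */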
1564 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1565 {
1566 	cell_t *p = *cellp;
1567 	unsigned long r = 0;
1568 
1569 	/* Ignore more than 2 cells */
1570 	while (s > sizeof(unsigned long) / 4) {
1571 		p++;
1572 		s--;
1573 	}
1574 	r = be32_to_cpu(*p++);
1575 #ifdef CONFIG_PPC64
1576 	if (s > 1) {
1577 		r <<= 32;
1578 		r |= be32_to_cpu(*(p++));
1579 	}
1580 #endif
1581 	*cellp = p;
1582 	return r;
1583 }
1584 
1585 /*
1586  * Very dumb function for adding to the memory reserve list, but
1587  * we don't need anything smarter at this point
1588  *
1589  * XXX Eventually check for collisions.  They should NEVER happen.
1590  * If problems seem to show up, it would be a good start to track
1591  * them down.
1592  */
1593 static void __init reserve_mem(u64 base, u64 size)
1594 {
1595 	u64 top = base + size;
1596 	unsigned long cnt = mem_reserve_cnt;
1597 
1598 	if (size == 0)
1599 		return;
1600 
1601 	/* We need to always keep one empty entry so that we
1602 	 * have our terminator with "size" set to 0 since we are
1603 	 * dumb and just copy this entire array to the boot params
1604 	 */
1605 	base = ALIGN_DOWN(base, PAGE_SIZE);
1606 	top = ALIGN(top, PAGE_SIZE);
1607 	size = top - base;
1608 
1609 	if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1610 		prom_panic("Memory reserve map exhausted !\n");
1611 	mem_reserve_map[cnt].base = cpu_to_be64(base);
1612 	mem_reserve_map[cnt].size = cpu_to_be64(size);
1613 	mem_reserve_cnt = cnt + 1;
1614 }
1615 
1616 /*
1617  * Initialize the memory allocation mechanism: parse the "memory" nodes and
1618  * obtain from them the top of memory and the RMO, to set up our local allocator
1619  */
1620 static void __init prom_init_mem(void)
1621 {
1622 	phandle node;
1623 	char type[64];
1624 	unsigned int plen;
1625 	cell_t *p, *endp;
1626 	__be32 val;
1627 	u32 rac, rsc;
1628 
1629 	/*
1630 	 * We iterate the memory nodes to find
1631 	 * 1) top of RMO (first node)
1632 	 * 2) top of memory
1633 	 */
1634 	val = cpu_to_be32(2);
1635 	prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1636 	rac = be32_to_cpu(val);
1637 	val = cpu_to_be32(1);
1638 	prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
1639 	rsc = be32_to_cpu(val);
1640 	prom_debug("root_addr_cells: %x\n", rac);
1641 	prom_debug("root_size_cells: %x\n", rsc);
1642 
1643 	prom_debug("scanning memory:\n");
1644 
1645 	for (node = 0; prom_next_node(&node); ) {
1646 		type[0] = 0;
1647 		prom_getprop(node, "device_type", type, sizeof(type));
1648 
1649 		if (type[0] == 0) {
1650 			/*
1651 			 * CHRP Longtrail machines have no device_type
1652 			 * on the memory node, so check the name instead...
1653 			 */
1654 			prom_getprop(node, "name", type, sizeof(type));
1655 		}
1656 		if (prom_strcmp(type, "memory"))
1657 			continue;
1658 
1659 		plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1660 		if (plen > sizeof(regbuf)) {
1661 			prom_printf("memory node too large for buffer !\n");
1662 			plen = sizeof(regbuf);
1663 		}
1664 		p = regbuf;
1665 		endp = p + (plen / sizeof(cell_t));
1666 
1667 #ifdef DEBUG_PROM
1668 		memset(prom_scratch, 0, sizeof(prom_scratch));
1669 		call_prom("package-to-path", 3, 1, node, prom_scratch,
1670 			  sizeof(prom_scratch) - 1);
1671 		prom_debug("  node %s :\n", prom_scratch);
1672 #endif /* DEBUG_PROM */
1673 
1674 		while ((endp - p) >= (rac + rsc)) {
1675 			unsigned long base, size;
1676 
1677 			base = prom_next_cell(rac, &p);
1678 			size = prom_next_cell(rsc, &p);
1679 
1680 			if (size == 0)
1681 				continue;
1682 			prom_debug("    %lx %lx\n", base, size);
1683 			if (base == 0 && (of_platform & PLATFORM_LPAR))
1684 				rmo_top = size;
1685 			if ((base + size) > ram_top)
1686 				ram_top = base + size;
1687 		}
1688 	}
1689 
1690 	alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1691 
1692 	/*
1693 	 * If prom_memory_limit is set we reduce the upper limits *except* for
1694 	 * alloc_top_high. This must be the real top of RAM so we can put
1695 	 * TCE's up there.
1696 	 */
1697 
1698 	alloc_top_high = ram_top;
1699 
1700 	if (prom_memory_limit) {
1701 		if (prom_memory_limit <= alloc_bottom) {
1702 			prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
1703 				    prom_memory_limit);
1704 			prom_memory_limit = 0;
1705 		} else if (prom_memory_limit >= ram_top) {
1706 			prom_printf("Ignoring mem=%lx >= ram_top.\n",
1707 				    prom_memory_limit);
1708 			prom_memory_limit = 0;
1709 		} else {
1710 			ram_top = prom_memory_limit;
1711 			rmo_top = min(rmo_top, prom_memory_limit);
1712 		}
1713 	}
1714 
1715 	/*
1716 	 * Setup our top alloc point, that is top of RMO or top of
1717 	 * segment 0 when running non-LPAR.
1718 	 * Some RS64 machines have buggy firmware where claims up at
1719 	 * 1GB fail.  Cap at 768MB as a workaround.
1720 	 * Since 768MB is plenty of room, and we need to cap to something
1721 	 * reasonable on 32-bit, cap at 768MB on all machines.
1722 	 */
1723 	if (!rmo_top)
1724 		rmo_top = ram_top;
1725 	rmo_top = min(0x30000000ul, rmo_top);
1726 	alloc_top = rmo_top;
1727 	alloc_top_high = ram_top;
1728 
1729 	/*
1730 	 * Check if we have an initrd after the kernel but still inside
1731 	 * the RMO.  If we do, move our bottom point to after it.
1732 	 */
1733 	if (prom_initrd_start &&
1734 	    prom_initrd_start < rmo_top &&
1735 	    prom_initrd_end > alloc_bottom)
1736 		alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1737 
1738 	prom_printf("memory layout at init:\n");
1739 	prom_printf("  memory_limit : %lx (16 MB aligned)\n",
1740 		    prom_memory_limit);
1741 	prom_printf("  alloc_bottom : %lx\n", alloc_bottom);
1742 	prom_printf("  alloc_top    : %lx\n", alloc_top);
1743 	prom_printf("  alloc_top_hi : %lx\n", alloc_top_high);
1744 	prom_printf("  rmo_top      : %lx\n", rmo_top);
1745 	prom_printf("  ram_top      : %lx\n", ram_top);
1746 }
1747 
1748 static void __init prom_close_stdin(void)
1749 {
1750 	__be32 val;
1751 	ihandle stdin;
1752 
1753 	if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1754 		stdin = be32_to_cpu(val);
1755 		call_prom("close", 1, 0, stdin);
1756 	}
1757 }
1758 
1759 #ifdef CONFIG_PPC_SVM
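/*
 * Issue an RTAS call via the H_RTAS hypercall: "sc 1" with H_RTAS in r3
 * and the address of the RTAS argument buffer in r4.
 */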
1760 static int prom_rtas_hcall(uint64_t args)
1761 {
1762 	register uint64_t arg1 asm("r3") = H_RTAS;
1763 	register uint64_t arg2 asm("r4") = args;
1764 
1765 	asm volatile("sc 1\n" : "=r" (arg1) :
1766 			"r" (arg1),
1767 			"r" (arg2) :);
1768 	return arg1;
1769 }
1770 
1771 static struct rtas_args __prombss os_term_args;
1772 
1773 static void __init prom_rtas_os_term(char *str)
1774 {
1775 	phandle rtas_node;
1776 	__be32 val;
1777 	u32 token;
1778 
1779 	prom_debug("%s: start...\n", __func__);
1780 	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1781 	prom_debug("rtas_node: %x\n", rtas_node);
1782 	if (!PHANDLE_VALID(rtas_node))
1783 		return;
1784 
1785 	val = 0;
1786 	prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val));
1787 	token = be32_to_cpu(val);
1788 	prom_debug("ibm,os-term: %x\n", token);
1789 	if (token == 0)
1790 		prom_panic("Could not get token for ibm,os-term\n");
1791 	os_term_args.token = cpu_to_be32(token);
1792 	os_term_args.nargs = cpu_to_be32(1);
1793 	os_term_args.nret = cpu_to_be32(1);
1794 	os_term_args.args[0] = cpu_to_be32(__pa(str));
1795 	prom_rtas_hcall((uint64_t)&os_term_args);
1796 }
1797 #endif /* CONFIG_PPC_SVM */
1798 
1799 /*
1800  * Allocate room for and instantiate RTAS
1801  */
1802 static void __init prom_instantiate_rtas(void)
1803 {
1804 	phandle rtas_node;
1805 	ihandle rtas_inst;
1806 	u32 base, entry = 0;
1807 	__be32 val;
1808 	u32 size = 0;
1809 
1810 	prom_debug("prom_instantiate_rtas: start...\n");
1811 
1812 	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1813 	prom_debug("rtas_node: %x\n", rtas_node);
1814 	if (!PHANDLE_VALID(rtas_node))
1815 		return;
1816 
1817 	val = 0;
1818 	prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1819 	size = be32_to_cpu(val);
1820 	if (size == 0)
1821 		return;
1822 
1823 	base = alloc_down(size, PAGE_SIZE, 0);
1824 	if (base == 0)
1825 		prom_panic("Could not allocate memory for RTAS\n");
1826 
1827 	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1828 	if (!IHANDLE_VALID(rtas_inst)) {
1829 		prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1830 		return;
1831 	}
1832 
1833 	prom_printf("instantiating rtas at 0x%x...", base);
1834 
1835 	if (call_prom_ret("call-method", 3, 2, &entry,
1836 			  ADDR("instantiate-rtas"),
1837 			  rtas_inst, base) != 0
1838 	    || entry == 0) {
1839 		prom_printf(" failed\n");
1840 		return;
1841 	}
1842 	prom_printf(" done\n");
1843 
1844 	reserve_mem(base, size);
1845 
1846 	val = cpu_to_be32(base);
1847 	prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1848 		     &val, sizeof(val));
1849 	val = cpu_to_be32(entry);
1850 	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1851 		     &val, sizeof(val));
1852 
1853 	/* Check if it supports "query-cpu-stopped-state" */
1854 	if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1855 			 &val, sizeof(val)) != PROM_ERROR)
1856 		rtas_has_query_cpu_stopped = true;
1857 
1858 	prom_debug("rtas base     = 0x%x\n", base);
1859 	prom_debug("rtas entry    = 0x%x\n", entry);
1860 	prom_debug("rtas size     = 0x%x\n", size);
1861 
1862 	prom_debug("prom_instantiate_rtas: end...\n");
1863 }
1864 
1865 #ifdef CONFIG_PPC64
1866 /*
1867  * Allocate room for and instantiate Stored Measurement Log (SML)
1868  */
1869 static void __init prom_instantiate_sml(void)
1870 {
1871 	phandle ibmvtpm_node;
1872 	ihandle ibmvtpm_inst;
1873 	u32 entry = 0, size = 0, succ = 0;
1874 	u64 base;
1875 	__be32 val;
1876 
1877 	prom_debug("prom_instantiate_sml: start...\n");
1878 
1879 	ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1880 	prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1881 	if (!PHANDLE_VALID(ibmvtpm_node))
1882 		return;
1883 
1884 	ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1885 	if (!IHANDLE_VALID(ibmvtpm_inst)) {
1886 		prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1887 		return;
1888 	}
1889 
1890 	if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1891 			 &val, sizeof(val)) != PROM_ERROR) {
1892 		if (call_prom_ret("call-method", 2, 2, &succ,
1893 				  ADDR("reformat-sml-to-efi-alignment"),
1894 				  ibmvtpm_inst) != 0 || succ == 0) {
1895 			prom_printf("Reformat SML to EFI alignment failed\n");
1896 			return;
1897 		}
1898 
1899 		if (call_prom_ret("call-method", 2, 2, &size,
1900 				  ADDR("sml-get-allocated-size"),
1901 				  ibmvtpm_inst) != 0 || size == 0) {
1902 			prom_printf("SML get allocated size failed\n");
1903 			return;
1904 		}
1905 	} else {
1906 		if (call_prom_ret("call-method", 2, 2, &size,
1907 				  ADDR("sml-get-handover-size"),
1908 				  ibmvtpm_inst) != 0 || size == 0) {
1909 			prom_printf("SML get handover size failed\n");
1910 			return;
1911 		}
1912 	}
1913 
1914 	base = alloc_down(size, PAGE_SIZE, 0);
1915 	if (base == 0)
1916 		prom_panic("Could not allocate memory for sml\n");
1917 
1918 	prom_printf("instantiating sml at 0x%llx...", base);
1919 
1920 	memset((void *)base, 0, size);
1921 
1922 	if (call_prom_ret("call-method", 4, 2, &entry,
1923 			  ADDR("sml-handover"),
1924 			  ibmvtpm_inst, size, base) != 0 || entry == 0) {
1925 		prom_printf("SML handover failed\n");
1926 		return;
1927 	}
1928 	prom_printf(" done\n");
1929 
1930 	reserve_mem(base, size);
1931 
1932 	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1933 		     &base, sizeof(base));
1934 	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1935 		     &size, sizeof(size));
1936 
1937 	prom_debug("sml base     = 0x%llx\n", base);
1938 	prom_debug("sml size     = 0x%x\n", size);
1939 
1940 	prom_debug("prom_instantiate_sml: end...\n");
1941 }
1942 
1943 /*
1944  * Allocate room for and initialize TCE tables
1945  */
1946 #ifdef __BIG_ENDIAN__
1947 static void __init prom_initialize_tce_table(void)
1948 {
1949 	phandle node;
1950 	ihandle phb_node;
1951 	char compatible[64], type[64], model[64];
1952 	char *path = prom_scratch;
1953 	u64 base, align;
1954 	u32 minalign, minsize;
1955 	u64 tce_entry, *tce_entryp;
1956 	u64 local_alloc_top, local_alloc_bottom;
1957 	u64 i;
1958 
1959 	if (prom_iommu_off)
1960 		return;
1961 
1962 	prom_debug("starting prom_initialize_tce_table\n");
1963 
1964 	/* Cache current top of allocs so we reserve a single block */
1965 	local_alloc_top = alloc_top_high;
1966 	local_alloc_bottom = local_alloc_top;
1967 
1968 	/* Search all nodes looking for PHBs. */
1969 	for (node = 0; prom_next_node(&node); ) {
1970 		compatible[0] = 0;
1971 		type[0] = 0;
1972 		model[0] = 0;
1973 		prom_getprop(node, "compatible",
1974 			     compatible, sizeof(compatible));
1975 		prom_getprop(node, "device_type", type, sizeof(type));
1976 		prom_getprop(node, "model", model, sizeof(model));
1977 
1978 		if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
1979 			continue;
1980 
1981 		/* Keep the old logic intact to avoid regression. */
1982 		if (compatible[0] != 0) {
1983 			if ((prom_strstr(compatible, "python") == NULL) &&
1984 			    (prom_strstr(compatible, "Speedwagon") == NULL) &&
1985 			    (prom_strstr(compatible, "Winnipeg") == NULL))
1986 				continue;
1987 		} else if (model[0] != 0) {
1988 			if ((prom_strstr(model, "ython") == NULL) &&
1989 			    (prom_strstr(model, "peedwagon") == NULL) &&
1990 			    (prom_strstr(model, "innipeg") == NULL))
1991 				continue;
1992 		}
1993 
1994 		if (prom_getprop(node, "tce-table-minalign", &minalign,
1995 				 sizeof(minalign)) == PROM_ERROR)
1996 			minalign = 0;
1997 		if (prom_getprop(node, "tce-table-minsize", &minsize,
1998 				 sizeof(minsize)) == PROM_ERROR)
1999 			minsize = 4UL << 20;
2000 
2001 		/*
2002 		 * Even though we read what OF wants, we just set the table
2003 		 * size to 4 MB.  This is enough to map 2GB of PCI DMA space.
2004 		 * By doing this, we avoid the pitfalls of trying to DMA to
2005 		 * MMIO space and the DMA alias hole.
2006 		 */
2007 		minsize = 4UL << 20;
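		/*
		 * Arithmetic behind the comment above (assuming the usual
		 * 4 KB pages): a 4 MB table holds 4 MB / 8 bytes = 512K TCE
		 * entries, and 512K entries * 4 KB per page = 2 GB of
		 * mappable DMA space.
		 */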
2008 
2009 		/* Align to the greater of the minimum alignment or the size */
2010 		align = max(minalign, minsize);
2011 		base = alloc_down(minsize, align, 1);
2012 		if (base == 0)
2013 			prom_panic("ERROR, cannot find space for TCE table.\n");
2014 		if (base < local_alloc_bottom)
2015 			local_alloc_bottom = base;
2016 
2017 		/* It seems OF doesn't null-terminate the path :-( */
2018 		memset(path, 0, sizeof(prom_scratch));
2019 		/* Call OF to setup the TCE hardware */
2020 		if (call_prom("package-to-path", 3, 1, node,
2021 			      path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
2022 			prom_printf("package-to-path failed\n");
2023 		}
2024 
2025 		/* Save away the TCE table attributes for later use. */
2026 		prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
2027 		prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
2028 
2029 		prom_debug("TCE table: %s\n", path);
2030 		prom_debug("\tnode = 0x%x\n", node);
2031 		prom_debug("\tbase = 0x%llx\n", base);
2032 		prom_debug("\tsize = 0x%x\n", minsize);
2033 
2034 		/* Initialize the table to have a one-to-one mapping
2035 		 * over the allocated size.
2036 		 */
2037 		tce_entryp = (u64 *)base;
2038 		for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
2039 			tce_entry = (i << PAGE_SHIFT);
2040 			tce_entry |= 0x3;
2041 			*tce_entryp = tce_entry;
2042 		}
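		/*
		 * Illustration of the identity mapping built above: entry i
		 * holds (i << PAGE_SHIFT) | 0x3, i.e. the real address of
		 * page i with the low bits used as access-control flags
		 * (0x3 presumably enabling both read and write here).
		 */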
2043 
2044 		prom_printf("opening PHB %s", path);
2045 		phb_node = call_prom("open", 1, 1, path);
2046 		if (phb_node == 0)
2047 			prom_printf("... failed\n");
2048 		else
2049 			prom_printf("... done\n");
2050 
2051 		call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
2052 			  phb_node, -1, minsize,
2053 			  (u32) base, (u32) (base >> 32));
2054 		call_prom("close", 1, 0, phb_node);
2055 	}
2056 
2057 	reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
2058 
2059 	/* These are only really needed if there is a memory limit in
2060 	 * effect, but we don't know that yet, so export them always. */
2061 	prom_tce_alloc_start = local_alloc_bottom;
2062 	prom_tce_alloc_end = local_alloc_top;
2063 
2064 	/* Flag the first invalid entry */
2065 	prom_debug("ending prom_initialize_tce_table\n");
2066 }
2067 #endif /* __BIG_ENDIAN__ */
2068 #endif /* CONFIG_PPC64 */
2069 
2070 /*
2071  * With CHRP SMP we need to use the OF to start the other processors.
2072  * We can't wait until smp_boot_cpus (the OF is trashed by then)
2073  * so we have to put the processors into a holding pattern controlled
2074  * by the kernel (not OF) before we destroy the OF.
2075  *
2076  * This uses a chunk of low memory, puts some holding pattern
2077  * code there and sends the other processors off to there until
2078  * smp_boot_cpus tells them to do something.  The holding pattern
2079  * checks that address until its cpu # appears there; when it does,
2080  * that cpu jumps to __secondary_start().  smp_boot_cpus() takes care
2081  * of setting those values.
2082  *
2083  * We also use physical address 0x4 here to tell when a cpu
2084  * is in its holding pattern code.
2085  *
2086  * -- Cort
2087  */
2088 /*
2089  * We want to reference the copy of __secondary_hold_* in the
2090  * 0 - 0x100 address range
2091  */
2092 #define LOW_ADDR(x)	(((unsigned long) &(x)) & 0xff)
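/*
 * Note: the low-memory copy referenced here is made by prom_init() via
 * copy_and_flush(0, kbase, 0x100, 0) on non-PowerMac platforms, so
 * LOW_ADDR() simply yields a symbol's offset within that first 0x100
 * bytes.  For example, LOW_ADDR(__secondary_hold) is the physical
 * address the secondaries are sent to by "start-cpu" below.
 */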
2093 
2094 static void __init prom_hold_cpus(void)
2095 {
2096 	unsigned long i;
2097 	phandle node;
2098 	char type[64];
2099 	unsigned long *spinloop
2100 		= (void *) LOW_ADDR(__secondary_hold_spinloop);
2101 	unsigned long *acknowledge
2102 		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
2103 	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
2104 
2105 	/*
2106 	 * On pseries, if RTAS supports "query-cpu-stopped-state",
2107 	 * we skip this stage, the CPUs will be started by the
2108 	 * kernel using RTAS.
2109 	 */
2110 	if ((of_platform == PLATFORM_PSERIES ||
2111 	     of_platform == PLATFORM_PSERIES_LPAR) &&
2112 	    rtas_has_query_cpu_stopped) {
2113 		prom_printf("prom_hold_cpus: skipped\n");
2114 		return;
2115 	}
2116 
2117 	prom_debug("prom_hold_cpus: start...\n");
2118 	prom_debug("    1) spinloop       = 0x%lx\n", (unsigned long)spinloop);
2119 	prom_debug("    1) *spinloop      = 0x%lx\n", *spinloop);
2120 	prom_debug("    1) acknowledge    = 0x%lx\n",
2121 		   (unsigned long)acknowledge);
2122 	prom_debug("    1) *acknowledge   = 0x%lx\n", *acknowledge);
2123 	prom_debug("    1) secondary_hold = 0x%lx\n", secondary_hold);
2124 
2125 	/* Set the common spinloop variable, so all of the secondary cpus
2126 	 * will block when they are awakened from their OF spinloop.
2127 	 * This must occur for both SMP and non-SMP kernels, since OF will
2128 	 * be trashed when we move the kernel.
2129 	 */
2130 	*spinloop = 0;
2131 
2132 	/* look for cpus */
2133 	for (node = 0; prom_next_node(&node); ) {
2134 		unsigned int cpu_no;
2135 		__be32 reg;
2136 
2137 		type[0] = 0;
2138 		prom_getprop(node, "device_type", type, sizeof(type));
2139 		if (prom_strcmp(type, "cpu") != 0)
2140 			continue;
2141 
2142 		/* Skip non-configured cpus. */
2143 		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
2144 			if (prom_strcmp(type, "okay") != 0)
2145 				continue;
2146 
2147 		reg = cpu_to_be32(-1); /* make sparse happy */
2148 		prom_getprop(node, "reg", &reg, sizeof(reg));
2149 		cpu_no = be32_to_cpu(reg);
2150 
2151 		prom_debug("cpu hw idx   = %u\n", cpu_no);
2152 
2153 		/* Init the acknowledge var which will be reset by
2154 		 * the secondary cpu when it awakens from its OF
2155 		 * spinloop.
2156 		 */
2157 		*acknowledge = (unsigned long)-1;
2158 
2159 		if (cpu_no != prom.cpu) {
2160 			/* Primary Thread of non-boot cpu or any thread */
2161 			prom_printf("starting cpu hw idx %u... ", cpu_no);
2162 			call_prom("start-cpu", 3, 0, node,
2163 				  secondary_hold, cpu_no);
2164 
2165 			for (i = 0; (i < 100000000) &&
2166 			     (*acknowledge == ((unsigned long)-1)); i++ )
2167 				mb();
2168 
2169 			if (*acknowledge == cpu_no)
2170 				prom_printf("done\n");
2171 			else
2172 				prom_printf("failed: %lx\n", *acknowledge);
2173 		}
2174 #ifdef CONFIG_SMP
2175 		else
2176 			prom_printf("boot cpu hw idx %u\n", cpu_no);
2177 #endif /* CONFIG_SMP */
2178 	}
2179 
2180 	prom_debug("prom_hold_cpus: end...\n");
2181 }
2182 
2183 
2184 static void __init prom_init_client_services(unsigned long pp)
2185 {
2186 	/* Get a handle to the prom entry point before anything else */
2187 	prom_entry = pp;
2188 
2189 	/* get a handle for the stdout device */
2190 	prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2191 	if (!PHANDLE_VALID(prom.chosen))
2192 		prom_panic("cannot find chosen"); /* msg won't be printed :( */
2193 
2194 	/* get device tree root */
2195 	prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2196 	if (!PHANDLE_VALID(prom.root))
2197 		prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2198 
2199 	prom.mmumap = 0;
2200 }
2201 
2202 #ifdef CONFIG_PPC32
2203 /*
2204  * For really old powermacs, we need to map things we claim.
2205  * For that, we need the ihandle of the mmu.
2206  * Also, on the longtrail, we need to work around other bugs.
2207  */
2208 static void __init prom_find_mmu(void)
2209 {
2210 	phandle oprom;
2211 	char version[64];
2212 
2213 	oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2214 	if (!PHANDLE_VALID(oprom))
2215 		return;
2216 	if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2217 		return;
2218 	version[sizeof(version) - 1] = 0;
2219 	/* XXX might need to add other versions here */
2220 	if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
2221 		of_workarounds = OF_WA_CLAIM;
2222 	else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
2223 		of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2224 		call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2225 	} else
2226 		return;
2227 	prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2228 	prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2229 		     sizeof(prom.mmumap));
2230 	prom.mmumap = be32_to_cpu(prom.mmumap);
2231 	if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2232 		of_workarounds &= ~OF_WA_CLAIM;		/* hmmm */
2233 }
2234 #else
2235 #define prom_find_mmu()
2236 #endif
2237 
2238 static void __init prom_init_stdout(void)
2239 {
2240 	char *path = of_stdout_device;
2241 	char type[16];
2242 	phandle stdout_node;
2243 	__be32 val;
2244 
2245 	if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2246 		prom_panic("cannot find stdout");
2247 
2248 	prom.stdout = be32_to_cpu(val);
2249 
2250 	/* Get the full OF pathname of the stdout device */
2251 	memset(path, 0, 256);
2252 	call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2253 	prom_printf("OF stdout device is: %s\n", of_stdout_device);
2254 	prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2255 		     path, prom_strlen(path) + 1);
2256 
2257 	/* instance-to-package fails on PA-Semi */
2258 	stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2259 	if (stdout_node != PROM_ERROR) {
2260 		val = cpu_to_be32(stdout_node);
2261 
2262 		/* If it's a display, note it */
2263 		memset(type, 0, sizeof(type));
2264 		prom_getprop(stdout_node, "device_type", type, sizeof(type));
2265 		if (prom_strcmp(type, "display") == 0)
2266 			prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2267 	}
2268 }
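/*
 * The "linux,stdout-path" property written above records the full OF
 * path of the console device under /chosen; the kernel proper consults
 * it (alongside "stdout-path") when choosing the default console.
 */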
2269 
2270 static int __init prom_find_machine_type(void)
2271 {
2272 	char compat[256];
2273 	int len, i = 0;
2274 #ifdef CONFIG_PPC64
2275 	phandle rtas;
2276 	int x;
2277 #endif
2278 
2279 	/* Look for a PowerMac or a Cell */
2280 	len = prom_getprop(prom.root, "compatible",
2281 			   compat, sizeof(compat)-1);
2282 	if (len > 0) {
2283 		compat[len] = 0;
2284 		while (i < len) {
2285 			char *p = &compat[i];
2286 			int sl = prom_strlen(p);
2287 			if (sl == 0)
2288 				break;
2289 			if (prom_strstr(p, "Power Macintosh") ||
2290 			    prom_strstr(p, "MacRISC"))
2291 				return PLATFORM_POWERMAC;
2292 #ifdef CONFIG_PPC64
2293 			/* We must make sure we don't detect the IBM Cell
2294 			 * blades as pSeries due to some firmware issues,
2295 			 * so we do it here.
2296 			 */
2297 			if (prom_strstr(p, "IBM,CBEA") ||
2298 			    prom_strstr(p, "IBM,CPBW-1.0"))
2299 				return PLATFORM_GENERIC;
2300 #endif /* CONFIG_PPC64 */
2301 			i += sl + 1;
2302 		}
2303 	}
2304 #ifdef CONFIG_PPC64
2305 	/* Try to figure out if it's an IBM pSeries or any other
2306 	 * PAPR compliant platform. We assume it is if :
2307 	 *  - /device_type is "chrp" (please, do NOT use that for future
2308 	 *    non-IBM designs!)
2309 	 *  - it has /rtas
2310 	 */
2311 	len = prom_getprop(prom.root, "device_type",
2312 			   compat, sizeof(compat)-1);
2313 	if (len <= 0)
2314 		return PLATFORM_GENERIC;
2315 	if (prom_strcmp(compat, "chrp"))
2316 		return PLATFORM_GENERIC;
2317 
2318 	/* Default to pSeries. We need to know if we are running LPAR */
2319 	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2320 	if (!PHANDLE_VALID(rtas))
2321 		return PLATFORM_GENERIC;
2322 	x = prom_getproplen(rtas, "ibm,hypertas-functions");
2323 	if (x != PROM_ERROR) {
2324 		prom_debug("Hypertas detected, assuming LPAR !\n");
2325 		return PLATFORM_PSERIES_LPAR;
2326 	}
2327 	return PLATFORM_PSERIES;
2328 #else
2329 	return PLATFORM_GENERIC;
2330 #endif
2331 }
2332 
2333 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2334 {
2335 	return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2336 }
2337 
2338 /*
2339  * If we have a display that we don't know how to drive,
2340  * we will want to try to execute OF's open method for it
2341  * later.  However, OF will probably fall over if we do that after
2342  * we've taken over the MMU.
2343  * So we check whether we will need to open the display,
2344  * and if so, open it now.
2345  */
2346 static void __init prom_check_displays(void)
2347 {
2348 	char type[16], *path;
2349 	phandle node;
2350 	ihandle ih;
2351 	int i;
2352 
2353 	static const unsigned char default_colors[] __initconst = {
2354 		0x00, 0x00, 0x00,
2355 		0x00, 0x00, 0xaa,
2356 		0x00, 0xaa, 0x00,
2357 		0x00, 0xaa, 0xaa,
2358 		0xaa, 0x00, 0x00,
2359 		0xaa, 0x00, 0xaa,
2360 		0xaa, 0xaa, 0x00,
2361 		0xaa, 0xaa, 0xaa,
2362 		0x55, 0x55, 0x55,
2363 		0x55, 0x55, 0xff,
2364 		0x55, 0xff, 0x55,
2365 		0x55, 0xff, 0xff,
2366 		0xff, 0x55, 0x55,
2367 		0xff, 0x55, 0xff,
2368 		0xff, 0xff, 0x55,
2369 		0xff, 0xff, 0xff
2370 	};
2371 	const unsigned char *clut;
2372 
2373 	prom_debug("Looking for displays\n");
2374 	for (node = 0; prom_next_node(&node); ) {
2375 		memset(type, 0, sizeof(type));
2376 		prom_getprop(node, "device_type", type, sizeof(type));
2377 		if (prom_strcmp(type, "display") != 0)
2378 			continue;
2379 
2380 		/* It seems OF doesn't null-terminate the path :-( */
2381 		path = prom_scratch;
2382 		memset(path, 0, sizeof(prom_scratch));
2383 
2384 		/*
2385 		 * leave some room at the end of the path for appending extra
2386 		 * arguments
2387 		 */
2388 		if (call_prom("package-to-path", 3, 1, node, path,
2389 			      sizeof(prom_scratch) - 10) == PROM_ERROR)
2390 			continue;
2391 		prom_printf("found display   : %s, opening... ", path);
2392 
2393 		ih = call_prom("open", 1, 1, path);
2394 		if (ih == 0) {
2395 			prom_printf("failed\n");
2396 			continue;
2397 		}
2398 
2399 		/* Success */
2400 		prom_printf("done\n");
2401 		prom_setprop(node, path, "linux,opened", NULL, 0);
2402 
2403 		/* Setup a usable color table when the appropriate
2404 		 * method is available. Should update this to set-colors */
2405 		clut = default_colors;
2406 		for (i = 0; i < 16; i++, clut += 3)
2407 			if (prom_set_color(ih, i, clut[0], clut[1],
2408 					   clut[2]) != 0)
2409 				break;
2410 
2411 #ifdef CONFIG_LOGO_LINUX_CLUT224
2412 		clut = PTRRELOC(logo_linux_clut224.clut);
2413 		for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2414 			if (prom_set_color(ih, i + 32, clut[0], clut[1],
2415 					   clut[2]) != 0)
2416 				break;
2417 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2418 
2419 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2420 		if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2421 		    PROM_ERROR) {
2422 			u32 width, height, pitch, addr;
2423 
2424 			prom_printf("Setting btext !\n");
2425 
2426 			if (prom_getprop(node, "width", &width, 4) == PROM_ERROR)
2427 				return;
2428 
2429 			if (prom_getprop(node, "height", &height, 4) == PROM_ERROR)
2430 				return;
2431 
2432 			if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR)
2433 				return;
2434 
2435 			if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR)
2436 				return;
2437 
2438 			prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2439 				    width, height, pitch, addr);
2440 			btext_setup_display(width, height, 8, pitch, addr);
2441 			btext_prepare_BAT();
2442 		}
2443 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2444 	}
2445 }
2446 
2447 
2448 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2449 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2450 			      unsigned long needed, unsigned long align)
2451 {
2452 	void *ret;
2453 
2454 	*mem_start = ALIGN(*mem_start, align);
2455 	while ((*mem_start + needed) > *mem_end) {
2456 		unsigned long room, chunk;
2457 
2458 		prom_debug("Chunk exhausted, claiming more at %lx...\n",
2459 			   alloc_bottom);
2460 		room = alloc_top - alloc_bottom;
2461 		if (room > DEVTREE_CHUNK_SIZE)
2462 			room = DEVTREE_CHUNK_SIZE;
2463 		if (room < PAGE_SIZE)
2464 			prom_panic("No memory for flatten_device_tree "
2465 				   "(no room)\n");
2466 		chunk = alloc_up(room, 0);
2467 		if (chunk == 0)
2468 			prom_panic("No memory for flatten_device_tree "
2469 				   "(claim failed)\n");
2470 		*mem_end = chunk + room;
2471 	}
2472 
2473 	ret = (void *)*mem_start;
2474 	*mem_start += needed;
2475 
2476 	return ret;
2477 }
2478 
2479 #define dt_push_token(token, mem_start, mem_end) do { 			\
2480 		void *room = make_room(mem_start, mem_end, 4, 4);	\
2481 		*(__be32 *)room = cpu_to_be32(token);			\
2482 	} while(0)
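/*
 * Example: dt_push_token(OF_DT_BEGIN_NODE, &mem_start, &mem_end)
 * reserves 4 bytes (4-byte aligned) in the structure block and stores
 * the token as a big-endian 32-bit value, which is how all FDT
 * structure-block cells are encoded.
 */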
2483 
2484 static unsigned long __init dt_find_string(char *str)
2485 {
2486 	char *s, *os;
2487 
2488 	s = os = (char *)dt_string_start;
2489 	s += 4;
2490 	while (s <  (char *)dt_string_end) {
2491 		if (prom_strcmp(s, str) == 0)
2492 			return s - os;
2493 		s += prom_strlen(s) + 1;
2494 	}
2495 	return 0;
2496 }
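/*
 * dt_find_string() returns the offset of @str from dt_string_start, or
 * 0 if the string has not been emitted yet.  Offset 0 can never be a
 * real match because flatten_device_tree() leaves a 4-byte hole at the
 * start of the strings block (hence the "s += 4" above).
 */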
2497 
2498 /*
2499  * The Open Firmware 1275 specification states properties must be 31 bytes or
2500  * less; however, not all firmware obeys this. Make it 64 bytes to be safe.
2501  */
2502 #define MAX_PROPERTY_NAME 64
2503 
2504 static void __init scan_dt_build_strings(phandle node,
2505 					 unsigned long *mem_start,
2506 					 unsigned long *mem_end)
2507 {
2508 	char *prev_name, *namep, *sstart;
2509 	unsigned long soff;
2510 	phandle child;
2511 
2512 	sstart =  (char *)dt_string_start;
2513 
2514 	/* get and store all property names */
2515 	prev_name = "";
2516 	for (;;) {
2517 		/* 64 is max len of name including nul. */
2518 		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2519 		if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2520 			/* No more properties: unwind alloc */
2521 			*mem_start = (unsigned long)namep;
2522 			break;
2523 		}
2524 
2525  		/* skip "name" */
2526 		if (prom_strcmp(namep, "name") == 0) {
2527  			*mem_start = (unsigned long)namep;
2528  			prev_name = "name";
2529  			continue;
2530  		}
2531 		/* get/create string entry */
2532 		soff = dt_find_string(namep);
2533 		if (soff != 0) {
2534 			*mem_start = (unsigned long)namep;
2535 			namep = sstart + soff;
2536 		} else {
2537 			/* New string: keep it and trim the allocation to its length */
2538 			*mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2539 			dt_string_end = *mem_start;
2540 		}
2541 		prev_name = namep;
2542 	}
2543 
2544 	/* do all our children */
2545 	child = call_prom("child", 1, 1, node);
2546 	while (child != 0) {
2547 		scan_dt_build_strings(child, mem_start, mem_end);
2548 		child = call_prom("peer", 1, 1, child);
2549 	}
2550 }
2551 
2552 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2553 					unsigned long *mem_end)
2554 {
2555 	phandle child;
2556 	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2557 	unsigned long soff;
2558 	unsigned char *valp;
2559 	static char pname[MAX_PROPERTY_NAME] __prombss;
2560 	int l, room, has_phandle = 0;
2561 
2562 	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2563 
2564 	/* get the node's full name */
2565 	namep = (char *)*mem_start;
2566 	room = *mem_end - *mem_start;
2567 	if (room > 255)
2568 		room = 255;
2569 	l = call_prom("package-to-path", 3, 1, node, namep, room);
2570 	if (l >= 0) {
2571 		/* Didn't fit?  Get more room. */
2572 		if (l >= room) {
2573 			if (l >= *mem_end - *mem_start)
2574 				namep = make_room(mem_start, mem_end, l+1, 1);
2575 			call_prom("package-to-path", 3, 1, node, namep, l);
2576 		}
2577 		namep[l] = '\0';
2578 
2579 		/* Fixup an Apple bug where they have bogus \0 chars in the
2580 		 * middle of the path in some properties, and extract
2581 		 * the unit name (everything after the last '/').
2582 		 */
2583 		for (lp = p = namep, ep = namep + l; p < ep; p++) {
2584 			if (*p == '/')
2585 				lp = namep;
2586 			else if (*p != 0)
2587 				*lp++ = *p;
2588 		}
2589 		*lp = 0;
2590 		*mem_start = ALIGN((unsigned long)lp + 1, 4);
2591 	}
2592 
2593 	/* get it again for debugging */
2594 	path = prom_scratch;
2595 	memset(path, 0, sizeof(prom_scratch));
2596 	call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
2597 
2598 	/* get and store all properties */
2599 	prev_name = "";
2600 	sstart = (char *)dt_string_start;
2601 	for (;;) {
2602 		if (call_prom("nextprop", 3, 1, node, prev_name,
2603 			      pname) != 1)
2604 			break;
2605 
2606  		/* skip "name" */
2607 		if (prom_strcmp(pname, "name") == 0) {
2608  			prev_name = "name";
2609  			continue;
2610  		}
2611 
2612 		/* find string offset */
2613 		soff = dt_find_string(pname);
2614 		if (soff == 0) {
2615 			prom_printf("WARNING: Can't find string index for"
2616 				    " <%s>, node %s\n", pname, path);
2617 			break;
2618 		}
2619 		prev_name = sstart + soff;
2620 
2621 		/* get length */
2622 		l = call_prom("getproplen", 2, 1, node, pname);
2623 
2624 		/* sanity checks */
2625 		if (l == PROM_ERROR)
2626 			continue;
2627 
2628 		/* push property head */
2629 		dt_push_token(OF_DT_PROP, mem_start, mem_end);
2630 		dt_push_token(l, mem_start, mem_end);
2631 		dt_push_token(soff, mem_start, mem_end);
2632 
2633 		/* push property content */
2634 		valp = make_room(mem_start, mem_end, l, 4);
2635 		call_prom("getprop", 4, 1, node, pname, valp, l);
2636 		*mem_start = ALIGN(*mem_start, 4);
2637 
2638 		if (!prom_strcmp(pname, "phandle"))
2639 			has_phandle = 1;
2640 	}
2641 
2642 	/* Add a "phandle" property if none already exist */
2643 	if (!has_phandle) {
2644 		soff = dt_find_string("phandle");
2645 		if (soff == 0)
2646 			prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
2647 		else {
2648 			dt_push_token(OF_DT_PROP, mem_start, mem_end);
2649 			dt_push_token(4, mem_start, mem_end);
2650 			dt_push_token(soff, mem_start, mem_end);
2651 			valp = make_room(mem_start, mem_end, 4, 4);
2652 			*(__be32 *)valp = cpu_to_be32(node);
2653 		}
2654 	}
2655 
2656 	/* do all our children */
2657 	child = call_prom("child", 1, 1, node);
2658 	while (child != 0) {
2659 		scan_dt_build_struct(child, mem_start, mem_end);
2660 		child = call_prom("peer", 1, 1, child);
2661 	}
2662 
2663 	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2664 }
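/*
 * For each node the function above emits the usual FDT structure
 * stream: OF_DT_BEGIN_NODE, the unit name, one (OF_DT_PROP, length,
 * string offset, value) record per property (plus a synthesized
 * "phandle" when the node lacks one), the children recursively, and
 * finally OF_DT_END_NODE.
 */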
2665 
2666 static void __init flatten_device_tree(void)
2667 {
2668 	phandle root;
2669 	unsigned long mem_start, mem_end, room;
2670 	struct boot_param_header *hdr;
2671 	char *namep;
2672 	u64 *rsvmap;
2673 
2674 	/*
2675 	 * Check how much room we have between alloc top & bottom (+/- a
2676 	 * few pages), crop to 1MB, as this is our "chunk" size
2677 	 */
2678 	room = alloc_top - alloc_bottom - 0x4000;
2679 	if (room > DEVTREE_CHUNK_SIZE)
2680 		room = DEVTREE_CHUNK_SIZE;
2681 	prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
2682 
2683 	/* Now try to claim that */
2684 	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2685 	if (mem_start == 0)
2686 		prom_panic("Can't allocate initial device-tree chunk\n");
2687 	mem_end = mem_start + room;
2688 
2689 	/* Get root of tree */
2690 	root = call_prom("peer", 1, 1, (phandle)0);
2691 	if (root == (phandle)0)
2692 		prom_panic ("couldn't get device tree root\n");
2693 
2694 	/* Build header and make room for mem rsv map */
2695 	mem_start = ALIGN(mem_start, 4);
2696 	hdr = make_room(&mem_start, &mem_end,
2697 			sizeof(struct boot_param_header), 4);
2698 	dt_header_start = (unsigned long)hdr;
2699 	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2700 
2701 	/* Start of strings */
2702 	mem_start = PAGE_ALIGN(mem_start);
2703 	dt_string_start = mem_start;
2704 	mem_start += 4; /* hole */
2705 
2706 	/* Add "phandle" in there, we'll need it */
2707 	namep = make_room(&mem_start, &mem_end, 16, 1);
2708 	prom_strcpy(namep, "phandle");
2709 	mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2710 
2711 	/* Build string array */
2712 	prom_printf("Building dt strings...\n");
2713 	scan_dt_build_strings(root, &mem_start, &mem_end);
2714 	dt_string_end = mem_start;
2715 
2716 	/* Build structure */
2717 	mem_start = PAGE_ALIGN(mem_start);
2718 	dt_struct_start = mem_start;
2719 	prom_printf("Building dt structure...\n");
2720 	scan_dt_build_struct(root, &mem_start, &mem_end);
2721 	dt_push_token(OF_DT_END, &mem_start, &mem_end);
2722 	dt_struct_end = PAGE_ALIGN(mem_start);
2723 
2724 	/* Finish header */
2725 	hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2726 	hdr->magic = cpu_to_be32(OF_DT_HEADER);
2727 	hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2728 	hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2729 	hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2730 	hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2731 	hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2732 	hdr->version = cpu_to_be32(OF_DT_VERSION);
2733 	/* Version 16 is not backward compatible */
2734 	hdr->last_comp_version = cpu_to_be32(0x10);
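	/*
	 * Resulting blob layout, as recorded in the header above: the
	 * boot_param_header at dt_header_start, then the memory reserve
	 * map, then the strings block (dt_string_start..dt_string_end),
	 * then the structure block (dt_struct_start..dt_struct_end),
	 * with all offsets expressed relative to dt_header_start.
	 */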
2735 
2736 	/* Copy the reserve map in */
2737 	memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2738 
2739 #ifdef DEBUG_PROM
2740 	{
2741 		int i;
2742 		prom_printf("reserved memory map:\n");
2743 		for (i = 0; i < mem_reserve_cnt; i++)
2744 			prom_printf("  %llx - %llx\n",
2745 				    be64_to_cpu(mem_reserve_map[i].base),
2746 				    be64_to_cpu(mem_reserve_map[i].size));
2747 	}
2748 #endif
2749 	/* Bump mem_reserve_cnt to cause further reservations to fail
2750 	 * since it's too late.
2751 	 */
2752 	mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2753 
2754 	prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
2755 		    dt_string_start, dt_string_end);
2756 	prom_printf("Device tree struct  0x%lx -> 0x%lx\n",
2757 		    dt_struct_start, dt_struct_end);
2758 }
2759 
2760 #ifdef CONFIG_PPC_MAPLE
2761 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2762  * The values are bad, and it doesn't even have the right number of cells. */
2763 static void __init fixup_device_tree_maple(void)
2764 {
2765 	phandle isa;
2766 	u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2767 	u32 isa_ranges[6];
2768 	char *name;
2769 
2770 	name = "/ht@0/isa@4";
2771 	isa = call_prom("finddevice", 1, 1, ADDR(name));
2772 	if (!PHANDLE_VALID(isa)) {
2773 		name = "/ht@0/isa@6";
2774 		isa = call_prom("finddevice", 1, 1, ADDR(name));
2775 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2776 	}
2777 	if (!PHANDLE_VALID(isa))
2778 		return;
2779 
2780 	if (prom_getproplen(isa, "ranges") != 12)
2781 		return;
2782 	if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2783 		== PROM_ERROR)
2784 		return;
2785 
2786 	if (isa_ranges[0] != 0x1 ||
2787 		isa_ranges[1] != 0xf4000000 ||
2788 		isa_ranges[2] != 0x00010000)
2789 		return;
2790 
2791 	prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2792 
2793 	isa_ranges[0] = 0x1;
2794 	isa_ranges[1] = 0x0;
2795 	isa_ranges[2] = rloc;
2796 	isa_ranges[3] = 0x0;
2797 	isa_ranges[4] = 0x0;
2798 	isa_ranges[5] = 0x00010000;
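	/*
	 * Layout of the 6 cells written above (standard ISA-under-PCI
	 * "ranges" encoding, to the best of our understanding): two cells
	 * of ISA child address (I/O space, offset 0), three cells of PCI
	 * parent address with rloc as phys.hi, and one cell of size
	 * (64 KB of I/O space).
	 */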
2799 	prom_setprop(isa, name, "ranges",
2800 			isa_ranges, sizeof(isa_ranges));
2801 }
2802 
2803 #define CPC925_MC_START		0xf8000000
2804 #define CPC925_MC_LENGTH	0x1000000
2805 /* The values for memory-controller don't have the right number of cells */
2806 static void __init fixup_device_tree_maple_memory_controller(void)
2807 {
2808 	phandle mc;
2809 	u32 mc_reg[4];
2810 	char *name = "/hostbridge@f8000000";
2811 	u32 ac, sc;
2812 
2813 	mc = call_prom("finddevice", 1, 1, ADDR(name));
2814 	if (!PHANDLE_VALID(mc))
2815 		return;
2816 
2817 	if (prom_getproplen(mc, "reg") != 8)
2818 		return;
2819 
2820 	prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2821 	prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2822 	if ((ac != 2) || (sc != 2))
2823 		return;
2824 
2825 	if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2826 		return;
2827 
2828 	if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2829 		return;
2830 
2831 	prom_printf("Fixing up bogus hostbridge on Maple...\n");
2832 
2833 	mc_reg[0] = 0x0;
2834 	mc_reg[1] = CPC925_MC_START;
2835 	mc_reg[2] = 0x0;
2836 	mc_reg[3] = CPC925_MC_LENGTH;
2837 	prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2838 }
2839 #else
2840 #define fixup_device_tree_maple()
2841 #define fixup_device_tree_maple_memory_controller()
2842 #endif
2843 
2844 #ifdef CONFIG_PPC_CHRP
2845 /*
2846  * Pegasos and BriQ lack the "ranges" property in the isa node
2847  * Pegasos needs decimal IRQ 14/15, not hexadecimal
2848  * Pegasos has the IDE configured in legacy mode, but advertised as native
2849  */
2850 static void __init fixup_device_tree_chrp(void)
2851 {
2852 	phandle ph;
2853 	u32 prop[6];
2854 	u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2855 	char *name;
2856 	int rc;
2857 
2858 	name = "/pci@80000000/isa@c";
2859 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2860 	if (!PHANDLE_VALID(ph)) {
2861 		name = "/pci@ff500000/isa@6";
2862 		ph = call_prom("finddevice", 1, 1, ADDR(name));
2863 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2864 	}
2865 	if (PHANDLE_VALID(ph)) {
2866 		rc = prom_getproplen(ph, "ranges");
2867 		if (rc == 0 || rc == PROM_ERROR) {
2868 			prom_printf("Fixing up missing ISA range on Pegasos...\n");
2869 
2870 			prop[0] = 0x1;
2871 			prop[1] = 0x0;
2872 			prop[2] = rloc;
2873 			prop[3] = 0x0;
2874 			prop[4] = 0x0;
2875 			prop[5] = 0x00010000;
2876 			prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2877 		}
2878 	}
2879 
2880 	name = "/pci@80000000/ide@C,1";
2881 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2882 	if (PHANDLE_VALID(ph)) {
2883 		prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2884 		prop[0] = 14;
2885 		prop[1] = 0x0;
2886 		prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2887 		prom_printf("Fixing up IDE class-code on Pegasos...\n");
2888 		rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2889 		if (rc == sizeof(u32)) {
2890 			prop[0] &= ~0x5;
2891 			prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2892 		}
2893 	}
2894 }
2895 #else
2896 #define fixup_device_tree_chrp()
2897 #endif
2898 
2899 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2900 static void __init fixup_device_tree_pmac(void)
2901 {
2902 	phandle u3, i2c, mpic;
2903 	u32 u3_rev;
2904 	u32 interrupts[2];
2905 	u32 parent;
2906 
2907 	/* Some G5s have a missing interrupt definition, fix it up here */
2908 	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2909 	if (!PHANDLE_VALID(u3))
2910 		return;
2911 	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2912 	if (!PHANDLE_VALID(i2c))
2913 		return;
2914 	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2915 	if (!PHANDLE_VALID(mpic))
2916 		return;
2917 
2918 	/* check if proper rev of u3 */
2919 	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2920 	    == PROM_ERROR)
2921 		return;
2922 	if (u3_rev < 0x35 || u3_rev > 0x39)
2923 		return;
2924 	/* does it need fixup ? */
2925 	if (prom_getproplen(i2c, "interrupts") > 0)
2926 		return;
2927 
2928 	prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2929 
2930 	/* interrupt on this revision of u3 is number 0 and level */
2931 	interrupts[0] = 0;
2932 	interrupts[1] = 1;
2933 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2934 		     &interrupts, sizeof(interrupts));
2935 	parent = (u32)mpic;
2936 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2937 		     &parent, sizeof(parent));
2938 }
2939 #else
2940 #define fixup_device_tree_pmac()
2941 #endif
2942 
2943 #ifdef CONFIG_PPC_EFIKA
2944 /*
2945  * The MPC5200 FEC driver requires a phy-handle property to tell it how
2946  * to talk to the phy.  If the phy-handle property is missing, then this
2947  * function is called to add the appropriate nodes and link it to the
2948  * ethernet node.
2949  */
2950 static void __init fixup_device_tree_efika_add_phy(void)
2951 {
2952 	u32 node;
2953 	char prop[64];
2954 	int rv;
2955 
2956 	/* Check if /builtin/ethernet exists - bail if it doesn't */
2957 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2958 	if (!PHANDLE_VALID(node))
2959 		return;
2960 
2961 	/* Check if the phy-handle property exists - bail if it does */
2962 	rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2963 	if (!rv)
2964 		return;
2965 
2966 	/*
2967 	 * At this point the ethernet device doesn't have a phy described.
2968 	 * Now we need to add the missing phy node and linkage
2969 	 */
2970 
2971 	/* Check for an MDIO bus node - if missing then create one */
2972 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2973 	if (!PHANDLE_VALID(node)) {
2974 		prom_printf("Adding Ethernet MDIO node\n");
2975 		call_prom("interpret", 1, 1,
2976 			" s\" /builtin\" find-device"
2977 			" new-device"
2978 				" 1 encode-int s\" #address-cells\" property"
2979 				" 0 encode-int s\" #size-cells\" property"
2980 				" s\" mdio\" device-name"
2981 				" s\" fsl,mpc5200b-mdio\" encode-string"
2982 				" s\" compatible\" property"
2983 				" 0xf0003000 0x400 reg"
2984 				" 0x2 encode-int"
2985 				" 0x5 encode-int encode+"
2986 				" 0x3 encode-int encode+"
2987 				" s\" interrupts\" property"
2988 			" finish-device");
2989 	}
2990 
2991 	/* Check for a PHY device node - if missing then create one and
2992 	 * give its phandle to the ethernet node */
2993 	node = call_prom("finddevice", 1, 1,
2994 			 ADDR("/builtin/mdio/ethernet-phy"));
2995 	if (!PHANDLE_VALID(node)) {
2996 		prom_printf("Adding Ethernet PHY node\n");
2997 		call_prom("interpret", 1, 1,
2998 			" s\" /builtin/mdio\" find-device"
2999 			" new-device"
3000 				" s\" ethernet-phy\" device-name"
3001 				" 0x10 encode-int s\" reg\" property"
3002 				" my-self"
3003 				" ihandle>phandle"
3004 			" finish-device"
3005 			" s\" /builtin/ethernet\" find-device"
3006 				" encode-int"
3007 				" s\" phy-handle\" property"
3008 			" device-end");
3009 	}
3010 }
3011 
3012 static void __init fixup_device_tree_efika(void)
3013 {
3014 	int sound_irq[3] = { 2, 2, 0 };
3015 	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
3016 				3,4,0, 3,5,0, 3,6,0, 3,7,0,
3017 				3,8,0, 3,9,0, 3,10,0, 3,11,0,
3018 				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
3019 	u32 node;
3020 	char prop[64];
3021 	int rv, len;
3022 
3023 	/* Check if we're really running on an EFIKA */
3024 	node = call_prom("finddevice", 1, 1, ADDR("/"));
3025 	if (!PHANDLE_VALID(node))
3026 		return;
3027 
3028 	rv = prom_getprop(node, "model", prop, sizeof(prop));
3029 	if (rv == PROM_ERROR)
3030 		return;
3031 	if (prom_strcmp(prop, "EFIKA5K2"))
3032 		return;
3033 
3034 	prom_printf("Applying EFIKA device tree fixups\n");
3035 
3036 	/* Claiming to be 'chrp' is death */
3037 	node = call_prom("finddevice", 1, 1, ADDR("/"));
3038 	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
3039 	if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0))
3040 		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
3041 
3042 	/* CODEGEN,description is exposed in /proc/cpuinfo so
3043 	   fix that too */
3044 	rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
3045 	if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
3046 		prom_setprop(node, "/", "CODEGEN,description",
3047 			     "Efika 5200B PowerPC System",
3048 			     sizeof("Efika 5200B PowerPC System"));
3049 
3050 	/* Fixup bestcomm interrupts property */
3051 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
3052 	if (PHANDLE_VALID(node)) {
3053 		len = prom_getproplen(node, "interrupts");
3054 		if (len == 12) {
3055 			prom_printf("Fixing bestcomm interrupts property\n");
3056 			prom_setprop(node, "/builtin/bestcom", "interrupts",
3057 				     bcomm_irq, sizeof(bcomm_irq));
3058 		}
3059 	}
3060 
3061 	/* Fixup sound interrupts property */
3062 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
3063 	if (PHANDLE_VALID(node)) {
3064 		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
3065 		if (rv == PROM_ERROR) {
3066 			prom_printf("Adding sound interrupts property\n");
3067 			prom_setprop(node, "/builtin/sound", "interrupts",
3068 				     sound_irq, sizeof(sound_irq));
3069 		}
3070 	}
3071 
3072 	/* Make sure ethernet phy-handle property exists */
3073 	fixup_device_tree_efika_add_phy();
3074 }
3075 #else
3076 #define fixup_device_tree_efika()
3077 #endif
3078 
3079 #ifdef CONFIG_PPC_PASEMI_NEMO
3080 /*
3081  * CFE supplied on Nemo is broken in several ways; the biggest
3082  * problem is that it reassigns ISA interrupts to unused mpic ints.
3083  * Add an interrupt-controller property for the io-bridge to use
3084  * and correct the ints so we can attach them to an irq_domain
3085  */
3086 static void __init fixup_device_tree_pasemi(void)
3087 {
3088 	u32 interrupts[2], parent, rval, val = 0;
3089 	char *name, *pci_name;
3090 	phandle iob, node;
3091 
3092 	/* Find the root pci node */
3093 	name = "/pxp@0,e0000000";
3094 	iob = call_prom("finddevice", 1, 1, ADDR(name));
3095 	if (!PHANDLE_VALID(iob))
3096 		return;
3097 
3098 	/* check if interrupt-controller node set yet */
3099 	if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
3100 		return;
3101 
3102 	prom_printf("adding interrupt-controller property for SB600...\n");
3103 
3104 	prom_setprop(iob, name, "interrupt-controller", &val, 0);
3105 
3106 	pci_name = "/pxp@0,e0000000/pci@11";
3107 	node = call_prom("finddevice", 1, 1, ADDR(pci_name));
3108 	parent = ADDR(iob);
3109 
3110 	for ( ; prom_next_node(&node); ) {
3111 		/* scan each node for one with an interrupt */
3112 		if (!PHANDLE_VALID(node))
3113 			continue;
3114 
3115 		rval = prom_getproplen(node, "interrupts");
3116 		if (rval == 0 || rval == PROM_ERROR)
3117 			continue;
3118 
3119 		prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
3120 		if ((interrupts[0] < 212) || (interrupts[0] > 222))
3121 			continue;
3122 
3123 		/* found a node, update both interrupts and interrupt-parent */
3124 		if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
3125 			interrupts[0] -= 203;
3126 		if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
3127 			interrupts[0] -= 213;
3128 		if (interrupts[0] == 221)
3129 			interrupts[0] = 14;
3130 		if (interrupts[0] == 222)
3131 			interrupts[0] = 8;
3132 
3133 		prom_setprop(node, pci_name, "interrupts", interrupts,
3134 					sizeof(interrupts));
3135 		prom_setprop(node, pci_name, "interrupt-parent", &parent,
3136 					sizeof(parent));
3137 	}
3138 
3139 	/*
3140 	 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
3141 	 * so that generic isa-bridge code can add the SB600 and its on-board
3142 	 * peripherals.
3143 	 */
3144 	name = "/pxp@0,e0000000/io-bridge@0";
3145 	iob = call_prom("finddevice", 1, 1, ADDR(name));
3146 	if (!PHANDLE_VALID(iob))
3147 		return;
3148 
3149 	/* device_type is already set, just change it. */
3150 
3151 	prom_printf("Changing device_type of SB600 node...\n");
3152 
3153 	prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
3154 }
3155 #else	/* !CONFIG_PPC_PASEMI_NEMO */
3156 static inline void fixup_device_tree_pasemi(void) { }
3157 #endif
3158 
3159 static void __init fixup_device_tree(void)
3160 {
3161 	fixup_device_tree_maple();
3162 	fixup_device_tree_maple_memory_controller();
3163 	fixup_device_tree_chrp();
3164 	fixup_device_tree_pmac();
3165 	fixup_device_tree_efika();
3166 	fixup_device_tree_pasemi();
3167 }
3168 
3169 static void __init prom_find_boot_cpu(void)
3170 {
3171 	__be32 rval;
3172 	ihandle prom_cpu;
3173 	phandle cpu_pkg;
3174 
3175 	rval = 0;
3176 	if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
3177 		return;
3178 	prom_cpu = be32_to_cpu(rval);
3179 
3180 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
3181 
3182 	if (!PHANDLE_VALID(cpu_pkg))
3183 		return;
3184 
3185 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3186 	prom.cpu = be32_to_cpu(rval);
3187 
3188 	prom_debug("Booting CPU hw index = %d\n", prom.cpu);
3189 }
3190 
3191 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3192 {
3193 #ifdef CONFIG_BLK_DEV_INITRD
3194 	if (r3 && r4 && r4 != 0xdeadbeef) {
3195 		__be64 val;
3196 
3197 		prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3198 		prom_initrd_end = prom_initrd_start + r4;
3199 
3200 		val = cpu_to_be64(prom_initrd_start);
3201 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
3202 			     &val, sizeof(val));
3203 		val = cpu_to_be64(prom_initrd_end);
3204 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
3205 			     &val, sizeof(val));
3206 
3207 		reserve_mem(prom_initrd_start,
3208 			    prom_initrd_end - prom_initrd_start);
3209 
3210 		prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
3211 		prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
3212 	}
3213 #endif /* CONFIG_BLK_DEV_INITRD */
3214 }
3215 
3216 #ifdef CONFIG_PPC64
3217 #ifdef CONFIG_RELOCATABLE
3218 static void reloc_toc(void)
3219 {
3220 }
3221 
3222 static void unreloc_toc(void)
3223 {
3224 }
3225 #else
3226 static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
3227 {
3228 	unsigned long i;
3229 	unsigned long *toc_entry;
3230 
3231 	/* Get the start of the TOC by using r2 directly. */
3232 	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
3233 
3234 	for (i = 0; i < nr_entries; i++) {
3235 		*toc_entry = *toc_entry + offset;
3236 		toc_entry++;
3237 	}
3238 }
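/*
 * Illustration (offset value assumed): if the kernel was loaded
 * 0x400000 above its link address, reloc_toc() adds 0x400000 to every
 * 64-bit entry in prom_init's TOC section so r2-relative accesses work
 * at the load address, and unreloc_toc() subtracts it again before we
 * hand over to code that runs at the final, relocated address.
 */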
3239 
3240 static void reloc_toc(void)
3241 {
3242 	unsigned long offset = reloc_offset();
3243 	unsigned long nr_entries =
3244 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3245 
3246 	__reloc_toc(offset, nr_entries);
3247 
3248 	mb();
3249 }
3250 
3251 static void unreloc_toc(void)
3252 {
3253 	unsigned long offset = reloc_offset();
3254 	unsigned long nr_entries =
3255 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3256 
3257 	mb();
3258 
3259 	__reloc_toc(-offset, nr_entries);
3260 }
3261 #endif
3262 #endif
3263 
3264 #ifdef CONFIG_PPC_SVM
3265 /*
3266  * Perform the Enter Secure Mode ultracall.
3267  */
3268 static int enter_secure_mode(unsigned long kbase, unsigned long fdt)
3269 {
3270 	register unsigned long r3 asm("r3") = UV_ESM;
3271 	register unsigned long r4 asm("r4") = kbase;
3272 	register unsigned long r5 asm("r5") = fdt;
3273 
3274 	asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5));
3275 
3276 	return r3;
3277 }
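/*
 * As coded above, the ultracall is made with "sc 2": r3 carries the
 * UV_ESM function token, r4 and r5 carry the kernel base and fdt
 * addresses, and the return status comes back in r3 (U_SUCCESS when
 * the switch to secure mode worked).
 */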
3278 
3279 /*
3280  * Call the Ultravisor to transfer us to secure memory if we have an ESM blob.
3281  */
3282 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3283 {
3284 	int ret;
3285 
3286 	if (!prom_svm_enable)
3287 		return;
3288 
3289 	/* Switch to secure mode. */
3290 	prom_printf("Switching to secure mode.\n");
3291 
3292 	/*
3293 	 * The ultravisor will do an integrity check of the kernel image, but we
3294 	 * relocated it, so the check will fail. Restore the original image by
3295 	 * relocating it back to the kernel virtual base address.
3296 	 */
3297 	if (IS_ENABLED(CONFIG_RELOCATABLE))
3298 		relocate(KERNELBASE);
3299 
3300 	ret = enter_secure_mode(kbase, fdt);
3301 
3302 	/* Relocate the kernel again. */
3303 	if (IS_ENABLED(CONFIG_RELOCATABLE))
3304 		relocate(kbase);
3305 
3306 	if (ret != U_SUCCESS) {
3307 		prom_printf("Returned %d from switching to secure mode.\n", ret);
3308 		prom_rtas_os_term("Switch to secure mode failed.\n");
3309 	}
3310 }
3311 #else
3312 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3313 {
3314 }
3315 #endif /* CONFIG_PPC_SVM */
3316 
3317 /*
3318  * We enter here early on, when the Open Firmware prom is still
3319  * handling exceptions and managing the MMU hash table for us.
3320  */
3321 
3322 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3323 			       unsigned long pp,
3324 			       unsigned long r6, unsigned long r7,
3325 			       unsigned long kbase)
3326 {
3327 	unsigned long hdr;
3328 
3329 #ifdef CONFIG_PPC32
3330 	unsigned long offset = reloc_offset();
3331 	reloc_got2(offset);
3332 #else
3333 	reloc_toc();
3334 #endif
3335 
3336 	/*
3337 	 * First zero the BSS
3338 	 */
3339 	memset(&__bss_start, 0, __bss_stop - __bss_start);
3340 
3341 	/*
3342 	 * Init interface to Open Firmware, get some node references,
3343 	 * like /chosen
3344 	 */
3345 	prom_init_client_services(pp);
3346 
3347 	/*
3348 	 * See if this OF is old enough that we need to do explicit maps
3349 	 * and other workarounds
3350 	 */
3351 	prom_find_mmu();
3352 
3353 	/*
3354 	 * Init prom stdout device
3355 	 */
3356 	prom_init_stdout();
3357 
3358 	prom_printf("Preparing to boot %s", linux_banner);
3359 
3360 	/*
3361 	 * Get default machine type. At this point, we do not differentiate
3362 	 * between pSeries SMP and pSeries LPAR
3363 	 */
3364 	of_platform = prom_find_machine_type();
3365 	prom_printf("Detected machine type: %x\n", of_platform);
3366 
3367 #ifndef CONFIG_NONSTATIC_KERNEL
3368 	/* Bail if this is a kdump kernel. */
3369 	if (PHYSICAL_START > 0)
3370 		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3371 #endif
3372 
3373 	/*
3374 	 * Check for an initrd
3375 	 */
3376 	prom_check_initrd(r3, r4);
3377 
3378 	/*
3379 	 * Do early parsing of command line
3380 	 */
3381 	early_cmdline_parse();
3382 
3383 #ifdef CONFIG_PPC_PSERIES
3384 	/*
3385 	 * On pSeries, inform the firmware about our capabilities
3386 	 */
3387 	if (of_platform == PLATFORM_PSERIES ||
3388 	    of_platform == PLATFORM_PSERIES_LPAR)
3389 		prom_send_capabilities();
3390 #endif
3391 
3392 	/*
3393 	 * Copy the CPU hold code
3394 	 */
3395 	if (of_platform != PLATFORM_POWERMAC)
3396 		copy_and_flush(0, kbase, 0x100, 0);
3397 
3398 	/*
3399 	 * Initialize memory management within prom_init
3400 	 */
3401 	prom_init_mem();
3402 
3403 	/*
3404 	 * Determine which cpu is actually running right _now_
3405 	 */
3406 	prom_find_boot_cpu();
3407 
3408 	/*
3409 	 * Initialize display devices
3410 	 */
3411 	prom_check_displays();
3412 
3413 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3414 	/*
3415 	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3416 	 * that uses the allocator, as we need to make sure we get the top of memory
3417 	 * available for us here...
3418 	 */
3419 	if (of_platform == PLATFORM_PSERIES)
3420 		prom_initialize_tce_table();
3421 #endif
3422 
3423 	/*
3424 	 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3425 	 * have a usable RTAS implementation.
3426 	 */
3427 	if (of_platform != PLATFORM_POWERMAC)
3428 		prom_instantiate_rtas();
3429 
3430 #ifdef CONFIG_PPC64
3431 	/* instantiate sml */
3432 	prom_instantiate_sml();
3433 #endif
3434 
3435 	/*
3436 	 * On non-powermacs, put all CPUs in spin-loops.
3437 	 *
3438 	 * PowerMacs use a different mechanism to spin CPUs
3439 	 *
3440 	 * (This must be done after instantiating RTAS)
3441 	 */
3442 	if (of_platform != PLATFORM_POWERMAC)
3443 		prom_hold_cpus();
3444 
3445 	/*
3446 	 * Fill in some info for use by the kernel later on
3447 	 */
3448 	if (prom_memory_limit) {
3449 		__be64 val = cpu_to_be64(prom_memory_limit);
3450 		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3451 			     &val, sizeof(val));
3452 	}
3453 #ifdef CONFIG_PPC64
3454 	if (prom_iommu_off)
3455 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3456 			     NULL, 0);
3457 
3458 	if (prom_iommu_force_on)
3459 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3460 			     NULL, 0);
3461 
3462 	if (prom_tce_alloc_start) {
3463 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3464 			     &prom_tce_alloc_start,
3465 			     sizeof(prom_tce_alloc_start));
3466 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3467 			     &prom_tce_alloc_end,
3468 			     sizeof(prom_tce_alloc_end));
3469 	}
3470 #endif
3471 
3472 	/*
3473 	 * Fixup any known bugs in the device-tree
3474 	 */
3475 	fixup_device_tree();
3476 
3477 	/*
3478 	 * Now finally create the flattened device-tree
3479 	 */
3480 	prom_printf("copying OF device tree...\n");
3481 	flatten_device_tree();
3482 
3483 	/*
3484 	 * Close stdin in case it is USB and still active on IBM machines.
3485 	 * Unfortunately, quiesce crashes on some powermacs if we have
3486 	 * closed stdin already (in particular the powerbook 101).
3487 	 */
3488 	if (of_platform != PLATFORM_POWERMAC)
3489 		prom_close_stdin();
3490 
3491 	/*
3492 	 * Call OF "quiesce" method to shut down pending DMA's from
3493 	 * devices etc...
3494 	 */
3495 	prom_printf("Quiescing Open Firmware ...\n");
3496 	call_prom("quiesce", 0, 0);
3497 
3498 	/*
3499 	 * And finally, call the kernel passing it the flattened device
3500 	 * tree and NULL as r5, thus triggering the new entry point which
3501 	 * is common to us and kexec
3502 	 */
3503 	hdr = dt_header_start;
3504 
3505 	prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3506 	prom_debug("->dt_header_start=0x%lx\n", hdr);
3507 
3508 #ifdef CONFIG_PPC32
3509 	reloc_got2(-offset);
3510 #else
3511 	unreloc_toc();
3512 #endif
3513 
3514 	/* Move to secure memory if we're supposed to be secure guests. */
3515 	setup_secure_guest(kbase, hdr);
3516 
3517 	__start(hdr, kbase, 0, 0, 0, 0, 0);
3518 
3519 	return 0;
3520 }
3521