1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Procedures for interfacing to Open Firmware.
4  *
5  * Paul Mackerras	August 1996.
6  * Copyright (C) 1996-2005 Paul Mackerras.
7  *
8  *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
9  *    {engebret|bergner}@us.ibm.com
10  */
11 
12 #undef DEBUG_PROM
13 
14 /* we cannot use FORTIFY as it brings in new symbols */
15 #define __NO_FORTIFY
16 
17 #include <stdarg.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <linux/init.h>
21 #include <linux/threads.h>
22 #include <linux/spinlock.h>
23 #include <linux/types.h>
24 #include <linux/pci.h>
25 #include <linux/proc_fs.h>
26 #include <linux/delay.h>
27 #include <linux/initrd.h>
28 #include <linux/bitops.h>
29 #include <linux/pgtable.h>
30 #include <asm/prom.h>
31 #include <asm/rtas.h>
32 #include <asm/page.h>
33 #include <asm/processor.h>
34 #include <asm/irq.h>
35 #include <asm/io.h>
36 #include <asm/smp.h>
37 #include <asm/mmu.h>
38 #include <asm/iommu.h>
39 #include <asm/btext.h>
40 #include <asm/sections.h>
41 #include <asm/machdep.h>
42 #include <asm/asm-prototypes.h>
43 #include <asm/ultravisor-api.h>
44 
45 #include <linux/linux_logo.h>
46 
47 /* All of prom_init bss lives here */
48 #define __prombss __section(".bss.prominit")
49 
50 /*
51  * Eventually bump that one up
52  */
53 #define DEVTREE_CHUNK_SIZE	0x100000
54 
55 /*
56  * This is the size of the local memory reserve map that gets copied
57  * into the boot params passed to the kernel. That size is totally
58  * flexible as the kernel just reads the list until it encounters an
59  * entry with size 0, so it can be changed without breaking binary
60  * compatibility
61  */
62 #define MEM_RESERVE_MAP_SIZE	8
63 
64 /*
65  * prom_init() is called very early on, before the kernel text
66  * and data have been mapped to KERNELBASE.  At this point the code
67  * is running at whatever address it has been loaded at.
68  * On ppc32 we compile with -mrelocatable, which means that references
69  * to extern and static variables get relocated automatically.
70  * ppc64 objects are always relocatable, we just need to relocate the
71  * TOC.
72  *
73  * Because OF may have mapped I/O devices into the area starting at
74  * KERNELBASE, particularly on CHRP machines, we can't safely call
75  * OF once the kernel has been mapped to KERNELBASE.  Therefore all
76  * OF calls must be done within prom_init().
77  *
78  * ADDR is used in calls to call_prom.  The 4th and following
79  * arguments to call_prom should be 32-bit values.
80  * On ppc64, 64 bit values are truncated to 32 bits (and
81  * fortunately don't get interpreted as two arguments).
82  */
83 #define ADDR(x)		(u32)(unsigned long)(x)
84 
85 #ifdef CONFIG_PPC64
86 #define OF_WORKAROUNDS	0
87 #else
88 #define OF_WORKAROUNDS	of_workarounds
89 static int of_workarounds __prombss;
90 #endif
91 
92 #define OF_WA_CLAIM	1	/* do phys/virt claim separately, then map */
93 #define OF_WA_LONGTRAIL	2	/* work around longtrail bugs */
94 
95 #define PROM_BUG() do {						\
96         prom_printf("kernel BUG at %s line 0x%x!\n",		\
97 		    __FILE__, __LINE__);			\
98 	__builtin_trap();					\
99 } while (0)
100 
101 #ifdef DEBUG_PROM
102 #define prom_debug(x...)	prom_printf(x)
103 #else
104 #define prom_debug(x...)	do { } while (0)
105 #endif
106 
107 
108 typedef u32 prom_arg_t;
109 
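/*
 * Open Firmware client interface argument block: a pointer to the service
 * name, the number of input arguments, the number of return values, then
 * the cells themselves (inputs followed by returns), all big-endian.
 */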
110 struct prom_args {
111         __be32 service;
112         __be32 nargs;
113         __be32 nret;
114         __be32 args[10];
115 };
116 
117 struct prom_t {
118 	ihandle root;
119 	phandle chosen;
120 	int cpu;
121 	ihandle stdout;
122 	ihandle mmumap;
123 	ihandle memory;
124 };
125 
126 struct mem_map_entry {
127 	__be64	base;
128 	__be64	size;
129 };
130 
131 typedef __be32 cell_t;
132 
133 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
134 		    unsigned long r6, unsigned long r7, unsigned long r8,
135 		    unsigned long r9);
136 
137 #ifdef CONFIG_PPC64
138 extern int enter_prom(struct prom_args *args, unsigned long entry);
139 #else
140 static inline int enter_prom(struct prom_args *args, unsigned long entry)
141 {
142 	return ((int (*)(struct prom_args *))entry)(args);
143 }
144 #endif
145 
146 extern void copy_and_flush(unsigned long dest, unsigned long src,
147 			   unsigned long size, unsigned long offset);
148 
149 /* prom structure */
150 static struct prom_t __prombss prom;
151 
152 static unsigned long __prombss prom_entry;
153 
154 static char __prombss of_stdout_device[256];
155 static char __prombss prom_scratch[256];
156 
157 static unsigned long __prombss dt_header_start;
158 static unsigned long __prombss dt_struct_start, dt_struct_end;
159 static unsigned long __prombss dt_string_start, dt_string_end;
160 
161 static unsigned long __prombss prom_initrd_start, prom_initrd_end;
162 
163 #ifdef CONFIG_PPC64
164 static int __prombss prom_iommu_force_on;
165 static int __prombss prom_iommu_off;
166 static unsigned long __prombss prom_tce_alloc_start;
167 static unsigned long __prombss prom_tce_alloc_end;
168 #endif
169 
170 #ifdef CONFIG_PPC_PSERIES
171 static bool __prombss prom_radix_disable;
172 static bool __prombss prom_radix_gtse_disable;
173 static bool __prombss prom_xive_disable;
174 #endif
175 
176 #ifdef CONFIG_PPC_SVM
177 static bool __prombss prom_svm_enable;
178 #endif
179 
180 struct platform_support {
181 	bool hash_mmu;
182 	bool radix_mmu;
183 	bool radix_gtse;
184 	bool xive;
185 };
186 
187 /* Platform codes are now obsolete in the kernel. They are only used within
188  * this file and will ultimately go away too. Feel free to change them if you
189  * need to; they are not shared with anything outside of this file anymore.
190  */
191 #define PLATFORM_PSERIES	0x0100
192 #define PLATFORM_PSERIES_LPAR	0x0101
193 #define PLATFORM_LPAR		0x0001
194 #define PLATFORM_POWERMAC	0x0400
195 #define PLATFORM_GENERIC	0x0500
196 
197 static int __prombss of_platform;
198 
199 static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];
200 
201 static unsigned long __prombss prom_memory_limit;
202 
203 static unsigned long __prombss alloc_top;
204 static unsigned long __prombss alloc_top_high;
205 static unsigned long __prombss alloc_bottom;
206 static unsigned long __prombss rmo_top;
207 static unsigned long __prombss ram_top;
208 
209 static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
210 static int __prombss mem_reserve_cnt;
211 
212 static cell_t __prombss regbuf[1024];
213 
214 static bool  __prombss rtas_has_query_cpu_stopped;
215 
216 
217 /*
218  * Error results ... some OF calls will return "-1" on error, some
219  * will return 0, some will return either. To simplify, here are
220  * macros to use with any ihandle or phandle return value to check if
221  * it is valid
222  */
223 
224 #define PROM_ERROR		(-1u)
225 #define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)
226 #define IHANDLE_VALID(i)	((i) != 0 && (i) != PROM_ERROR)
227 
228 /* Copied from lib/string.c and lib/kstrtox.c */
229 
230 static int __init prom_strcmp(const char *cs, const char *ct)
231 {
232 	unsigned char c1, c2;
233 
234 	while (1) {
235 		c1 = *cs++;
236 		c2 = *ct++;
237 		if (c1 != c2)
238 			return c1 < c2 ? -1 : 1;
239 		if (!c1)
240 			break;
241 	}
242 	return 0;
243 }
244 
245 static char __init *prom_strcpy(char *dest, const char *src)
246 {
247 	char *tmp = dest;
248 
249 	while ((*dest++ = *src++) != '\0')
250 		/* nothing */;
251 	return tmp;
252 }
253 
254 static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
255 {
256 	unsigned char c1, c2;
257 
258 	while (count) {
259 		c1 = *cs++;
260 		c2 = *ct++;
261 		if (c1 != c2)
262 			return c1 < c2 ? -1 : 1;
263 		if (!c1)
264 			break;
265 		count--;
266 	}
267 	return 0;
268 }
269 
270 static size_t __init prom_strlen(const char *s)
271 {
272 	const char *sc;
273 
274 	for (sc = s; *sc != '\0'; ++sc)
275 		/* nothing */;
276 	return sc - s;
277 }
278 
279 static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
280 {
281 	const unsigned char *su1, *su2;
282 	int res = 0;
283 
284 	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
285 		if ((res = *su1 - *su2) != 0)
286 			break;
287 	return res;
288 }
289 
290 static char __init *prom_strstr(const char *s1, const char *s2)
291 {
292 	size_t l1, l2;
293 
294 	l2 = prom_strlen(s2);
295 	if (!l2)
296 		return (char *)s1;
297 	l1 = prom_strlen(s1);
298 	while (l1 >= l2) {
299 		l1--;
300 		if (!prom_memcmp(s1, s2, l2))
301 			return (char *)s1;
302 		s1++;
303 	}
304 	return NULL;
305 }
306 
307 static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
308 {
309 	size_t dsize = prom_strlen(dest);
310 	size_t len = prom_strlen(src);
311 	size_t res = dsize + len;
312 
313 	/* This would be a bug */
314 	if (dsize >= count)
315 		return count;
316 
317 	dest += dsize;
318 	count -= dsize;
319 	if (len >= count)
320 		len = count-1;
321 	memcpy(dest, src, len);
322 	dest[len] = 0;
323 	return res;
324 
325 }
326 
327 #ifdef CONFIG_PPC_PSERIES
328 static int __init prom_strtobool(const char *s, bool *res)
329 {
330 	if (!s)
331 		return -EINVAL;
332 
333 	switch (s[0]) {
334 	case 'y':
335 	case 'Y':
336 	case '1':
337 		*res = true;
338 		return 0;
339 	case 'n':
340 	case 'N':
341 	case '0':
342 		*res = false;
343 		return 0;
344 	case 'o':
345 	case 'O':
346 		switch (s[1]) {
347 		case 'n':
348 		case 'N':
349 			*res = true;
350 			return 0;
351 		case 'f':
352 		case 'F':
353 			*res = false;
354 			return 0;
355 		default:
356 			break;
357 		}
358 		break;
359 	default:
360 		break;
361 	}
362 
363 	return -EINVAL;
364 }
365 #endif
366 
367 /* This is the one and *ONLY* place where we actually call open
368  * firmware.
369  */
370 
371 static int __init call_prom(const char *service, int nargs, int nret, ...)
372 {
373 	int i;
374 	struct prom_args args;
375 	va_list list;
376 
377 	args.service = cpu_to_be32(ADDR(service));
378 	args.nargs = cpu_to_be32(nargs);
379 	args.nret = cpu_to_be32(nret);
380 
381 	va_start(list, nret);
382 	for (i = 0; i < nargs; i++)
383 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
384 	va_end(list);
385 
386 	for (i = 0; i < nret; i++)
387 		args.args[nargs+i] = 0;
388 
389 	if (enter_prom(&args, prom_entry) < 0)
390 		return PROM_ERROR;
391 
392 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
393 }
394 
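/*
 * Like call_prom(), but the second and subsequent return cells are copied
 * into rets[]; the first return cell is still the function's return value.
 */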
395 static int __init call_prom_ret(const char *service, int nargs, int nret,
396 				prom_arg_t *rets, ...)
397 {
398 	int i;
399 	struct prom_args args;
400 	va_list list;
401 
402 	args.service = cpu_to_be32(ADDR(service));
403 	args.nargs = cpu_to_be32(nargs);
404 	args.nret = cpu_to_be32(nret);
405 
406 	va_start(list, rets);
407 	for (i = 0; i < nargs; i++)
408 		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
409 	va_end(list);
410 
411 	for (i = 0; i < nret; i++)
412 		args.args[nargs+i] = 0;
413 
414 	if (enter_prom(&args, prom_entry) < 0)
415 		return PROM_ERROR;
416 
417 	if (rets != NULL)
418 		for (i = 1; i < nret; ++i)
419 			rets[i-1] = be32_to_cpu(args.args[nargs+i]);
420 
421 	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
422 }
423 
424 
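/*
 * Write a string to the OF console, translating each '\n' into "\r\n"
 * before handing the bytes to the firmware "write" service.
 */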
425 static void __init prom_print(const char *msg)
426 {
427 	const char *p, *q;
428 
429 	if (prom.stdout == 0)
430 		return;
431 
432 	for (p = msg; *p != 0; p = q) {
433 		for (q = p; *q != 0 && *q != '\n'; ++q)
434 			;
435 		if (q > p)
436 			call_prom("write", 3, 1, prom.stdout, p, q - p);
437 		if (*q == 0)
438 			break;
439 		++q;
440 		call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
441 	}
442 }
443 
444 
445 /*
446  * Both prom_print_hex & prom_print_dec take an unsigned long as input so that
447  * we do not need __udivdi3 or __umoddi3 on 32-bit.
448  */
449 static void __init prom_print_hex(unsigned long val)
450 {
451 	int i, nibbles = sizeof(val)*2;
452 	char buf[sizeof(val)*2+1];
453 
454 	for (i = nibbles-1;  i >= 0;  i--) {
455 		buf[i] = (val & 0xf) + '0';
456 		if (buf[i] > '9')
457 			buf[i] += ('a'-'0'-10);
458 		val >>= 4;
459 	}
460 	buf[nibbles] = '\0';
461 	call_prom("write", 3, 1, prom.stdout, buf, nibbles);
462 }
463 
464 /* max number of decimal digits in an unsigned long */
465 #define UL_DIGITS 21
466 static void __init prom_print_dec(unsigned long val)
467 {
468 	int i, size;
469 	char buf[UL_DIGITS+1];
470 
471 	for (i = UL_DIGITS-1; i >= 0;  i--) {
472 		buf[i] = (val % 10) + '0';
473 		val = val/10;
474 		if (val == 0)
475 			break;
476 	}
477 	/* print only the digits actually generated, starting at buf[i] */
478 	size = UL_DIGITS - i;
479 	call_prom("write", 3, 1, prom.stdout, buf+i, size);
480 }
481 
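/*
 * Minimal printf for the OF console: only %s, %x, %u and %d are handled,
 * each with optional 'l'/'ll' length modifiers.
 */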
482 __printf(1, 2)
483 static void __init prom_printf(const char *format, ...)
484 {
485 	const char *p, *q, *s;
486 	va_list args;
487 	unsigned long v;
488 	long vs;
489 	int n = 0;
490 
491 	va_start(args, format);
492 	for (p = format; *p != 0; p = q) {
493 		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
494 			;
495 		if (q > p)
496 			call_prom("write", 3, 1, prom.stdout, p, q - p);
497 		if (*q == 0)
498 			break;
499 		if (*q == '\n') {
500 			++q;
501 			call_prom("write", 3, 1, prom.stdout,
502 				  ADDR("\r\n"), 2);
503 			continue;
504 		}
505 		++q;
506 		if (*q == 0)
507 			break;
508 		while (*q == 'l') {
509 			++q;
510 			++n;
511 		}
512 		switch (*q) {
513 		case 's':
514 			++q;
515 			s = va_arg(args, const char *);
516 			prom_print(s);
517 			break;
518 		case 'x':
519 			++q;
520 			switch (n) {
521 			case 0:
522 				v = va_arg(args, unsigned int);
523 				break;
524 			case 1:
525 				v = va_arg(args, unsigned long);
526 				break;
527 			case 2:
528 			default:
529 				v = va_arg(args, unsigned long long);
530 				break;
531 			}
532 			prom_print_hex(v);
533 			break;
534 		case 'u':
535 			++q;
536 			switch (n) {
537 			case 0:
538 				v = va_arg(args, unsigned int);
539 				break;
540 			case 1:
541 				v = va_arg(args, unsigned long);
542 				break;
543 			case 2:
544 			default:
545 				v = va_arg(args, unsigned long long);
546 				break;
547 			}
548 			prom_print_dec(v);
549 			break;
550 		case 'd':
551 			++q;
552 			switch (n) {
553 			case 0:
554 				vs = va_arg(args, int);
555 				break;
556 			case 1:
557 				vs = va_arg(args, long);
558 				break;
559 			case 2:
560 			default:
561 				vs = va_arg(args, long long);
562 				break;
563 			}
564 			if (vs < 0) {
565 				prom_print("-");
566 				vs = -vs;
567 			}
568 			prom_print_dec(vs);
569 			break;
570 		}
571 	}
572 	va_end(args);
573 }
574 
575 
576 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
577 				unsigned long align)
578 {
579 
580 	if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
581 		/*
582 		 * Old OF requires we claim physical and virtual separately
583 		 * and then map explicitly (assuming virtual mode)
584 		 */
585 		int ret;
586 		prom_arg_t result;
587 
588 		ret = call_prom_ret("call-method", 5, 2, &result,
589 				    ADDR("claim"), prom.memory,
590 				    align, size, virt);
591 		if (ret != 0 || result == -1)
592 			return -1;
593 		ret = call_prom_ret("call-method", 5, 2, &result,
594 				    ADDR("claim"), prom.mmumap,
595 				    align, size, virt);
596 		if (ret != 0) {
597 			call_prom("call-method", 4, 1, ADDR("release"),
598 				  prom.memory, size, virt);
599 			return -1;
600 		}
601 		/* the 0x12 is M (coherence) + PP == read/write */
602 		call_prom("call-method", 6, 1,
603 			  ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
604 		return virt;
605 	}
606 	return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
607 			 (prom_arg_t)align);
608 }
609 
610 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
611 {
612 	prom_print(reason);
613 	/* Do not call exit because it clears the screen on pmac;
614 	 * it also causes some sort of double-fault on early pmacs */
615 	if (of_platform == PLATFORM_POWERMAC)
616 		asm("trap\n");
617 
618 	/* ToDo: should put up an SRC here on pSeries */
619 	call_prom("exit", 0, 0);
620 
621 	for (;;)			/* should never get here */
622 		;
623 }
624 
625 
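/*
 * Walk the device tree: descend to the first child if there is one,
 * otherwise move to the next sibling (peer), otherwise climb up until a
 * parent with a peer is found.  Returns 0 once the whole tree is done.
 */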
626 static int __init prom_next_node(phandle *nodep)
627 {
628 	phandle node;
629 
630 	if ((node = *nodep) != 0
631 	    && (*nodep = call_prom("child", 1, 1, node)) != 0)
632 		return 1;
633 	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
634 		return 1;
635 	for (;;) {
636 		if ((node = call_prom("parent", 1, 1, node)) == 0)
637 			return 0;
638 		if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
639 			return 1;
640 	}
641 }
642 
643 static inline int __init prom_getprop(phandle node, const char *pname,
644 				      void *value, size_t valuelen)
645 {
646 	return call_prom("getprop", 4, 1, node, ADDR(pname),
647 			 (u32)(unsigned long) value, (u32) valuelen);
648 }
649 
650 static inline int __init prom_getproplen(phandle node, const char *pname)
651 {
652 	return call_prom("getproplen", 2, 1, node, ADDR(pname));
653 }
654 
655 static void add_string(char **str, const char *q)
656 {
657 	char *p = *str;
658 
659 	while (*q)
660 		*p++ = *q++;
661 	*p++ = ' ';
662 	*str = p;
663 }
664 
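/* Format x as hex into a static buffer; returns a pointer to the first digit. */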
665 static char *tohex(unsigned int x)
666 {
667 	static const char digits[] __initconst = "0123456789abcdef";
668 	static char result[9] __prombss;
669 	int i;
670 
671 	result[8] = 0;
672 	i = 8;
673 	do {
674 		--i;
675 		result[i] = digits[x & 0xf];
676 		x >>= 4;
677 	} while (x != 0 && i > 0);
678 	return &result[i];
679 }
680 
681 static int __init prom_setprop(phandle node, const char *nodename,
682 			       const char *pname, void *value, size_t valuelen)
683 {
684 	char cmd[256], *p;
685 
686 	if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
687 		return call_prom("setprop", 4, 1, node, ADDR(pname),
688 				 (u32)(unsigned long) value, (u32) valuelen);
689 
690 	/* gah... setprop doesn't work on longtrail, have to use interpret */
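	/* Build the command: dev <node> <value> <len> <name-addr> <name-len> property */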
691 	p = cmd;
692 	add_string(&p, "dev");
693 	add_string(&p, nodename);
694 	add_string(&p, tohex((u32)(unsigned long) value));
695 	add_string(&p, tohex(valuelen));
696 	add_string(&p, tohex(ADDR(pname)));
697 	add_string(&p, tohex(prom_strlen(pname)));
698 	add_string(&p, "property");
699 	*p = 0;
700 	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
701 }
702 
703 /* We can't use the standard versions because of relocation headaches. */
704 #define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
705 			 || ('a' <= (c) && (c) <= 'f') \
706 			 || ('A' <= (c) && (c) <= 'F'))
707 
708 #define isdigit(c)	('0' <= (c) && (c) <= '9')
709 #define islower(c)	('a' <= (c) && (c) <= 'z')
710 #define toupper(c)	(islower(c) ? ((c) - 'a' + 'A') : (c))
711 
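/* Minimal strtoul: a leading "0" selects octal, "0x"/"0X" selects hex. */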
712 static unsigned long prom_strtoul(const char *cp, const char **endp)
713 {
714 	unsigned long result = 0, base = 10, value;
715 
716 	if (*cp == '0') {
717 		base = 8;
718 		cp++;
719 		if (toupper(*cp) == 'X') {
720 			cp++;
721 			base = 16;
722 		}
723 	}
724 
725 	while (isxdigit(*cp) &&
726 	       (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
727 		result = result * base + value;
728 		cp++;
729 	}
730 
731 	if (endp)
732 		*endp = cp;
733 
734 	return result;
735 }
736 
737 static unsigned long prom_memparse(const char *ptr, const char **retptr)
738 {
739 	unsigned long ret = prom_strtoul(ptr, retptr);
740 	int shift = 0;
741 
742 	/*
743 	 * We can't use a switch here because GCC *may* generate a
744 	 * jump table which won't work, because we're not running at
745 	 * the address we're linked at.
746 	 */
747 	if ('G' == **retptr || 'g' == **retptr)
748 		shift = 30;
749 
750 	if ('M' == **retptr || 'm' == **retptr)
751 		shift = 20;
752 
753 	if ('K' == **retptr || 'k' == **retptr)
754 		shift = 10;
755 
756 	if (shift) {
757 		ret <<= shift;
758 		(*retptr)++;
759 	}
760 
761 	return ret;
762 }
763 
764 /*
765  * Early parsing of the command line passed to the kernel, used for
766  * "mem=x" and the options that affect the iommu
767  */
768 static void __init early_cmdline_parse(void)
769 {
770 	const char *opt;
771 
772 	char *p;
773 	int l = 0;
774 
775 	prom_cmd_line[0] = 0;
776 	p = prom_cmd_line;
777 
778 	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
779 		l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
780 
781 	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
782 		prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
783 			     sizeof(prom_cmd_line));
784 
785 	prom_printf("command line: %s\n", prom_cmd_line);
786 
787 #ifdef CONFIG_PPC64
788 	opt = prom_strstr(prom_cmd_line, "iommu=");
789 	if (opt) {
790 		prom_printf("iommu opt is: %s\n", opt);
791 		opt += 6;
792 		while (*opt && *opt == ' ')
793 			opt++;
794 		if (!prom_strncmp(opt, "off", 3))
795 			prom_iommu_off = 1;
796 		else if (!prom_strncmp(opt, "force", 5))
797 			prom_iommu_force_on = 1;
798 	}
799 #endif
800 	opt = prom_strstr(prom_cmd_line, "mem=");
801 	if (opt) {
802 		opt += 4;
803 		prom_memory_limit = prom_memparse(opt, (const char **)&opt);
804 #ifdef CONFIG_PPC64
805 		/* Align to 16 MB == size of ppc64 large page */
806 		prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
807 #endif
808 	}
809 
810 #ifdef CONFIG_PPC_PSERIES
811 	prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
812 	opt = prom_strstr(prom_cmd_line, "disable_radix");
813 	if (opt) {
814 		opt += 13;
815 		if (*opt && *opt == '=') {
816 			bool val;
817 
818 			if (prom_strtobool(++opt, &val))
819 				prom_radix_disable = false;
820 			else
821 				prom_radix_disable = val;
822 		} else
823 			prom_radix_disable = true;
824 	}
825 	if (prom_radix_disable)
826 		prom_debug("Radix disabled from cmdline\n");
827 
828 	opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
829 	if (opt) {
830 		prom_radix_gtse_disable = true;
831 		prom_debug("Radix GTSE disabled from cmdline\n");
832 	}
833 
834 	opt = prom_strstr(prom_cmd_line, "xive=off");
835 	if (opt) {
836 		prom_xive_disable = true;
837 		prom_debug("XIVE disabled from cmdline\n");
838 	}
839 #endif /* CONFIG_PPC_PSERIES */
840 
841 #ifdef CONFIG_PPC_SVM
842 	opt = prom_strstr(prom_cmd_line, "svm=");
843 	if (opt) {
844 		bool val;
845 
846 		opt += sizeof("svm=") - 1;
847 		if (!prom_strtobool(opt, &val))
848 			prom_svm_enable = val;
849 	}
850 #endif /* CONFIG_PPC_SVM */
851 }
852 
853 #ifdef CONFIG_PPC_PSERIES
854 /*
855  * The architecture vector has an array of PVR mask/value pairs,
856  * followed by # option vectors - 1, followed by the option vectors.
857  *
858  * See prom.h for the definition of the bits specified in the
859  * architecture vector.
860  */
861 
862 /* Firmware expects the value to be n - 1, where n is the # of vectors */
863 #define NUM_VECTORS(n)		((n) - 1)
864 
865 /*
866  * Firmware expects 1 + n - 2, where n is the length of the option vector in
867  * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
868  */
869 #define VECTOR_LENGTH(n)	(1 + (n) - 2)
870 
871 struct option_vector1 {
872 	u8 byte1;
873 	u8 arch_versions;
874 	u8 arch_versions3;
875 } __packed;
876 
877 struct option_vector2 {
878 	u8 byte1;
879 	__be16 reserved;
880 	__be32 real_base;
881 	__be32 real_size;
882 	__be32 virt_base;
883 	__be32 virt_size;
884 	__be32 load_base;
885 	__be32 min_rma;
886 	__be32 min_load;
887 	u8 min_rma_percent;
888 	u8 max_pft_size;
889 } __packed;
890 
891 struct option_vector3 {
892 	u8 byte1;
893 	u8 byte2;
894 } __packed;
895 
896 struct option_vector4 {
897 	u8 byte1;
898 	u8 min_vp_cap;
899 } __packed;
900 
901 struct option_vector5 {
902 	u8 byte1;
903 	u8 byte2;
904 	u8 byte3;
905 	u8 cmo;
906 	u8 associativity;
907 	u8 bin_opts;
908 	u8 micro_checkpoint;
909 	u8 reserved0;
910 	__be32 max_cpus;
911 	__be16 papr_level;
912 	__be16 reserved1;
913 	u8 platform_facilities;
914 	u8 reserved2;
915 	__be16 reserved3;
916 	u8 subprocessors;
917 	u8 byte22;
918 	u8 intarch;
919 	u8 mmu;
920 	u8 hash_ext;
921 	u8 radix_ext;
922 } __packed;
923 
924 struct option_vector6 {
925 	u8 reserved;
926 	u8 secondary_pteg;
927 	u8 os_name;
928 } __packed;
929 
930 struct ibm_arch_vec {
931 	struct { u32 mask, val; } pvrs[14];
932 
933 	u8 num_vectors;
934 
935 	u8 vec1_len;
936 	struct option_vector1 vec1;
937 
938 	u8 vec2_len;
939 	struct option_vector2 vec2;
940 
941 	u8 vec3_len;
942 	struct option_vector3 vec3;
943 
944 	u8 vec4_len;
945 	struct option_vector4 vec4;
946 
947 	u8 vec5_len;
948 	struct option_vector5 vec5;
949 
950 	u8 vec6_len;
951 	struct option_vector6 vec6;
952 } __packed;
953 
954 static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
955 	.pvrs = {
956 		{
957 			.mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
958 			.val  = cpu_to_be32(0x003a0000),
959 		},
960 		{
961 			.mask = cpu_to_be32(0xffff0000), /* POWER6 */
962 			.val  = cpu_to_be32(0x003e0000),
963 		},
964 		{
965 			.mask = cpu_to_be32(0xffff0000), /* POWER7 */
966 			.val  = cpu_to_be32(0x003f0000),
967 		},
968 		{
969 			.mask = cpu_to_be32(0xffff0000), /* POWER8E */
970 			.val  = cpu_to_be32(0x004b0000),
971 		},
972 		{
973 			.mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
974 			.val  = cpu_to_be32(0x004c0000),
975 		},
976 		{
977 			.mask = cpu_to_be32(0xffff0000), /* POWER8 */
978 			.val  = cpu_to_be32(0x004d0000),
979 		},
980 		{
981 			.mask = cpu_to_be32(0xffff0000), /* POWER9 */
982 			.val  = cpu_to_be32(0x004e0000),
983 		},
984 		{
985 			.mask = cpu_to_be32(0xffff0000), /* POWER10 */
986 			.val  = cpu_to_be32(0x00800000),
987 		},
988 		{
989 			.mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
990 			.val  = cpu_to_be32(0x0f000006),
991 		},
992 		{
993 			.mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
994 			.val  = cpu_to_be32(0x0f000005),
995 		},
996 		{
997 			.mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
998 			.val  = cpu_to_be32(0x0f000004),
999 		},
1000 		{
1001 			.mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
1002 			.val  = cpu_to_be32(0x0f000003),
1003 		},
1004 		{
1005 			.mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
1006 			.val  = cpu_to_be32(0x0f000002),
1007 		},
1008 		{
1009 			.mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
1010 			.val  = cpu_to_be32(0x0f000001),
1011 		},
1012 	},
1013 
1014 	.num_vectors = NUM_VECTORS(6),
1015 
1016 	.vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
1017 	.vec1 = {
1018 		.byte1 = 0,
1019 		.arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
1020 				 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
1021 		.arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
1022 	},
1023 
1024 	.vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
1025 	/* option vector 2: Open Firmware options supported */
1026 	.vec2 = {
1027 		.byte1 = OV2_REAL_MODE,
1028 		.reserved = 0,
1029 		.real_base = cpu_to_be32(0xffffffff),
1030 		.real_size = cpu_to_be32(0xffffffff),
1031 		.virt_base = cpu_to_be32(0xffffffff),
1032 		.virt_size = cpu_to_be32(0xffffffff),
1033 		.load_base = cpu_to_be32(0xffffffff),
1034 		.min_rma = cpu_to_be32(512),		/* 512MB min RMA */
1035 		.min_load = cpu_to_be32(0xffffffff),	/* full client load */
1036 		.min_rma_percent = 0,	/* min RMA percentage of total RAM */
1037 		.max_pft_size = 48,	/* max log_2(hash table size) */
1038 	},
1039 
1040 	.vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
1041 	/* option vector 3: processor options supported */
1042 	.vec3 = {
1043 		.byte1 = 0,			/* don't ignore, don't halt */
1044 		.byte2 = OV3_FP | OV3_VMX | OV3_DFP,
1045 	},
1046 
1047 	.vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
1048 	/* option vector 4: IBM PAPR implementation */
1049 	.vec4 = {
1050 		.byte1 = 0,			/* don't halt */
1051 		.min_vp_cap = OV4_MIN_ENT_CAP,	/* minimum VP entitled capacity */
1052 	},
1053 
1054 	.vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
1055 	/* option vector 5: PAPR/OF options */
1056 	.vec5 = {
1057 		.byte1 = 0,				/* don't ignore, don't halt */
1058 		.byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
1059 		OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
1060 #ifdef CONFIG_PCI_MSI
1061 		/* PCIe/MSI support.  Without MSI full PCIe is not supported */
1062 		OV5_FEAT(OV5_MSI),
1063 #else
1064 		0,
1065 #endif
1066 		.byte3 = 0,
1067 		.cmo =
1068 #ifdef CONFIG_PPC_SMLPAR
1069 		OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
1070 #else
1071 		0,
1072 #endif
1073 		.associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
1074 		.bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
1075 		.micro_checkpoint = 0,
1076 		.reserved0 = 0,
1077 		.max_cpus = cpu_to_be32(NR_CPUS),	/* number of cores supported */
1078 		.papr_level = 0,
1079 		.reserved1 = 0,
1080 		.platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
1081 		.reserved2 = 0,
1082 		.reserved3 = 0,
1083 		.subprocessors = 1,
1084 		.byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
1085 		.intarch = 0,
1086 		.mmu = 0,
1087 		.hash_ext = 0,
1088 		.radix_ext = 0,
1089 	},
1090 
1091 	/* option vector 6: IBM PAPR hints */
1092 	.vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
1093 	.vec6 = {
1094 		.reserved = 0,
1095 		.secondary_pteg = 0,
1096 		.os_name = OV6_LINUX,
1097 	},
1098 };
1099 
1100 static struct ibm_arch_vec __prombss ibm_architecture_vec  ____cacheline_aligned;
1101 
1102 /* Old method - ELF header with PT_NOTE sections only works on BE */
1103 #ifdef __BIG_ENDIAN__
1104 static const struct fake_elf {
1105 	Elf32_Ehdr	elfhdr;
1106 	Elf32_Phdr	phdr[2];
1107 	struct chrpnote {
1108 		u32	namesz;
1109 		u32	descsz;
1110 		u32	type;
1111 		char	name[8];	/* "PowerPC" */
1112 		struct chrpdesc {
1113 			u32	real_mode;
1114 			u32	real_base;
1115 			u32	real_size;
1116 			u32	virt_base;
1117 			u32	virt_size;
1118 			u32	load_base;
1119 		} chrpdesc;
1120 	} chrpnote;
1121 	struct rpanote {
1122 		u32	namesz;
1123 		u32	descsz;
1124 		u32	type;
1125 		char	name[24];	/* "IBM,RPA-Client-Config" */
1126 		struct rpadesc {
1127 			u32	lpar_affinity;
1128 			u32	min_rmo_size;
1129 			u32	min_rmo_percent;
1130 			u32	max_pft_size;
1131 			u32	splpar;
1132 			u32	min_load;
1133 			u32	new_mem_def;
1134 			u32	ignore_me;
1135 		} rpadesc;
1136 	} rpanote;
1137 } fake_elf __initconst = {
1138 	.elfhdr = {
1139 		.e_ident = { 0x7f, 'E', 'L', 'F',
1140 			     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
1141 		.e_type = ET_EXEC,	/* yeah right */
1142 		.e_machine = EM_PPC,
1143 		.e_version = EV_CURRENT,
1144 		.e_phoff = offsetof(struct fake_elf, phdr),
1145 		.e_phentsize = sizeof(Elf32_Phdr),
1146 		.e_phnum = 2
1147 	},
1148 	.phdr = {
1149 		[0] = {
1150 			.p_type = PT_NOTE,
1151 			.p_offset = offsetof(struct fake_elf, chrpnote),
1152 			.p_filesz = sizeof(struct chrpnote)
1153 		}, [1] = {
1154 			.p_type = PT_NOTE,
1155 			.p_offset = offsetof(struct fake_elf, rpanote),
1156 			.p_filesz = sizeof(struct rpanote)
1157 		}
1158 	},
1159 	.chrpnote = {
1160 		.namesz = sizeof("PowerPC"),
1161 		.descsz = sizeof(struct chrpdesc),
1162 		.type = 0x1275,
1163 		.name = "PowerPC",
1164 		.chrpdesc = {
1165 			.real_mode = ~0U,	/* ~0 means "don't care" */
1166 			.real_base = ~0U,
1167 			.real_size = ~0U,
1168 			.virt_base = ~0U,
1169 			.virt_size = ~0U,
1170 			.load_base = ~0U
1171 		},
1172 	},
1173 	.rpanote = {
1174 		.namesz = sizeof("IBM,RPA-Client-Config"),
1175 		.descsz = sizeof(struct rpadesc),
1176 		.type = 0x12759999,
1177 		.name = "IBM,RPA-Client-Config",
1178 		.rpadesc = {
1179 			.lpar_affinity = 0,
1180 			.min_rmo_size = 64,	/* in megabytes */
1181 			.min_rmo_percent = 0,
1182 			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
1183 			.splpar = 1,
1184 			.min_load = ~0U,
1185 			.new_mem_def = 0
1186 		}
1187 	}
1188 };
1189 #endif /* __BIG_ENDIAN__ */
1190 
1191 static int __init prom_count_smt_threads(void)
1192 {
1193 	phandle node;
1194 	char type[64];
1195 	unsigned int plen;
1196 
1197 	/* Pick up the first CPU node we can find */
1198 	for (node = 0; prom_next_node(&node); ) {
1199 		type[0] = 0;
1200 		prom_getprop(node, "device_type", type, sizeof(type));
1201 
1202 		if (prom_strcmp(type, "cpu"))
1203 			continue;
1204 		/*
1205 		 * There is an entry for each smt thread, each entry being
1206 		 * 4 bytes long.  All cpus should have the same number of
1207 		 * smt threads, so return after finding the first.
1208 		 */
1209 		plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1210 		if (plen == PROM_ERROR)
1211 			break;
1212 		plen >>= 2;
1213 		prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1214 
1215 		/* Sanity check */
1216 		if (plen < 1 || plen > 64) {
1217 			prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1218 				    (unsigned long)plen);
1219 			return 1;
1220 		}
1221 		return plen;
1222 	}
1223 	prom_debug("No threads found, assuming 1 per core\n");
1224 
1225 	return 1;
1226 
1227 }
1228 
1229 static void __init prom_parse_mmu_model(u8 val,
1230 					struct platform_support *support)
1231 {
1232 	switch (val) {
1233 	case OV5_FEAT(OV5_MMU_DYNAMIC):
1234 	case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1235 		prom_debug("MMU - either supported\n");
1236 		support->radix_mmu = !prom_radix_disable;
1237 		support->hash_mmu = true;
1238 		break;
1239 	case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1240 		prom_debug("MMU - radix only\n");
1241 		if (prom_radix_disable) {
1242 			/*
1243 			 * If we __have__ to do radix, we're better off ignoring
1244 			 * the command line rather than not booting.
1245 			 */
1246 			prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1247 		}
1248 		support->radix_mmu = true;
1249 		break;
1250 	case OV5_FEAT(OV5_MMU_HASH):
1251 		prom_debug("MMU - hash only\n");
1252 		support->hash_mmu = true;
1253 		break;
1254 	default:
1255 		prom_debug("Unknown mmu support option: 0x%x\n", val);
1256 		break;
1257 	}
1258 }
1259 
1260 static void __init prom_parse_xive_model(u8 val,
1261 					 struct platform_support *support)
1262 {
1263 	switch (val) {
1264 	case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1265 		prom_debug("XIVE - either mode supported\n");
1266 		support->xive = !prom_xive_disable;
1267 		break;
1268 	case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1269 		prom_debug("XIVE - exploitation mode supported\n");
1270 		if (prom_xive_disable) {
1271 			/*
1272 			 * If we __have__ to do XIVE, we're better off ignoring
1273 			 * the command line rather than not booting.
1274 			 */
1275 			prom_printf("WARNING: Ignoring cmdline option xive=off\n");
1276 		}
1277 		support->xive = true;
1278 		break;
1279 	case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1280 		prom_debug("XIVE - legacy mode supported\n");
1281 		break;
1282 	default:
1283 		prom_debug("Unknown xive support option: 0x%x\n", val);
1284 		break;
1285 	}
1286 }
1287 
1288 static void __init prom_parse_platform_support(u8 index, u8 val,
1289 					       struct platform_support *support)
1290 {
1291 	switch (index) {
1292 	case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1293 		prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1294 		break;
1295 	case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1296 		if (val & OV5_FEAT(OV5_RADIX_GTSE))
1297 			support->radix_gtse = !prom_radix_gtse_disable;
1298 		break;
1299 	case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1300 		prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1301 				      support);
1302 		break;
1303 	}
1304 }
1305 
1306 static void __init prom_check_platform_support(void)
1307 {
1308 	struct platform_support supported = {
1309 		.hash_mmu = false,
1310 		.radix_mmu = false,
1311 		.radix_gtse = false,
1312 		.xive = false
1313 	};
1314 	int prop_len = prom_getproplen(prom.chosen,
1315 				       "ibm,arch-vec-5-platform-support");
1316 
1317 	/*
1318 	 * First copy the architecture vec template
1319 	 *
1320 	 * use memcpy() instead of *vec = *vec_template so that GCC replaces it
1321 	 * by __memcpy() when KASAN is active
1322 	 */
1323 	memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
1324 	       sizeof(ibm_architecture_vec));
1325 
1326 	if (prop_len > 1) {
1327 		int i;
1328 		u8 vec[8];
1329 		prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1330 			   prop_len);
1331 		if (prop_len > sizeof(vec))
1332 			prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
1333 				    prop_len);
1334 		prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
1335 		for (i = 0; i < prop_len; i += 2) {
1336 			prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
1337 			prom_parse_platform_support(vec[i], vec[i + 1], &supported);
1338 		}
1339 	}
1340 
1341 	if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1342 		/* Radix preferred - Check if GTSE is also supported */
1343 		prom_debug("Asking for radix\n");
1344 		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1345 		if (supported.radix_gtse)
1346 			ibm_architecture_vec.vec5.radix_ext =
1347 					OV5_FEAT(OV5_RADIX_GTSE);
1348 		else
1349 			prom_debug("Radix GTSE isn't supported\n");
1350 	} else if (supported.hash_mmu) {
1351 		/* Default to hash mmu (if we can) */
1352 		prom_debug("Asking for hash\n");
1353 		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1354 	} else {
1355 		/* We're probably on a legacy hypervisor */
1356 		prom_debug("Assuming legacy hash support\n");
1357 	}
1358 
1359 	if (supported.xive) {
1360 		prom_debug("Asking for XIVE\n");
1361 		ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1362 	}
1363 }
1364 
1365 static void __init prom_send_capabilities(void)
1366 {
1367 	ihandle root;
1368 	prom_arg_t ret;
1369 	u32 cores;
1370 
1371 	/* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1372 	prom_check_platform_support();
1373 
1374 	root = call_prom("open", 1, 1, ADDR("/"));
1375 	if (root != 0) {
1376 		/* We need to tell the FW about the number of cores we support.
1377 		 *
1378 		 * To do that, we count the number of threads on the first core
1379 		 * (we assume this is the same for all cores) and use it to
1380 		 * divide NR_CPUS.
1381 		 */
1382 
1383 		cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1384 		prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
1385 			    cores, NR_CPUS);
1386 
1387 		ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1388 
1389 		/* try calling the ibm,client-architecture-support method */
1390 		prom_printf("Calling ibm,client-architecture-support...");
1391 		if (call_prom_ret("call-method", 3, 2, &ret,
1392 				  ADDR("ibm,client-architecture-support"),
1393 				  root,
1394 				  ADDR(&ibm_architecture_vec)) == 0) {
1395 			/* the call exists... */
1396 			if (ret)
1397 				prom_printf("\nWARNING: ibm,client-architecture"
1398 					    "-support call FAILED!\n");
1399 			call_prom("close", 1, 0, root);
1400 			prom_printf(" done\n");
1401 			return;
1402 		}
1403 		call_prom("close", 1, 0, root);
1404 		prom_printf(" not implemented\n");
1405 	}
1406 
1407 #ifdef __BIG_ENDIAN__
1408 	{
1409 		ihandle elfloader;
1410 
1411 		/* no ibm,client-architecture-support call, try the old way */
1412 		elfloader = call_prom("open", 1, 1,
1413 				      ADDR("/packages/elf-loader"));
1414 		if (elfloader == 0) {
1415 			prom_printf("couldn't open /packages/elf-loader\n");
1416 			return;
1417 		}
1418 		call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1419 			  elfloader, ADDR(&fake_elf));
1420 		call_prom("close", 1, 0, elfloader);
1421 	}
1422 #endif /* __BIG_ENDIAN__ */
1423 }
1424 #endif /* CONFIG_PPC_PSERIES */
1425 
1426 /*
1427  * Memory allocation strategy... our layout is normally:
1428  *
1429  *  at 14Mb or more we have vmlinux, then a gap and initrd.  In some
1430  *  rare cases, initrd might end up being before the kernel though.
1431  *  We assume this won't overwrite the final kernel at 0; we have no
1432  *  provision to handle that in this version, but it should hopefully
1433  *  never happen.
1434  *
1435  *  alloc_top is set to the top of RMO, eventually shrunk down if the
1436  *  TCEs overlap
1437  *
1438  *  alloc_bottom is set to the top of kernel/initrd
1439  *
1440  *  from there, allocations are done this way: rtas is allocated
1441  *  topmost, and the device-tree is allocated from the bottom. We try
1442  *  to grow the device-tree allocation as we progress. If we can't,
1443  *  then we fail; we don't currently have a facility to restart
1444  *  elsewhere, but that shouldn't be necessary.
1445  *
1446  *  Note that calls to reserve_mem have to be done explicitly, memory
1447  *  allocated with either alloc_up or alloc_down isn't automatically
1448  *  reserved.
1449  */
1450 
1451 
1452 /*
1453  * Allocates memory in the RMO upward from the kernel/initrd
1454  *
1455  * When align is 0, this is a special case: it means to allocate in place
1456  * at the current location of alloc_bottom or fail (that is basically
1457  * extending the previous allocation). Used for the device-tree flattening.
1458  */
1459 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1460 {
1461 	unsigned long base = alloc_bottom;
1462 	unsigned long addr = 0;
1463 
1464 	if (align)
1465 		base = ALIGN(base, align);
1466 	prom_debug("%s(%lx, %lx)\n", __func__, size, align);
1467 	if (ram_top == 0)
1468 		prom_panic("alloc_up() called with mem not initialized\n");
1469 
1470 	if (align)
1471 		base = ALIGN(alloc_bottom, align);
1472 	else
1473 		base = alloc_bottom;
1474 
1475 	for(; (base + size) <= alloc_top;
1476 	    base = ALIGN(base + 0x100000, align)) {
1477 		prom_debug("    trying: 0x%lx\n\r", base);
1478 		addr = (unsigned long)prom_claim(base, size, 0);
1479 		if (addr != PROM_ERROR && addr != 0)
1480 			break;
1481 		addr = 0;
1482 		if (align == 0)
1483 			break;
1484 	}
1485 	if (addr == 0)
1486 		return 0;
1487 	alloc_bottom = addr + size;
1488 
1489 	prom_debug(" -> %lx\n", addr);
1490 	prom_debug("  alloc_bottom : %lx\n", alloc_bottom);
1491 	prom_debug("  alloc_top    : %lx\n", alloc_top);
1492 	prom_debug("  alloc_top_hi : %lx\n", alloc_top_high);
1493 	prom_debug("  rmo_top      : %lx\n", rmo_top);
1494 	prom_debug("  ram_top      : %lx\n", ram_top);
1495 
1496 	return addr;
1497 }
1498 
1499 /*
1500  * Allocates memory downward, either from top of RMO, or if highmem
1501  * is set, from the top of RAM.  Note that this one doesn't handle
1502  * failures.  It does claim memory if highmem is not set.
1503  */
1504 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1505 				       int highmem)
1506 {
1507 	unsigned long base, addr = 0;
1508 
1509 	prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
1510 		   highmem ? "(high)" : "(low)");
1511 	if (ram_top == 0)
1512 		prom_panic("alloc_down() called with mem not initialized\n");
1513 
1514 	if (highmem) {
1515 		/* Carve out storage for the TCE table. */
1516 		addr = ALIGN_DOWN(alloc_top_high - size, align);
1517 		if (addr <= alloc_bottom)
1518 			return 0;
1519 		/* Will we bump into the RMO? If yes, check that we
1520 		 * didn't overlap existing allocations there; if we did,
1521 		 * we are dead; we must be the first in town!
1522 		 */
1523 		if (addr < rmo_top) {
1524 			/* Good, we are first */
1525 			if (alloc_top == rmo_top)
1526 				alloc_top = rmo_top = addr;
1527 			else
1528 				return 0;
1529 		}
1530 		alloc_top_high = addr;
1531 		goto bail;
1532 	}
1533 
1534 	base = ALIGN_DOWN(alloc_top - size, align);
1535 	for (; base > alloc_bottom;
1536 	     base = ALIGN_DOWN(base - 0x100000, align))  {
1537 		prom_debug("    trying: 0x%lx\n\r", base);
1538 		addr = (unsigned long)prom_claim(base, size, 0);
1539 		if (addr != PROM_ERROR && addr != 0)
1540 			break;
1541 		addr = 0;
1542 	}
1543 	if (addr == 0)
1544 		return 0;
1545 	alloc_top = addr;
1546 
1547  bail:
1548 	prom_debug(" -> %lx\n", addr);
1549 	prom_debug("  alloc_bottom : %lx\n", alloc_bottom);
1550 	prom_debug("  alloc_top    : %lx\n", alloc_top);
1551 	prom_debug("  alloc_top_hi : %lx\n", alloc_top_high);
1552 	prom_debug("  rmo_top      : %lx\n", rmo_top);
1553 	prom_debug("  ram_top      : %lx\n", ram_top);
1554 
1555 	return addr;
1556 }
1557 
1558 /*
1559  * Parse a "reg" cell
1560  */
1561 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1562 {
1563 	cell_t *p = *cellp;
1564 	unsigned long r = 0;
1565 
1566 	/* Ignore more than 2 cells */
1567 	while (s > sizeof(unsigned long) / 4) {
1568 		p++;
1569 		s--;
1570 	}
1571 	r = be32_to_cpu(*p++);
1572 #ifdef CONFIG_PPC64
1573 	if (s > 1) {
1574 		r <<= 32;
1575 		r |= be32_to_cpu(*(p++));
1576 	}
1577 #endif
1578 	*cellp = p;
1579 	return r;
1580 }
1581 
1582 /*
1583  * Very dumb function for adding to the memory reserve list, but
1584  * we don't need anything smarter at this point
1585  *
1586  * XXX Eventually check for collisions.  They should NEVER happen.
1587  * If problems seem to show up, it would be a good start to track
1588  * them down.
1589  */
1590 static void __init reserve_mem(u64 base, u64 size)
1591 {
1592 	u64 top = base + size;
1593 	unsigned long cnt = mem_reserve_cnt;
1594 
1595 	if (size == 0)
1596 		return;
1597 
1598 	/* We need to always keep one empty entry so that we
1599 	 * have our terminator with "size" set to 0 since we are
1600 	 * dumb and just copy this entire array to the boot params
1601 	 */
1602 	base = ALIGN_DOWN(base, PAGE_SIZE);
1603 	top = ALIGN(top, PAGE_SIZE);
1604 	size = top - base;
1605 
1606 	if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1607 		prom_panic("Memory reserve map exhausted !\n");
1608 	mem_reserve_map[cnt].base = cpu_to_be64(base);
1609 	mem_reserve_map[cnt].size = cpu_to_be64(size);
1610 	mem_reserve_cnt = cnt + 1;
1611 }
1612 
1613 /*
1614  * Initialize memory allocation mechanism, parse "memory" nodes and
1615  * obtain that way the top of memory and RMO to set up our local allocator
1616  */
1617 static void __init prom_init_mem(void)
1618 {
1619 	phandle node;
1620 	char type[64];
1621 	unsigned int plen;
1622 	cell_t *p, *endp;
1623 	__be32 val;
1624 	u32 rac, rsc;
1625 
1626 	/*
1627 	 * We iterate the memory nodes to find
1628 	 * 1) top of RMO (first node)
1629 	 * 2) top of memory
1630 	 */
1631 	val = cpu_to_be32(2);
1632 	prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1633 	rac = be32_to_cpu(val);
1634 	val = cpu_to_be32(1);
1635 	prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
1636 	rsc = be32_to_cpu(val);
1637 	prom_debug("root_addr_cells: %x\n", rac);
1638 	prom_debug("root_size_cells: %x\n", rsc);
1639 
1640 	prom_debug("scanning memory:\n");
1641 
1642 	for (node = 0; prom_next_node(&node); ) {
1643 		type[0] = 0;
1644 		prom_getprop(node, "device_type", type, sizeof(type));
1645 
1646 		if (type[0] == 0) {
1647 			/*
1648 			 * CHRP Longtrail machines have no device_type
1649 			 * on the memory node, so check the name instead...
1650 			 */
1651 			prom_getprop(node, "name", type, sizeof(type));
1652 		}
1653 		if (prom_strcmp(type, "memory"))
1654 			continue;
1655 
1656 		plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1657 		if (plen > sizeof(regbuf)) {
1658 			prom_printf("memory node too large for buffer !\n");
1659 			plen = sizeof(regbuf);
1660 		}
1661 		p = regbuf;
1662 		endp = p + (plen / sizeof(cell_t));
1663 
1664 #ifdef DEBUG_PROM
1665 		memset(prom_scratch, 0, sizeof(prom_scratch));
1666 		call_prom("package-to-path", 3, 1, node, prom_scratch,
1667 			  sizeof(prom_scratch) - 1);
1668 		prom_debug("  node %s :\n", prom_scratch);
1669 #endif /* DEBUG_PROM */
1670 
1671 		while ((endp - p) >= (rac + rsc)) {
1672 			unsigned long base, size;
1673 
1674 			base = prom_next_cell(rac, &p);
1675 			size = prom_next_cell(rsc, &p);
1676 
1677 			if (size == 0)
1678 				continue;
1679 			prom_debug("    %lx %lx\n", base, size);
1680 			if (base == 0 && (of_platform & PLATFORM_LPAR))
1681 				rmo_top = size;
1682 			if ((base + size) > ram_top)
1683 				ram_top = base + size;
1684 		}
1685 	}
1686 
1687 	alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1688 
1689 	/*
1690 	 * If prom_memory_limit is set we reduce the upper limits *except* for
1691 	 * alloc_top_high. This must be the real top of RAM so we can put
1692 	 * TCE's up there.
1693 	 */
1694 
1695 	alloc_top_high = ram_top;
1696 
1697 	if (prom_memory_limit) {
1698 		if (prom_memory_limit <= alloc_bottom) {
1699 			prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
1700 				    prom_memory_limit);
1701 			prom_memory_limit = 0;
1702 		} else if (prom_memory_limit >= ram_top) {
1703 			prom_printf("Ignoring mem=%lx >= ram_top.\n",
1704 				    prom_memory_limit);
1705 			prom_memory_limit = 0;
1706 		} else {
1707 			ram_top = prom_memory_limit;
1708 			rmo_top = min(rmo_top, prom_memory_limit);
1709 		}
1710 	}
1711 
1712 	/*
1713 	 * Setup our top alloc point, that is top of RMO or top of
1714 	 * segment 0 when running non-LPAR.
1715 	 * Some RS64 machines have buggy firmware where claims up at
1716 	 * 1GB fail.  Cap at 768MB as a workaround.
1717 	 * Since 768MB is plenty of room, and we need to cap to something
1718 	 * reasonable on 32-bit, cap at 768MB on all machines.
1719 	 */
1720 	if (!rmo_top)
1721 		rmo_top = ram_top;
1722 	rmo_top = min(0x30000000ul, rmo_top);
1723 	alloc_top = rmo_top;
1724 	alloc_top_high = ram_top;
1725 
1726 	/*
1727 	 * Check if we have an initrd after the kernel but still inside
1728 	 * the RMO.  If we do, move our bottom point to after it.
1729 	 */
1730 	if (prom_initrd_start &&
1731 	    prom_initrd_start < rmo_top &&
1732 	    prom_initrd_end > alloc_bottom)
1733 		alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1734 
1735 	prom_printf("memory layout at init:\n");
1736 	prom_printf("  memory_limit : %lx (16 MB aligned)\n",
1737 		    prom_memory_limit);
1738 	prom_printf("  alloc_bottom : %lx\n", alloc_bottom);
1739 	prom_printf("  alloc_top    : %lx\n", alloc_top);
1740 	prom_printf("  alloc_top_hi : %lx\n", alloc_top_high);
1741 	prom_printf("  rmo_top      : %lx\n", rmo_top);
1742 	prom_printf("  ram_top      : %lx\n", ram_top);
1743 }
1744 
1745 static void __init prom_close_stdin(void)
1746 {
1747 	__be32 val;
1748 	ihandle stdin;
1749 
1750 	if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1751 		stdin = be32_to_cpu(val);
1752 		call_prom("close", 1, 0, stdin);
1753 	}
1754 }
1755 
1756 #ifdef CONFIG_PPC_SVM
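/*
 * Issue the H_RTAS hypercall ("sc 1") with the address of an rtas_args
 * block in r4; this lets prom_rtas_os_term() reach RTAS through the
 * hypervisor.
 */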
1757 static int prom_rtas_hcall(uint64_t args)
1758 {
1759 	register uint64_t arg1 asm("r3") = H_RTAS;
1760 	register uint64_t arg2 asm("r4") = args;
1761 
1762 	asm volatile("sc 1\n" : "=r" (arg1) :
1763 			"r" (arg1),
1764 			"r" (arg2) :);
1765 	return arg1;
1766 }
1767 
1768 static struct rtas_args __prombss os_term_args;
1769 
1770 static void __init prom_rtas_os_term(char *str)
1771 {
1772 	phandle rtas_node;
1773 	__be32 val;
1774 	u32 token;
1775 
1776 	prom_debug("%s: start...\n", __func__);
1777 	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1778 	prom_debug("rtas_node: %x\n", rtas_node);
1779 	if (!PHANDLE_VALID(rtas_node))
1780 		return;
1781 
1782 	val = 0;
1783 	prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val));
1784 	token = be32_to_cpu(val);
1785 	prom_debug("ibm,os-term: %x\n", token);
1786 	if (token == 0)
1787 		prom_panic("Could not get token for ibm,os-term\n");
1788 	os_term_args.token = cpu_to_be32(token);
1789 	os_term_args.nargs = cpu_to_be32(1);
1790 	os_term_args.nret = cpu_to_be32(1);
1791 	os_term_args.args[0] = cpu_to_be32(__pa(str));
1792 	prom_rtas_hcall((uint64_t)&os_term_args);
1793 }
1794 #endif /* CONFIG_PPC_SVM */
1795 
1796 /*
1797  * Allocate room for and instantiate RTAS
1798  */
1799 static void __init prom_instantiate_rtas(void)
1800 {
1801 	phandle rtas_node;
1802 	ihandle rtas_inst;
1803 	u32 base, entry = 0;
1804 	__be32 val;
1805 	u32 size = 0;
1806 
1807 	prom_debug("prom_instantiate_rtas: start...\n");
1808 
1809 	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1810 	prom_debug("rtas_node: %x\n", rtas_node);
1811 	if (!PHANDLE_VALID(rtas_node))
1812 		return;
1813 
1814 	val = 0;
1815 	prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1816 	size = be32_to_cpu(val);
1817 	if (size == 0)
1818 		return;
1819 
1820 	base = alloc_down(size, PAGE_SIZE, 0);
1821 	if (base == 0)
1822 		prom_panic("Could not allocate memory for RTAS\n");
1823 
1824 	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1825 	if (!IHANDLE_VALID(rtas_inst)) {
1826 		prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1827 		return;
1828 	}
1829 
1830 	prom_printf("instantiating rtas at 0x%x...", base);
1831 
1832 	if (call_prom_ret("call-method", 3, 2, &entry,
1833 			  ADDR("instantiate-rtas"),
1834 			  rtas_inst, base) != 0
1835 	    || entry == 0) {
1836 		prom_printf(" failed\n");
1837 		return;
1838 	}
1839 	prom_printf(" done\n");
1840 
1841 	reserve_mem(base, size);
1842 
1843 	val = cpu_to_be32(base);
1844 	prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1845 		     &val, sizeof(val));
1846 	val = cpu_to_be32(entry);
1847 	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1848 		     &val, sizeof(val));
1849 
1850 	/* Check if it supports "query-cpu-stopped-state" */
1851 	if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1852 			 &val, sizeof(val)) != PROM_ERROR)
1853 		rtas_has_query_cpu_stopped = true;
1854 
1855 	prom_debug("rtas base     = 0x%x\n", base);
1856 	prom_debug("rtas entry    = 0x%x\n", entry);
1857 	prom_debug("rtas size     = 0x%x\n", size);
1858 
1859 	prom_debug("prom_instantiate_rtas: end...\n");
1860 }
1861 
1862 #ifdef CONFIG_PPC64
1863 /*
1864  * Allocate room for and instantiate Stored Measurement Log (SML)
1865  */
1866 static void __init prom_instantiate_sml(void)
1867 {
1868 	phandle ibmvtpm_node;
1869 	ihandle ibmvtpm_inst;
1870 	u32 entry = 0, size = 0, succ = 0;
1871 	u64 base;
1872 	__be32 val;
1873 
1874 	prom_debug("prom_instantiate_sml: start...\n");
1875 
1876 	ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1877 	prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1878 	if (!PHANDLE_VALID(ibmvtpm_node))
1879 		return;
1880 
1881 	ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1882 	if (!IHANDLE_VALID(ibmvtpm_inst)) {
1883 		prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1884 		return;
1885 	}
1886 
1887 	if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1888 			 &val, sizeof(val)) != PROM_ERROR) {
1889 		if (call_prom_ret("call-method", 2, 2, &succ,
1890 				  ADDR("reformat-sml-to-efi-alignment"),
1891 				  ibmvtpm_inst) != 0 || succ == 0) {
1892 			prom_printf("Reformat SML to EFI alignment failed\n");
1893 			return;
1894 		}
1895 
1896 		if (call_prom_ret("call-method", 2, 2, &size,
1897 				  ADDR("sml-get-allocated-size"),
1898 				  ibmvtpm_inst) != 0 || size == 0) {
1899 			prom_printf("SML get allocated size failed\n");
1900 			return;
1901 		}
1902 	} else {
1903 		if (call_prom_ret("call-method", 2, 2, &size,
1904 				  ADDR("sml-get-handover-size"),
1905 				  ibmvtpm_inst) != 0 || size == 0) {
1906 			prom_printf("SML get handover size failed\n");
1907 			return;
1908 		}
1909 	}
1910 
1911 	base = alloc_down(size, PAGE_SIZE, 0);
1912 	if (base == 0)
1913 		prom_panic("Could not allocate memory for sml\n");
1914 
1915 	prom_printf("instantiating sml at 0x%llx...", base);
1916 
1917 	memset((void *)base, 0, size);
1918 
1919 	if (call_prom_ret("call-method", 4, 2, &entry,
1920 			  ADDR("sml-handover"),
1921 			  ibmvtpm_inst, size, base) != 0 || entry == 0) {
1922 		prom_printf("SML handover failed\n");
1923 		return;
1924 	}
1925 	prom_printf(" done\n");
1926 
1927 	reserve_mem(base, size);
1928 
1929 	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1930 		     &base, sizeof(base));
1931 	prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1932 		     &size, sizeof(size));
1933 
1934 	prom_debug("sml base     = 0x%llx\n", base);
1935 	prom_debug("sml size     = 0x%x\n", size);
1936 
1937 	prom_debug("prom_instantiate_sml: end...\n");
1938 }
1939 
1940 /*
1941  * Allocate room for and initialize TCE tables
1942  */
1943 #ifdef __BIG_ENDIAN__
1944 static void __init prom_initialize_tce_table(void)
1945 {
1946 	phandle node;
1947 	ihandle phb_node;
1948 	char compatible[64], type[64], model[64];
1949 	char *path = prom_scratch;
1950 	u64 base, align;
1951 	u32 minalign, minsize;
1952 	u64 tce_entry, *tce_entryp;
1953 	u64 local_alloc_top, local_alloc_bottom;
1954 	u64 i;
1955 
1956 	if (prom_iommu_off)
1957 		return;
1958 
1959 	prom_debug("starting prom_initialize_tce_table\n");
1960 
1961 	/* Cache current top of allocs so we reserve a single block */
1962 	local_alloc_top = alloc_top_high;
1963 	local_alloc_bottom = local_alloc_top;
1964 
1965 	/* Search all nodes looking for PHBs. */
1966 	for (node = 0; prom_next_node(&node); ) {
1967 		compatible[0] = 0;
1968 		type[0] = 0;
1969 		model[0] = 0;
1970 		prom_getprop(node, "compatible",
1971 			     compatible, sizeof(compatible));
1972 		prom_getprop(node, "device_type", type, sizeof(type));
1973 		prom_getprop(node, "model", model, sizeof(model));
1974 
1975 		if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
1976 			continue;
1977 
1978 		/* Keep the old logic intact to avoid regression. */
1979 		if (compatible[0] != 0) {
1980 			if ((prom_strstr(compatible, "python") == NULL) &&
1981 			    (prom_strstr(compatible, "Speedwagon") == NULL) &&
1982 			    (prom_strstr(compatible, "Winnipeg") == NULL))
1983 				continue;
1984 		} else if (model[0] != 0) {
1985 			if ((prom_strstr(model, "ython") == NULL) &&
1986 			    (prom_strstr(model, "peedwagon") == NULL) &&
1987 			    (prom_strstr(model, "innipeg") == NULL))
1988 				continue;
1989 		}
1990 
1991 		if (prom_getprop(node, "tce-table-minalign", &minalign,
1992 				 sizeof(minalign)) == PROM_ERROR)
1993 			minalign = 0;
1994 		if (prom_getprop(node, "tce-table-minsize", &minsize,
1995 				 sizeof(minsize)) == PROM_ERROR)
1996 			minsize = 4UL << 20;
1997 
1998 		/*
1999 		 * Even though we read what OF wants, we just set the table
2000 		 * size to 4 MB.  This is enough to map 2GB of PCI DMA space.
2001 		 * By doing this, we avoid the pitfalls of trying to DMA to
2002 		 * MMIO space and the DMA alias hole.
2003 		 */
2004 		minsize = 4UL << 20;
2005 
2006 		/* Align to the greater of the align or size */
2007 		align = max(minalign, minsize);
2008 		base = alloc_down(minsize, align, 1);
2009 		if (base == 0)
2010 			prom_panic("ERROR, cannot find space for TCE table.\n");
2011 		if (base < local_alloc_bottom)
2012 			local_alloc_bottom = base;
2013 
2014 		/* It seems OF doesn't null-terminate the path :-( */
2015 		memset(path, 0, sizeof(prom_scratch));
2016 		/* Call OF to setup the TCE hardware */
2017 		if (call_prom("package-to-path", 3, 1, node,
2018 			      path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
2019 			prom_printf("package-to-path failed\n");
2020 		}
2021 
2022 		/* Save away the TCE table attributes for later use. */
2023 		prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
2024 		prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
2025 
2026 		prom_debug("TCE table: %s\n", path);
2027 		prom_debug("\tnode = 0x%x\n", node);
2028 		prom_debug("\tbase = 0x%llx\n", base);
2029 		prom_debug("\tsize = 0x%x\n", minsize);
2030 
2031 		/* Initialize the table to have a one-to-one mapping
2032 		 * over the allocated size.
2033 		 */
2034 		tce_entryp = (u64 *)base;
2035 		for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
2036 			tce_entry = (i << PAGE_SHIFT);
2037 			tce_entry |= 0x3;
2038 			*tce_entryp = tce_entry;
2039 		}
2040 
2041 		prom_printf("opening PHB %s", path);
2042 		phb_node = call_prom("open", 1, 1, path);
2043 		if (phb_node == 0)
2044 			prom_printf("... failed\n");
2045 		else
2046 			prom_printf("... done\n");
2047 
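		/* Point the PHB at the new table: "set-64-bit-addressing"
		 * is passed the table size and the base address split into
		 * two 32-bit halves. */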
2048 		call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
2049 			  phb_node, -1, minsize,
2050 			  (u32) base, (u32) (base >> 32));
2051 		call_prom("close", 1, 0, phb_node);
2052 	}
2053 
2054 	reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
2055 
2056 	/* These are only really needed if there is a memory limit in
2057 	 * effect, but we don't know that here, so export them always. */
2058 	prom_tce_alloc_start = local_alloc_bottom;
2059 	prom_tce_alloc_end = local_alloc_top;
2060 
2061 	/* Flag the first invalid entry */
2062 	prom_debug("ending prom_initialize_tce_table\n");
2063 }
2064 #endif /* __BIG_ENDIAN__ */
2065 #endif /* CONFIG_PPC64 */
2066 
2067 /*
2068  * With CHRP SMP we need to use the OF to start the other processors.
2069  * We can't wait until smp_boot_cpus (the OF is trashed by then)
2070  * so we have to put the processors into a holding pattern controlled
2071  * by the kernel (not OF) before we destroy the OF.
2072  *
2073  * This uses a chunk of low memory, puts some holding pattern
2074  * code there and sends the other processors off to there until
2075  * smp_boot_cpus tells them to do something.  The holding pattern
2076  * checks that address until its cpu # appears there; when it does, that
2077  * cpu jumps to __secondary_start().  smp_boot_cpus() takes care
2078  * of setting those values.
2079  *
2080  * We also use physical address 0x4 here to tell when a cpu
2081  * is in its holding pattern code.
2082  *
2083  * -- Cort
2084  */
2085 /*
2086  * We want to reference the copy of __secondary_hold_* in the
2087  * 0 - 0x100 address range
2088  */
2089 #define LOW_ADDR(x)	(((unsigned long) &(x)) & 0xff)
2090 
2091 static void __init prom_hold_cpus(void)
2092 {
2093 	unsigned long i;
2094 	phandle node;
2095 	char type[64];
2096 	unsigned long *spinloop
2097 		= (void *) LOW_ADDR(__secondary_hold_spinloop);
2098 	unsigned long *acknowledge
2099 		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
2100 	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
2101 
2102 	/*
2103 	 * On pseries, if RTAS supports "query-cpu-stopped-state",
2104 	 * we skip this stage; the CPUs will be started by the
2105 	 * kernel using RTAS.
2106 	 */
2107 	if ((of_platform == PLATFORM_PSERIES ||
2108 	     of_platform == PLATFORM_PSERIES_LPAR) &&
2109 	    rtas_has_query_cpu_stopped) {
2110 		prom_printf("prom_hold_cpus: skipped\n");
2111 		return;
2112 	}
2113 
2114 	prom_debug("prom_hold_cpus: start...\n");
2115 	prom_debug("    1) spinloop       = 0x%lx\n", (unsigned long)spinloop);
2116 	prom_debug("    1) *spinloop      = 0x%lx\n", *spinloop);
2117 	prom_debug("    1) acknowledge    = 0x%lx\n",
2118 		   (unsigned long)acknowledge);
2119 	prom_debug("    1) *acknowledge   = 0x%lx\n", *acknowledge);
2120 	prom_debug("    1) secondary_hold = 0x%lx\n", secondary_hold);
2121 
2122 	/* Set the common spinloop variable, so all of the secondary cpus
2123 	 * will block when they are awakened from their OF spinloop.
2124 	 * This must occur for both SMP and non SMP kernels, since OF will
2125 	 * be trashed when we move the kernel.
2126 	 */
2127 	*spinloop = 0;
2128 
2129 	/* look for cpus */
2130 	for (node = 0; prom_next_node(&node); ) {
2131 		unsigned int cpu_no;
2132 		__be32 reg;
2133 
2134 		type[0] = 0;
2135 		prom_getprop(node, "device_type", type, sizeof(type));
2136 		if (prom_strcmp(type, "cpu") != 0)
2137 			continue;
2138 
2139 		/* Skip non-configured cpus. */
2140 		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
2141 			if (prom_strcmp(type, "okay") != 0)
2142 				continue;
2143 
2144 		reg = cpu_to_be32(-1); /* make sparse happy */
2145 		prom_getprop(node, "reg", &reg, sizeof(reg));
2146 		cpu_no = be32_to_cpu(reg);
2147 
2148 		prom_debug("cpu hw idx   = %u\n", cpu_no);
2149 
2150 		/* Init the acknowledge var which will be reset by
2151 		 * the secondary cpu when it awakens from its OF
2152 		 * spinloop.
2153 		 */
2154 		*acknowledge = (unsigned long)-1;
2155 
2156 		if (cpu_no != prom.cpu) {
2157 			/* Primary Thread of non-boot cpu or any thread */
2158 			prom_printf("starting cpu hw idx %u... ", cpu_no);
2159 			call_prom("start-cpu", 3, 0, node,
2160 				  secondary_hold, cpu_no);
2161 
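			/* Busy-wait (bounded) for the secondary to report in
			 * by writing its hw id into *acknowledge. */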
2162 			for (i = 0; (i < 100000000) &&
2163 			     (*acknowledge == ((unsigned long)-1)); i++ )
2164 				mb();
2165 
2166 			if (*acknowledge == cpu_no)
2167 				prom_printf("done\n");
2168 			else
2169 				prom_printf("failed: %lx\n", *acknowledge);
2170 		}
2171 #ifdef CONFIG_SMP
2172 		else
2173 			prom_printf("boot cpu hw idx %u\n", cpu_no);
2174 #endif /* CONFIG_SMP */
2175 	}
2176 
2177 	prom_debug("prom_hold_cpus: end...\n");
2178 }
2179 
2180 
2181 static void __init prom_init_client_services(unsigned long pp)
2182 {
2183 	/* Get a handle to the prom entry point before anything else */
2184 	prom_entry = pp;
2185 
2186 	/* get a handle for the stdout device */
2187 	prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2188 	if (!PHANDLE_VALID(prom.chosen))
2189 		prom_panic("cannot find chosen"); /* msg won't be printed :( */
2190 
2191 	/* get device tree root */
2192 	prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2193 	if (!PHANDLE_VALID(prom.root))
2194 		prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2195 
2196 	prom.mmumap = 0;
2197 }
2198 
2199 #ifdef CONFIG_PPC32
2200 /*
2201  * For really old powermacs, we need to map things we claim.
2202  * For that, we need the ihandle of the mmu.
2203  * Also, on the longtrail, we need to work around other bugs.
2204  */
2205 static void __init prom_find_mmu(void)
2206 {
2207 	phandle oprom;
2208 	char version[64];
2209 
2210 	oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2211 	if (!PHANDLE_VALID(oprom))
2212 		return;
2213 	if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2214 		return;
2215 	version[sizeof(version) - 1] = 0;
2216 	/* XXX might need to add other versions here */
2217 	if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
2218 		of_workarounds = OF_WA_CLAIM;
2219 	else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
2220 		of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2221 		call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2222 	} else
2223 		return;
2224 	prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2225 	prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2226 		     sizeof(prom.mmumap));
2227 	prom.mmumap = be32_to_cpu(prom.mmumap);
2228 	if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2229 		of_workarounds &= ~OF_WA_CLAIM;		/* hmmm */
2230 }
2231 #else
2232 #define prom_find_mmu()
2233 #endif
2234 
2235 static void __init prom_init_stdout(void)
2236 {
2237 	char *path = of_stdout_device;
2238 	char type[16];
2239 	phandle stdout_node;
2240 	__be32 val;
2241 
2242 	if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2243 		prom_panic("cannot find stdout");
2244 
2245 	prom.stdout = be32_to_cpu(val);
2246 
2247 	/* Get the full OF pathname of the stdout device */
2248 	memset(path, 0, 256);
2249 	call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2250 	prom_printf("OF stdout device is: %s\n", of_stdout_device);
2251 	prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2252 		     path, prom_strlen(path) + 1);
2253 
2254 	/* instance-to-package fails on PA-Semi */
2255 	stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2256 	if (stdout_node != PROM_ERROR) {
2257 		val = cpu_to_be32(stdout_node);
2258 
2259 		/* If it's a display, note it */
2260 		memset(type, 0, sizeof(type));
2261 		prom_getprop(stdout_node, "device_type", type, sizeof(type));
2262 		if (prom_strcmp(type, "display") == 0)
2263 			prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2264 	}
2265 }
2266 
2267 static int __init prom_find_machine_type(void)
2268 {
2269 	char compat[256];
2270 	int len, i = 0;
2271 #ifdef CONFIG_PPC64
2272 	phandle rtas;
2273 	int x;
2274 #endif
2275 
2276 	/* Look for a PowerMac or a Cell */
2277 	len = prom_getprop(prom.root, "compatible",
2278 			   compat, sizeof(compat)-1);
2279 	if (len > 0) {
2280 		compat[len] = 0;
2281 		while (i < len) {
2282 			char *p = &compat[i];
2283 			int sl = prom_strlen(p);
2284 			if (sl == 0)
2285 				break;
2286 			if (prom_strstr(p, "Power Macintosh") ||
2287 			    prom_strstr(p, "MacRISC"))
2288 				return PLATFORM_POWERMAC;
2289 #ifdef CONFIG_PPC64
2290 			/* We must make sure we don't detect the IBM Cell
2291 			 * blades as pSeries due to some firmware issues,
2292 			 * so we do it here.
2293 			 */
2294 			if (prom_strstr(p, "IBM,CBEA") ||
2295 			    prom_strstr(p, "IBM,CPBW-1.0"))
2296 				return PLATFORM_GENERIC;
2297 #endif /* CONFIG_PPC64 */
2298 			i += sl + 1;
2299 		}
2300 	}
2301 #ifdef CONFIG_PPC64
2302 	/* Try to figure out if it's an IBM pSeries or any other
2303 	 * PAPR compliant platform. We assume it is if :
2304 	 *  - /device_type is "chrp" (please, do NOT use that for future
2305 	 *    non-IBM designs!)
2306 	 *  - it has /rtas
2307 	 */
2308 	len = prom_getprop(prom.root, "device_type",
2309 			   compat, sizeof(compat)-1);
2310 	if (len <= 0)
2311 		return PLATFORM_GENERIC;
2312 	if (prom_strcmp(compat, "chrp"))
2313 		return PLATFORM_GENERIC;
2314 
2315 	/* Default to pSeries. We need to know if we are running LPAR */
2316 	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2317 	if (!PHANDLE_VALID(rtas))
2318 		return PLATFORM_GENERIC;
2319 	x = prom_getproplen(rtas, "ibm,hypertas-functions");
2320 	if (x != PROM_ERROR) {
2321 		prom_debug("Hypertas detected, assuming LPAR !\n");
2322 		return PLATFORM_PSERIES_LPAR;
2323 	}
2324 	return PLATFORM_PSERIES;
2325 #else
2326 	return PLATFORM_GENERIC;
2327 #endif
2328 }
2329 
2330 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2331 {
2332 	return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2333 }
2334 
2335 /*
2336  * If we have a display that we don't know how to drive,
2337  * we will want to try to execute OF's open method for it
2338  * later.  However, OF will probably fall over if we do that
2339  * after we've taken over the MMU.
2340  * So we check whether we will need to open the display,
2341  * and if so, open it now.
2342  */
2343 static void __init prom_check_displays(void)
2344 {
2345 	char type[16], *path;
2346 	phandle node;
2347 	ihandle ih;
2348 	int i;
2349 
2350 	static const unsigned char default_colors[] __initconst = {
2351 		0x00, 0x00, 0x00,
2352 		0x00, 0x00, 0xaa,
2353 		0x00, 0xaa, 0x00,
2354 		0x00, 0xaa, 0xaa,
2355 		0xaa, 0x00, 0x00,
2356 		0xaa, 0x00, 0xaa,
2357 		0xaa, 0xaa, 0x00,
2358 		0xaa, 0xaa, 0xaa,
2359 		0x55, 0x55, 0x55,
2360 		0x55, 0x55, 0xff,
2361 		0x55, 0xff, 0x55,
2362 		0x55, 0xff, 0xff,
2363 		0xff, 0x55, 0x55,
2364 		0xff, 0x55, 0xff,
2365 		0xff, 0xff, 0x55,
2366 		0xff, 0xff, 0xff
2367 	};
2368 	const unsigned char *clut;
2369 
2370 	prom_debug("Looking for displays\n");
2371 	for (node = 0; prom_next_node(&node); ) {
2372 		memset(type, 0, sizeof(type));
2373 		prom_getprop(node, "device_type", type, sizeof(type));
2374 		if (prom_strcmp(type, "display") != 0)
2375 			continue;
2376 
2377 		/* It seems OF doesn't null-terminate the path :-( */
2378 		path = prom_scratch;
2379 		memset(path, 0, sizeof(prom_scratch));
2380 
2381 		/*
2382 		 * leave some room at the end of the path for appending extra
2383 		 * arguments
2384 		 */
2385 		if (call_prom("package-to-path", 3, 1, node, path,
2386 			      sizeof(prom_scratch) - 10) == PROM_ERROR)
2387 			continue;
2388 		prom_printf("found display   : %s, opening... ", path);
2389 
2390 		ih = call_prom("open", 1, 1, path);
2391 		if (ih == 0) {
2392 			prom_printf("failed\n");
2393 			continue;
2394 		}
2395 
2396 		/* Success */
2397 		prom_printf("done\n");
2398 		prom_setprop(node, path, "linux,opened", NULL, 0);
2399 
2400 		/* Setup a usable color table when the appropriate
2401 		 * method is available. Should update this to set-colors */
2402 		clut = default_colors;
2403 		for (i = 0; i < 16; i++, clut += 3)
2404 			if (prom_set_color(ih, i, clut[0], clut[1],
2405 					   clut[2]) != 0)
2406 				break;
2407 
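		/* If the 224-colour Linux logo palette is built in, program
		 * it as well, starting at colour index 32. */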
2408 #ifdef CONFIG_LOGO_LINUX_CLUT224
2409 		clut = PTRRELOC(logo_linux_clut224.clut);
2410 		for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2411 			if (prom_set_color(ih, i + 32, clut[0], clut[1],
2412 					   clut[2]) != 0)
2413 				break;
2414 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
2415 
2416 #ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2417 		if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2418 		    PROM_ERROR) {
2419 			u32 width, height, pitch, addr;
2420 
2421 			prom_printf("Setting btext !\n");
2422 
2423 			if (prom_getprop(node, "width", &width, 4) == PROM_ERROR)
2424 				return;
2425 
2426 			if (prom_getprop(node, "height", &height, 4) == PROM_ERROR)
2427 				return;
2428 
2429 			if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR)
2430 				return;
2431 
2432 			if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR)
2433 				return;
2434 
2435 			prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2436 				    width, height, pitch, addr);
2437 			btext_setup_display(width, height, 8, pitch, addr);
2438 			btext_prepare_BAT();
2439 		}
2440 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2441 	}
2442 }
2443 
2444 
2445 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2446 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2447 			      unsigned long needed, unsigned long align)
2448 {
2449 	void *ret;
2450 
2451 	*mem_start = ALIGN(*mem_start, align);
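	/* If the aligned request doesn't fit in the current chunk, claim
	 * another chunk (at most DEVTREE_CHUNK_SIZE) from the allocator. */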
2452 	while ((*mem_start + needed) > *mem_end) {
2453 		unsigned long room, chunk;
2454 
2455 		prom_debug("Chunk exhausted, claiming more at %lx...\n",
2456 			   alloc_bottom);
2457 		room = alloc_top - alloc_bottom;
2458 		if (room > DEVTREE_CHUNK_SIZE)
2459 			room = DEVTREE_CHUNK_SIZE;
2460 		if (room < PAGE_SIZE)
2461 			prom_panic("No memory for flatten_device_tree "
2462 				   "(no room)\n");
2463 		chunk = alloc_up(room, 0);
2464 		if (chunk == 0)
2465 			prom_panic("No memory for flatten_device_tree "
2466 				   "(claim failed)\n");
2467 		*mem_end = chunk + room;
2468 	}
2469 
2470 	ret = (void *)*mem_start;
2471 	*mem_start += needed;
2472 
2473 	return ret;
2474 }
2475 
2476 #define dt_push_token(token, mem_start, mem_end) do { 			\
2477 		void *room = make_room(mem_start, mem_end, 4, 4);	\
2478 		*(__be32 *)room = cpu_to_be32(token);			\
2479 	} while(0)
2480 
2481 static unsigned long __init dt_find_string(char *str)
2482 {
2483 	char *s, *os;
2484 
2485 	s = os = (char *)dt_string_start;
2486 	s += 4;
2487 	while (s <  (char *)dt_string_end) {
2488 		if (prom_strcmp(s, str) == 0)
2489 			return s - os;
2490 		s += prom_strlen(s) + 1;
2491 	}
2492 	return 0;
2493 }
2494 
2495 /*
2496  * The Open Firmware 1275 specification states properties must be 31 bytes or
2497  * less, however not all firmwares obey this. Make it 64 bytes to be safe.
2498  */
2499 #define MAX_PROPERTY_NAME 64
2500 
2501 static void __init scan_dt_build_strings(phandle node,
2502 					 unsigned long *mem_start,
2503 					 unsigned long *mem_end)
2504 {
2505 	char *prev_name, *namep, *sstart;
2506 	unsigned long soff;
2507 	phandle child;
2508 
2509 	sstart =  (char *)dt_string_start;
2510 
2511 	/* get and store all property names */
2512 	prev_name = "";
2513 	for (;;) {
2514 		/* 64 is max len of name including nul. */
2515 		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2516 		if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2517 			/* No more properties: unwind alloc */
2518 			*mem_start = (unsigned long)namep;
2519 			break;
2520 		}
2521 
2522  		/* skip "name" */
2523 		if (prom_strcmp(namep, "name") == 0) {
2524  			*mem_start = (unsigned long)namep;
2525  			prev_name = "name";
2526  			continue;
2527  		}
2528 		/* get/create string entry */
2529 		soff = dt_find_string(namep);
2530 		if (soff != 0) {
2531 			*mem_start = (unsigned long)namep;
2532 			namep = sstart + soff;
2533 		} else {
2534 			/* Trim off some if we can */
2535 			*mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2536 			dt_string_end = *mem_start;
2537 		}
2538 		prev_name = namep;
2539 	}
2540 
2541 	/* do all our children */
2542 	child = call_prom("child", 1, 1, node);
2543 	while (child != 0) {
2544 		scan_dt_build_strings(child, mem_start, mem_end);
2545 		child = call_prom("peer", 1, 1, child);
2546 	}
2547 }
2548 
2549 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2550 					unsigned long *mem_end)
2551 {
2552 	phandle child;
2553 	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2554 	unsigned long soff;
2555 	unsigned char *valp;
2556 	static char pname[MAX_PROPERTY_NAME] __prombss;
2557 	int l, room, has_phandle = 0;
2558 
2559 	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2560 
2561 	/* get the node's full name */
2562 	namep = (char *)*mem_start;
2563 	room = *mem_end - *mem_start;
2564 	if (room > 255)
2565 		room = 255;
2566 	l = call_prom("package-to-path", 3, 1, node, namep, room);
2567 	if (l >= 0) {
2568 		/* Didn't fit?  Get more room. */
2569 		if (l >= room) {
2570 			if (l >= *mem_end - *mem_start)
2571 				namep = make_room(mem_start, mem_end, l+1, 1);
2572 			call_prom("package-to-path", 3, 1, node, namep, l);
2573 		}
2574 		namep[l] = '\0';
2575 
2576 		/* Fixup an Apple bug where they have bogus \0 chars in the
2577 		 * middle of the path in some properties, and extract
2578 		 * the unit name (everything after the last '/').
2579 		 */
2580 		for (lp = p = namep, ep = namep + l; p < ep; p++) {
2581 			if (*p == '/')
2582 				lp = namep;
2583 			else if (*p != 0)
2584 				*lp++ = *p;
2585 		}
2586 		*lp = 0;
2587 		*mem_start = ALIGN((unsigned long)lp + 1, 4);
2588 	}
2589 
2590 	/* get it again for debugging */
2591 	path = prom_scratch;
2592 	memset(path, 0, sizeof(prom_scratch));
2593 	call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
2594 
2595 	/* get and store all properties */
2596 	prev_name = "";
2597 	sstart = (char *)dt_string_start;
2598 	for (;;) {
2599 		if (call_prom("nextprop", 3, 1, node, prev_name,
2600 			      pname) != 1)
2601 			break;
2602 
2603  		/* skip "name" */
2604 		if (prom_strcmp(pname, "name") == 0) {
2605  			prev_name = "name";
2606  			continue;
2607  		}
2608 
2609 		/* find string offset */
2610 		soff = dt_find_string(pname);
2611 		if (soff == 0) {
2612 			prom_printf("WARNING: Can't find string index for"
2613 				    " <%s>, node %s\n", pname, path);
2614 			break;
2615 		}
2616 		prev_name = sstart + soff;
2617 
2618 		/* get length */
2619 		l = call_prom("getproplen", 2, 1, node, pname);
2620 
2621 		/* sanity checks */
2622 		if (l == PROM_ERROR)
2623 			continue;
2624 
2625 		/* push property head */
2626 		dt_push_token(OF_DT_PROP, mem_start, mem_end);
2627 		dt_push_token(l, mem_start, mem_end);
2628 		dt_push_token(soff, mem_start, mem_end);
2629 
2630 		/* push property content */
2631 		valp = make_room(mem_start, mem_end, l, 4);
2632 		call_prom("getprop", 4, 1, node, pname, valp, l);
2633 		*mem_start = ALIGN(*mem_start, 4);
2634 
2635 		if (!prom_strcmp(pname, "phandle"))
2636 			has_phandle = 1;
2637 	}
2638 
2639 	/* Add a "phandle" property if none already exist */
2640 	if (!has_phandle) {
2641 		soff = dt_find_string("phandle");
2642 		if (soff == 0)
2643 			prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
2644 		else {
2645 			dt_push_token(OF_DT_PROP, mem_start, mem_end);
2646 			dt_push_token(4, mem_start, mem_end);
2647 			dt_push_token(soff, mem_start, mem_end);
2648 			valp = make_room(mem_start, mem_end, 4, 4);
2649 			*(__be32 *)valp = cpu_to_be32(node);
2650 		}
2651 	}
2652 
2653 	/* do all our children */
2654 	child = call_prom("child", 1, 1, node);
2655 	while (child != 0) {
2656 		scan_dt_build_struct(child, mem_start, mem_end);
2657 		child = call_prom("peer", 1, 1, child);
2658 	}
2659 
2660 	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2661 }
2662 
2663 static void __init flatten_device_tree(void)
2664 {
2665 	phandle root;
2666 	unsigned long mem_start, mem_end, room;
2667 	struct boot_param_header *hdr;
2668 	char *namep;
2669 	u64 *rsvmap;
2670 
2671 	/*
2672 	 * Check how much room we have between alloc top & bottom (+/- a
2673 	 * few pages), crop to 1MB, as this is our "chunk" size
2674 	 */
2675 	room = alloc_top - alloc_bottom - 0x4000;
2676 	if (room > DEVTREE_CHUNK_SIZE)
2677 		room = DEVTREE_CHUNK_SIZE;
2678 	prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
2679 
2680 	/* Now try to claim that */
2681 	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2682 	if (mem_start == 0)
2683 		prom_panic("Can't allocate initial device-tree chunk\n");
2684 	mem_end = mem_start + room;
2685 
2686 	/* Get root of tree */
2687 	root = call_prom("peer", 1, 1, (phandle)0);
2688 	if (root == (phandle)0)
2689 		prom_panic ("couldn't get device tree root\n");
2690 
2691 	/* Build header and make room for mem rsv map */
2692 	mem_start = ALIGN(mem_start, 4);
2693 	hdr = make_room(&mem_start, &mem_end,
2694 			sizeof(struct boot_param_header), 4);
2695 	dt_header_start = (unsigned long)hdr;
2696 	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2697 
2698 	/* Start of strings */
2699 	mem_start = PAGE_ALIGN(mem_start);
2700 	dt_string_start = mem_start;
2701 	mem_start += 4; /* hole */
2702 
2703 	/* Add "phandle" in there, we'll need it */
2704 	namep = make_room(&mem_start, &mem_end, 16, 1);
2705 	prom_strcpy(namep, "phandle");
2706 	mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2707 
2708 	/* Build string array */
2709 	prom_printf("Building dt strings...\n");
2710 	scan_dt_build_strings(root, &mem_start, &mem_end);
2711 	dt_string_end = mem_start;
2712 
2713 	/* Build structure */
2714 	mem_start = PAGE_ALIGN(mem_start);
2715 	dt_struct_start = mem_start;
2716 	prom_printf("Building dt structure...\n");
2717 	scan_dt_build_struct(root, &mem_start, &mem_end);
2718 	dt_push_token(OF_DT_END, &mem_start, &mem_end);
2719 	dt_struct_end = PAGE_ALIGN(mem_start);
2720 
2721 	/* Finish header */
2722 	hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2723 	hdr->magic = cpu_to_be32(OF_DT_HEADER);
2724 	hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2725 	hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2726 	hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2727 	hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2728 	hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2729 	hdr->version = cpu_to_be32(OF_DT_VERSION);
2730 	/* Version 16 is not backward compatible */
2731 	hdr->last_comp_version = cpu_to_be32(0x10);
2732 
2733 	/* Copy the reserve map in */
2734 	memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2735 
2736 #ifdef DEBUG_PROM
2737 	{
2738 		int i;
2739 		prom_printf("reserved memory map:\n");
2740 		for (i = 0; i < mem_reserve_cnt; i++)
2741 			prom_printf("  %llx - %llx\n",
2742 				    be64_to_cpu(mem_reserve_map[i].base),
2743 				    be64_to_cpu(mem_reserve_map[i].size));
2744 	}
2745 #endif
2746 	/* Bump mem_reserve_cnt to cause further reservations to fail
2747 	 * since it's too late.
2748 	 */
2749 	mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2750 
2751 	prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
2752 		    dt_string_start, dt_string_end);
2753 	prom_printf("Device tree struct  0x%lx -> 0x%lx\n",
2754 		    dt_struct_start, dt_struct_end);
2755 }
2756 
2757 #ifdef CONFIG_PPC_MAPLE
2758 /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2759  * The values are bad, and it doesn't even have the right number of cells. */
2760 static void __init fixup_device_tree_maple(void)
2761 {
2762 	phandle isa;
2763 	u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2764 	u32 isa_ranges[6];
2765 	char *name;
2766 
2767 	name = "/ht@0/isa@4";
2768 	isa = call_prom("finddevice", 1, 1, ADDR(name));
2769 	if (!PHANDLE_VALID(isa)) {
2770 		name = "/ht@0/isa@6";
2771 		isa = call_prom("finddevice", 1, 1, ADDR(name));
2772 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2773 	}
2774 	if (!PHANDLE_VALID(isa))
2775 		return;
2776 
2777 	if (prom_getproplen(isa, "ranges") != 12)
2778 		return;
2779 	if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2780 		== PROM_ERROR)
2781 		return;
2782 
2783 	if (isa_ranges[0] != 0x1 ||
2784 		isa_ranges[1] != 0xf4000000 ||
2785 		isa_ranges[2] != 0x00010000)
2786 		return;
2787 
2788 	prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2789 
2790 	isa_ranges[0] = 0x1;
2791 	isa_ranges[1] = 0x0;
2792 	isa_ranges[2] = rloc;
2793 	isa_ranges[3] = 0x0;
2794 	isa_ranges[4] = 0x0;
2795 	isa_ranges[5] = 0x00010000;
2796 	prom_setprop(isa, name, "ranges",
2797 			isa_ranges, sizeof(isa_ranges));
2798 }
2799 
2800 #define CPC925_MC_START		0xf8000000
2801 #define CPC925_MC_LENGTH	0x1000000
2802 /* The values for memory-controller don't have right number of cells */
2803 static void __init fixup_device_tree_maple_memory_controller(void)
2804 {
2805 	phandle mc;
2806 	u32 mc_reg[4];
2807 	char *name = "/hostbridge@f8000000";
2808 	u32 ac, sc;
2809 
2810 	mc = call_prom("finddevice", 1, 1, ADDR(name));
2811 	if (!PHANDLE_VALID(mc))
2812 		return;
2813 
2814 	if (prom_getproplen(mc, "reg") != 8)
2815 		return;
2816 
2817 	prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2818 	prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2819 	if ((ac != 2) || (sc != 2))
2820 		return;
2821 
2822 	if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2823 		return;
2824 
2825 	if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2826 		return;
2827 
2828 	prom_printf("Fixing up bogus hostbridge on Maple...\n");
2829 
2830 	mc_reg[0] = 0x0;
2831 	mc_reg[1] = CPC925_MC_START;
2832 	mc_reg[2] = 0x0;
2833 	mc_reg[3] = CPC925_MC_LENGTH;
2834 	prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2835 }
2836 #else
2837 #define fixup_device_tree_maple()
2838 #define fixup_device_tree_maple_memory_controller()
2839 #endif
2840 
2841 #ifdef CONFIG_PPC_CHRP
2842 /*
2843  * Pegasos and BriQ lack the "ranges" property in the isa node
2844  * Pegasos needs decimal IRQ 14/15, not hexadecimal
2845  * Pegasos has the IDE configured in legacy mode, but advertised as native
2846  */
2847 static void __init fixup_device_tree_chrp(void)
2848 {
2849 	phandle ph;
2850 	u32 prop[6];
2851 	u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2852 	char *name;
2853 	int rc;
2854 
2855 	name = "/pci@80000000/isa@c";
2856 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2857 	if (!PHANDLE_VALID(ph)) {
2858 		name = "/pci@ff500000/isa@6";
2859 		ph = call_prom("finddevice", 1, 1, ADDR(name));
2860 		rloc = 0x01003000; /* IO space; PCI device = 6 */
2861 	}
2862 	if (PHANDLE_VALID(ph)) {
2863 		rc = prom_getproplen(ph, "ranges");
2864 		if (rc == 0 || rc == PROM_ERROR) {
2865 			prom_printf("Fixing up missing ISA range on Pegasos...\n");
2866 
2867 			prop[0] = 0x1;
2868 			prop[1] = 0x0;
2869 			prop[2] = rloc;
2870 			prop[3] = 0x0;
2871 			prop[4] = 0x0;
2872 			prop[5] = 0x00010000;
2873 			prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2874 		}
2875 	}
2876 
2877 	name = "/pci@80000000/ide@C,1";
2878 	ph = call_prom("finddevice", 1, 1, ADDR(name));
2879 	if (PHANDLE_VALID(ph)) {
2880 		prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2881 		prop[0] = 14;
2882 		prop[1] = 0x0;
2883 		prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2884 		prom_printf("Fixing up IDE class-code on Pegasos...\n");
2885 		rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2886 		if (rc == sizeof(u32)) {
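			/* Clear the programming-interface bits that advertise
			 * native mode for both IDE channels. */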
2887 			prop[0] &= ~0x5;
2888 			prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2889 		}
2890 	}
2891 }
2892 #else
2893 #define fixup_device_tree_chrp()
2894 #endif
2895 
2896 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2897 static void __init fixup_device_tree_pmac(void)
2898 {
2899 	phandle u3, i2c, mpic;
2900 	u32 u3_rev;
2901 	u32 interrupts[2];
2902 	u32 parent;
2903 
2904 	/* Some G5s have a missing interrupt definition, fix it up here */
2905 	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2906 	if (!PHANDLE_VALID(u3))
2907 		return;
2908 	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2909 	if (!PHANDLE_VALID(i2c))
2910 		return;
2911 	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2912 	if (!PHANDLE_VALID(mpic))
2913 		return;
2914 
2915 	/* check if proper rev of u3 */
2916 	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2917 	    == PROM_ERROR)
2918 		return;
2919 	if (u3_rev < 0x35 || u3_rev > 0x39)
2920 		return;
2921 	/* does it need fixup ? */
2922 	if (prom_getproplen(i2c, "interrupts") > 0)
2923 		return;
2924 
2925 	prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2926 
2927 	/* interrupt on this revision of u3 is number 0 and level */
2928 	interrupts[0] = 0;
2929 	interrupts[1] = 1;
2930 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2931 		     &interrupts, sizeof(interrupts));
2932 	parent = (u32)mpic;
2933 	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2934 		     &parent, sizeof(parent));
2935 }
2936 #else
2937 #define fixup_device_tree_pmac()
2938 #endif
2939 
2940 #ifdef CONFIG_PPC_EFIKA
2941 /*
2942  * The MPC5200 FEC driver requires a phy-handle property to tell it how
2943  * to talk to the phy.  If the phy-handle property is missing, then this
2944  * function is called to add the appropriate nodes and link it to the
2945  * ethernet node.
2946  */
2947 static void __init fixup_device_tree_efika_add_phy(void)
2948 {
2949 	u32 node;
2950 	char prop[64];
2951 	int rv;
2952 
2953 	/* Check if /builtin/ethernet exists - bail if it doesn't */
2954 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2955 	if (!PHANDLE_VALID(node))
2956 		return;
2957 
2958 	/* Check if the phy-handle property exists - bail if it does */
2959 	rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2960 	if (!rv)
2961 		return;
2962 
2963 	/*
2964 	 * At this point the ethernet device doesn't have a phy described.
2965 	 * Now we need to add the missing phy node and linkage
2966 	 */
2967 
2968 	/* Check for an MDIO bus node - if missing then create one */
2969 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2970 	if (!PHANDLE_VALID(node)) {
2971 		prom_printf("Adding Ethernet MDIO node\n");
2972 		call_prom("interpret", 1, 1,
2973 			" s\" /builtin\" find-device"
2974 			" new-device"
2975 				" 1 encode-int s\" #address-cells\" property"
2976 				" 0 encode-int s\" #size-cells\" property"
2977 				" s\" mdio\" device-name"
2978 				" s\" fsl,mpc5200b-mdio\" encode-string"
2979 				" s\" compatible\" property"
2980 				" 0xf0003000 0x400 reg"
2981 				" 0x2 encode-int"
2982 				" 0x5 encode-int encode+"
2983 				" 0x3 encode-int encode+"
2984 				" s\" interrupts\" property"
2985 			" finish-device");
2986 	}
2987 
2988 	/* Check for a PHY device node - if missing then create one and
2989 	 * give its phandle to the ethernet node */
2990 	node = call_prom("finddevice", 1, 1,
2991 			 ADDR("/builtin/mdio/ethernet-phy"));
2992 	if (!PHANDLE_VALID(node)) {
2993 		prom_printf("Adding Ethernet PHY node\n");
2994 		call_prom("interpret", 1, 1,
2995 			" s\" /builtin/mdio\" find-device"
2996 			" new-device"
2997 				" s\" ethernet-phy\" device-name"
2998 				" 0x10 encode-int s\" reg\" property"
2999 				" my-self"
3000 				" ihandle>phandle"
3001 			" finish-device"
3002 			" s\" /builtin/ethernet\" find-device"
3003 				" encode-int"
3004 				" s\" phy-handle\" property"
3005 			" device-end");
3006 	}
3007 }
3008 
3009 static void __init fixup_device_tree_efika(void)
3010 {
3011 	int sound_irq[3] = { 2, 2, 0 };
3012 	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
3013 				3,4,0, 3,5,0, 3,6,0, 3,7,0,
3014 				3,8,0, 3,9,0, 3,10,0, 3,11,0,
3015 				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
3016 	u32 node;
3017 	char prop[64];
3018 	int rv, len;
3019 
3020 	/* Check if we're really running on an EFIKA */
3021 	node = call_prom("finddevice", 1, 1, ADDR("/"));
3022 	if (!PHANDLE_VALID(node))
3023 		return;
3024 
3025 	rv = prom_getprop(node, "model", prop, sizeof(prop));
3026 	if (rv == PROM_ERROR)
3027 		return;
3028 	if (prom_strcmp(prop, "EFIKA5K2"))
3029 		return;
3030 
3031 	prom_printf("Applying EFIKA device tree fixups\n");
3032 
3033 	/* Claiming to be 'chrp' is death */
3034 	node = call_prom("finddevice", 1, 1, ADDR("/"));
3035 	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
3036 	if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0))
3037 		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
3038 
3039 	/* CODEGEN,description is exposed in /proc/cpuinfo so
3040 	   fix that too */
3041 	rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
3042 	if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
3043 		prom_setprop(node, "/", "CODEGEN,description",
3044 			     "Efika 5200B PowerPC System",
3045 			     sizeof("Efika 5200B PowerPC System"));
3046 
3047 	/* Fixup bestcomm interrupts property */
3048 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
3049 	if (PHANDLE_VALID(node)) {
3050 		len = prom_getproplen(node, "interrupts");
3051 		if (len == 12) {
3052 			prom_printf("Fixing bestcomm interrupts property\n");
3053 			prom_setprop(node, "/builtin/bestcom", "interrupts",
3054 				     bcomm_irq, sizeof(bcomm_irq));
3055 		}
3056 	}
3057 
3058 	/* Fixup sound interrupts property */
3059 	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
3060 	if (PHANDLE_VALID(node)) {
3061 		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
3062 		if (rv == PROM_ERROR) {
3063 			prom_printf("Adding sound interrupts property\n");
3064 			prom_setprop(node, "/builtin/sound", "interrupts",
3065 				     sound_irq, sizeof(sound_irq));
3066 		}
3067 	}
3068 
3069 	/* Make sure ethernet phy-handle property exists */
3070 	fixup_device_tree_efika_add_phy();
3071 }
3072 #else
3073 #define fixup_device_tree_efika()
3074 #endif
3075 
3076 #ifdef CONFIG_PPC_PASEMI_NEMO
3077 /*
3078  * The CFE supplied on Nemo is broken in several ways; the biggest
3079  * problem is that it reassigns ISA interrupts to unused mpic ints.
3080  * Add an interrupt-controller property for the io-bridge to use
3081  * and correct the ints so we can attach them to an irq_domain
3082  */
3083 static void __init fixup_device_tree_pasemi(void)
3084 {
3085 	u32 interrupts[2], parent, rval, val = 0;
3086 	char *name, *pci_name;
3087 	phandle iob, node;
3088 
3089 	/* Find the root pci node */
3090 	name = "/pxp@0,e0000000";
3091 	iob = call_prom("finddevice", 1, 1, ADDR(name));
3092 	if (!PHANDLE_VALID(iob))
3093 		return;
3094 
3095 	/* check if interrupt-controller node set yet */
3096 	if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
3097 		return;
3098 
3099 	prom_printf("adding interrupt-controller property for SB600...\n");
3100 
3101 	prom_setprop(iob, name, "interrupt-controller", &val, 0);
3102 
3103 	pci_name = "/pxp@0,e0000000/pci@11";
3104 	node = call_prom("finddevice", 1, 1, ADDR(pci_name));
3105 	parent = ADDR(iob);
3106 
3107 	for( ; prom_next_node(&node); ) {
3108 		/* scan each node for one with an interrupt */
3109 		if (!PHANDLE_VALID(node))
3110 			continue;
3111 
3112 		rval = prom_getproplen(node, "interrupts");
3113 		if (rval == 0 || rval == PROM_ERROR)
3114 			continue;
3115 
3116 		prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
3117 		if ((interrupts[0] < 212) || (interrupts[0] > 222))
3118 			continue;
3119 
3120 		/* found a node, update both interrupts and interrupt-parent */
3121 		if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
3122 			interrupts[0] -= 203;
3123 		if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
3124 			interrupts[0] -= 213;
3125 		if (interrupts[0] == 221)
3126 			interrupts[0] = 14;
3127 		if (interrupts[0] == 222)
3128 			interrupts[0] = 8;
3129 
3130 		prom_setprop(node, pci_name, "interrupts", interrupts,
3131 					sizeof(interrupts));
3132 		prom_setprop(node, pci_name, "interrupt-parent", &parent,
3133 					sizeof(parent));
3134 	}
3135 
3136 	/*
3137 	 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
3138 	 * so that generic isa-bridge code can add the SB600 and its on-board
3139 	 * peripherals.
3140 	 */
3141 	name = "/pxp@0,e0000000/io-bridge@0";
3142 	iob = call_prom("finddevice", 1, 1, ADDR(name));
3143 	if (!PHANDLE_VALID(iob))
3144 		return;
3145 
3146 	/* device_type is already set, just change it. */
3147 
3148 	prom_printf("Changing device_type of SB600 node...\n");
3149 
3150 	prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
3151 }
3152 #else	/* !CONFIG_PPC_PASEMI_NEMO */
3153 static inline void fixup_device_tree_pasemi(void) { }
3154 #endif
3155 
3156 static void __init fixup_device_tree(void)
3157 {
3158 	fixup_device_tree_maple();
3159 	fixup_device_tree_maple_memory_controller();
3160 	fixup_device_tree_chrp();
3161 	fixup_device_tree_pmac();
3162 	fixup_device_tree_efika();
3163 	fixup_device_tree_pasemi();
3164 }
3165 
3166 static void __init prom_find_boot_cpu(void)
3167 {
3168 	__be32 rval;
3169 	ihandle prom_cpu;
3170 	phandle cpu_pkg;
3171 
3172 	rval = 0;
3173 	if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
3174 		return;
3175 	prom_cpu = be32_to_cpu(rval);
3176 
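	/* /chosen "cpu" is an instance handle; convert it to the CPU node's
	 * package handle so we can read its "reg" (hw id). */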
3177 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
3178 
3179 	if (!PHANDLE_VALID(cpu_pkg))
3180 		return;
3181 
3182 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3183 	prom.cpu = be32_to_cpu(rval);
3184 
3185 	prom_debug("Booting CPU hw index = %d\n", prom.cpu);
3186 }
3187 
3188 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3189 {
3190 #ifdef CONFIG_BLK_DEV_INITRD
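	/* r3/r4 carry the initrd start and size; treat r4 == 0xdeadbeef
	 * (a poison value) as "no initrd". */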
3191 	if (r3 && r4 && r4 != 0xdeadbeef) {
3192 		__be64 val;
3193 
3194 		prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3195 		prom_initrd_end = prom_initrd_start + r4;
3196 
3197 		val = cpu_to_be64(prom_initrd_start);
3198 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
3199 			     &val, sizeof(val));
3200 		val = cpu_to_be64(prom_initrd_end);
3201 		prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
3202 			     &val, sizeof(val));
3203 
3204 		reserve_mem(prom_initrd_start,
3205 			    prom_initrd_end - prom_initrd_start);
3206 
3207 		prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
3208 		prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
3209 	}
3210 #endif /* CONFIG_BLK_DEV_INITRD */
3211 }
3212 
3213 #ifdef CONFIG_PPC64
3214 #ifdef CONFIG_RELOCATABLE
3215 static void reloc_toc(void)
3216 {
3217 }
3218 
3219 static void unreloc_toc(void)
3220 {
3221 }
3222 #else
3223 static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
3224 {
3225 	unsigned long i;
3226 	unsigned long *toc_entry;
3227 
3228 	/* Get the start of the TOC by using r2 directly. */
3229 	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
3230 
3231 	for (i = 0; i < nr_entries; i++) {
3232 		*toc_entry = *toc_entry + offset;
3233 		toc_entry++;
3234 	}
3235 }
3236 
3237 static void reloc_toc(void)
3238 {
3239 	unsigned long offset = reloc_offset();
3240 	unsigned long nr_entries =
3241 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3242 
3243 	__reloc_toc(offset, nr_entries);
3244 
3245 	mb();
3246 }
3247 
3248 static void unreloc_toc(void)
3249 {
3250 	unsigned long offset = reloc_offset();
3251 	unsigned long nr_entries =
3252 		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3253 
3254 	mb();
3255 
3256 	__reloc_toc(-offset, nr_entries);
3257 }
3258 #endif
3259 #endif
3260 
3261 #ifdef CONFIG_PPC_SVM
3262 /*
3263  * Perform the Enter Secure Mode ultracall.
3264  */
3265 static int enter_secure_mode(unsigned long kbase, unsigned long fdt)
3266 {
3267 	register unsigned long r3 asm("r3") = UV_ESM;
3268 	register unsigned long r4 asm("r4") = kbase;
3269 	register unsigned long r5 asm("r5") = fdt;
3270 
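	/* "sc 2" traps to the ultravisor; the status comes back in r3. */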
3271 	asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5));
3272 
3273 	return r3;
3274 }
3275 
3276 /*
3277  * Call the Ultravisor to transfer us to secure memory if we have an ESM blob.
3278  */
3279 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3280 {
3281 	int ret;
3282 
3283 	if (!prom_svm_enable)
3284 		return;
3285 
3286 	/* Switch to secure mode. */
3287 	prom_printf("Switching to secure mode.\n");
3288 
3289 	/*
3290 	 * The ultravisor will do an integrity check of the kernel image but we
3291 	 * relocated it so the check will fail. Restore the original image by
3292 	 * relocating it back to the kernel virtual base address.
3293 	 */
3294 	if (IS_ENABLED(CONFIG_RELOCATABLE))
3295 		relocate(KERNELBASE);
3296 
3297 	ret = enter_secure_mode(kbase, fdt);
3298 
3299 	/* Relocate the kernel again. */
3300 	if (IS_ENABLED(CONFIG_RELOCATABLE))
3301 		relocate(kbase);
3302 
3303 	if (ret != U_SUCCESS) {
3304 		prom_printf("Returned %d from switching to secure mode.\n", ret);
3305 		prom_rtas_os_term("Switch to secure mode failed.\n");
3306 	}
3307 }
3308 #else
3309 static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3310 {
3311 }
3312 #endif /* CONFIG_PPC_SVM */
3313 
3314 /*
3315  * We enter here early on, when the Open Firmware prom is still
3316  * handling exceptions and the MMU hash table for us.
3317  */
3318 
3319 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3320 			       unsigned long pp,
3321 			       unsigned long r6, unsigned long r7,
3322 			       unsigned long kbase)
3323 {
3324 	unsigned long hdr;
3325 
3326 #ifdef CONFIG_PPC32
3327 	unsigned long offset = reloc_offset();
3328 	reloc_got2(offset);
3329 #else
3330 	reloc_toc();
3331 #endif
3332 
3333 	/*
3334 	 * First zero the BSS
3335 	 */
3336 	memset(&__bss_start, 0, __bss_stop - __bss_start);
3337 
3338 	/*
3339 	 * Init interface to Open Firmware, get some node references,
3340 	 * like /chosen
3341 	 */
3342 	prom_init_client_services(pp);
3343 
3344 	/*
3345 	 * See if this OF is old enough that we need to do explicit maps
3346 	 * and other workarounds
3347 	 */
3348 	prom_find_mmu();
3349 
3350 	/*
3351 	 * Init prom stdout device
3352 	 */
3353 	prom_init_stdout();
3354 
3355 	prom_printf("Preparing to boot %s", linux_banner);
3356 
3357 	/*
3358 	 * Get default machine type. At this point, we do not differentiate
3359 	 * between pSeries SMP and pSeries LPAR
3360 	 */
3361 	of_platform = prom_find_machine_type();
3362 	prom_printf("Detected machine type: %x\n", of_platform);
3363 
3364 #ifndef CONFIG_NONSTATIC_KERNEL
3365 	/* Bail if this is a kdump kernel. */
3366 	if (PHYSICAL_START > 0)
3367 		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3368 #endif
3369 
3370 	/*
3371 	 * Check for an initrd
3372 	 */
3373 	prom_check_initrd(r3, r4);
3374 
3375 	/*
3376 	 * Do early parsing of command line
3377 	 */
3378 	early_cmdline_parse();
3379 
3380 #ifdef CONFIG_PPC_PSERIES
3381 	/*
3382 	 * On pSeries, inform the firmware about our capabilities
3383 	 */
3384 	if (of_platform == PLATFORM_PSERIES ||
3385 	    of_platform == PLATFORM_PSERIES_LPAR)
3386 		prom_send_capabilities();
3387 #endif
3388 
3389 	/*
3390 	 * Copy the CPU hold code
3391 	 */
3392 	if (of_platform != PLATFORM_POWERMAC)
3393 		copy_and_flush(0, kbase, 0x100, 0);
3394 
3395 	/*
3396 	 * Initialize memory management within prom_init
3397 	 */
3398 	prom_init_mem();
3399 
3400 	/*
3401 	 * Determine which cpu is actually running right _now_
3402 	 */
3403 	prom_find_boot_cpu();
3404 
3405 	/*
3406 	 * Initialize display devices
3407 	 */
3408 	prom_check_displays();
3409 
3410 #if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3411 	/*
3412 	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3413 	 * that uses the allocator, as we need to make sure we get the top of memory
3414 	 * available for us here...
3415 	 */
3416 	if (of_platform == PLATFORM_PSERIES)
3417 		prom_initialize_tce_table();
3418 #endif
3419 
3420 	/*
3421 	 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3422 	 * have a usable RTAS implementation.
3423 	 */
3424 	if (of_platform != PLATFORM_POWERMAC)
3425 		prom_instantiate_rtas();
3426 
3427 #ifdef CONFIG_PPC64
3428 	/* instantiate sml */
3429 	prom_instantiate_sml();
3430 #endif
3431 
3432 	/*
3433 	 * On non-powermacs, put all CPUs in spin-loops.
3434 	 *
3435 	 * PowerMacs use a different mechanism to spin CPUs
3436 	 *
3437 	 * (This must be done after instantiating RTAS)
3438 	 */
3439 	if (of_platform != PLATFORM_POWERMAC)
3440 		prom_hold_cpus();
3441 
3442 	/*
3443 	 * Fill in some infos for use by the kernel later on
3444 	 */
3445 	if (prom_memory_limit) {
3446 		__be64 val = cpu_to_be64(prom_memory_limit);
3447 		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3448 			     &val, sizeof(val));
3449 	}
3450 #ifdef CONFIG_PPC64
3451 	if (prom_iommu_off)
3452 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3453 			     NULL, 0);
3454 
3455 	if (prom_iommu_force_on)
3456 		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3457 			     NULL, 0);
3458 
3459 	if (prom_tce_alloc_start) {
3460 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3461 			     &prom_tce_alloc_start,
3462 			     sizeof(prom_tce_alloc_start));
3463 		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3464 			     &prom_tce_alloc_end,
3465 			     sizeof(prom_tce_alloc_end));
3466 	}
3467 #endif
3468 
3469 	/*
3470 	 * Fixup any known bugs in the device-tree
3471 	 */
3472 	fixup_device_tree();
3473 
3474 	/*
3475 	 * Now finally create the flattened device-tree
3476 	 */
3477 	prom_printf("copying OF device tree...\n");
3478 	flatten_device_tree();
3479 
3480 	/*
3481 	 * Close stdin in case it is USB and still active on IBM machines...
3482 	 * Unfortunately quiesce crashes on some powermacs if we have
3483 	 * closed stdin already (in particular the powerbook 101).
3484 	 */
3485 	if (of_platform != PLATFORM_POWERMAC)
3486 		prom_close_stdin();
3487 
3488 	/*
3489 	 * Call OF "quiesce" method to shut down pending DMA's from
3490 	 * devices etc...
3491 	 */
3492 	prom_printf("Quiescing Open Firmware ...\n");
3493 	call_prom("quiesce", 0, 0);
3494 
3495 	/*
3496 	 * And finally, call the kernel passing it the flattened device
3497 	 * tree and NULL as r5, thus triggering the new entry point which
3498 	 * is common to us and kexec
3499 	 */
3500 	hdr = dt_header_start;
3501 
3502 	prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3503 	prom_debug("->dt_header_start=0x%lx\n", hdr);
3504 
3505 #ifdef CONFIG_PPC32
3506 	reloc_got2(-offset);
3507 #else
3508 	unreloc_toc();
3509 #endif
3510 
3511 	/* Move to secure memory if we're supposed to be secure guests. */
3512 	setup_secure_guest(kbase, hdr);
3513 
3514 	__start(hdr, kbase, 0, 0, 0, 0, 0);
3515 
3516 	return 0;
3517 }
3518