/*	$OpenBSD: cpu_subr.c,v 1.8 2015/04/27 07:20:57 mpi Exp $	*/

/*
 * Copyright (c) 2013 Martin Pieuchot
 * Copyright (c) 2005 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>

#include <powerpc/cpu.h>

int		ppc_cpuidle;		/* Support DOZE, NAP or DEEP NAP? */
int		ppc_altivec;		/* CPU has altivec support. */
int		ppc_proc_is_64b;	/* CPU is 64bit */
int		ppc_nobat;		/* Do not use BAT registers. */

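/*
 * Ranges of kernel text to patch at boot, each table terminated by
 * an entry whose start pointer is NULL.  The tables and replacement
 * instructions are defined elsewhere in the kernel, presumably in
 * the locore assembly.
 */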
struct patch {
	uint32_t *s;
	uint32_t *e;
};
extern struct patch	rfi_start, nop32_start, nopbat_start;
extern uint32_t		rfid_inst, nop_inst;

void
cpu_bootstrap(void)
{
	uint32_t cpu;
	uint32_t *inst;
	struct patch *p;

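	/* The processor version is in the upper half of the PVR. */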
	cpu = ppc_mfpvr() >> 16;

	switch (cpu) {
	case PPC_CPU_IBM970:
	case PPC_CPU_IBM970FX:
	case PPC_CPU_IBM970MP:
		ppc_nobat = 1;
		ppc_proc_is_64b = 1;

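		/*
		 * The 970 does not implement rfi; rewrite each patch
		 * range (presumably the 32-bit exception return paths)
		 * with rfid and flush the icache over it.
		 */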
		for (p = &rfi_start; p->s; p++) {
			for (inst = p->s; inst < p->e; inst++)
				*inst = rfid_inst;
			syncicache(p->s, (p->e - p->s) * sizeof(*p->e));
		}
		break;
	case PPC_CPU_MPC83xx:
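		/*
		 * The e300 core has eight BAT pairs.  Writing zero to
		 * an upper BAT register clears its valid bits, so
		 * invalidate the high four pairs here and fall through
		 * to invalidate the low four.
		 */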
		ppc_mtibat4u(0);
		ppc_mtibat5u(0);
		ppc_mtibat6u(0);
		ppc_mtibat7u(0);
		ppc_mtdbat4u(0);
		ppc_mtdbat5u(0);
		ppc_mtdbat6u(0);
		ppc_mtdbat7u(0);
		/* FALLTHROUGH */
	default:
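		/* Invalidate the standard four IBAT/DBAT pairs. */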
		ppc_mtibat0u(0);
		ppc_mtibat1u(0);
		ppc_mtibat2u(0);
		ppc_mtibat3u(0);
		ppc_mtdbat0u(0);
		ppc_mtdbat1u(0);
		ppc_mtdbat2u(0);
		ppc_mtdbat3u(0);

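		/*
		 * On 32-bit processors, nop out the instruction
		 * ranges that are only needed on 64-bit CPUs.
		 */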
		for (p = &nop32_start; p->s; p++) {
			for (inst = p->s; inst < p->e; inst++)
				*inst = nop_inst;
			syncicache(p->s, (p->e - p->s) * sizeof(*p->e));
		}
	}

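	/* Without usable BATs, nop out the code that would set them up. */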
	if (ppc_nobat) {
		for (p = &nopbat_start; p->s; p++) {
			for (inst = p->s; inst < p->e; inst++)
				*inst = nop_inst;
			syncicache(p->s, (p->e - p->s) * sizeof(*p->e));
		}
	}
}

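/*
 * SPR 276 is SCOMC and SPR 277 is SCOMD, the control and data
 * registers of the 970's Scan Communications (SCOM) facility,
 * used among other things for power and frequency management.
 */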
void
ppc_mtscomc(u_int32_t val)
{
	int s;

	s = ppc_intr_disable();
	__asm volatile ("mtspr 276,%0; isync" :: "r" (val));
	ppc_intr_enable(s);
}

void
ppc_mtscomd(u_int32_t val)
{
	int s;

	s = ppc_intr_disable();
	__asm volatile ("mtspr 277,%0; isync" :: "r" (val));
	ppc_intr_enable(s);
}

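/*
 * The ppc64_* accessors below execute on the 970, where the GPRs
 * are 64 bits wide even though the kernel ABI is 32-bit and keeps
 * a u_int64_t in a pair of consecutive registers.  "%0+1" names
 * the second register of that pair, so a 64-bit SPR value can be
 * split into (or assembled from) the high word in %0 and the low
 * word in %0+1.  This relies on the compiler allocating a register
 * pair for the 64-bit operand.
 */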
u_int64_t
ppc64_mfscomc(void)
{
	u_int64_t ret;
	int s;

	s = ppc_intr_disable();
	__asm volatile ("mfspr %0,276;"
	    " mr %0+1, %0; srdi %0,%0,32" : "=r" (ret));
	ppc_intr_enable(s);
	return ret;
}

void
ppc64_mtscomc(u_int64_t val)
{
	int s;

	s = ppc_intr_disable();
	__asm volatile ("sldi %0,%0,32; or %0,%0,%0+1;"
	    " mtspr 276,%0; isync" :: "r" (val));
	ppc_intr_enable(s);
}

u_int64_t
ppc64_mfscomd(void)
{
	u_int64_t ret;
	int s;

	s = ppc_intr_disable();
	__asm volatile ("mfspr %0,277;"
	    " mr %0+1, %0; srdi %0,%0,32" : "=r" (ret));
	ppc_intr_enable(s);
	return ret;
}

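/*
 * HID0 is SPR 1008.  On the 64-bit CPUs its low word is reserved,
 * so only the high word is returned; the low word can be fetched
 * through the optional "lo" pointer for a read-modify-write.
 */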
static __inline u_int32_t
ppc64_mfhid0(u_int32_t *lo)
{
	u_int32_t hid0_hi, hid0_lo;

	__asm volatile ("mfspr %0,1008;"
	    " mr %1, %0; srdi %0,%0,32;" : "=r" (hid0_hi), "=r" (hid0_lo));
	if (lo != NULL)
		*lo = hid0_lo;
	return hid0_hi;
}

static __inline void
ppc64_mthid0(u_int32_t hid0_hi, u_int32_t hid0_lo)
{
	/*
	 * No, it's not a joke: a HID0 write really must be preceded
	 * by a sync and followed by six reads of HID0 before the
	 * isync.
	 *
	 * See Note 1 of Table 2-3 in the 970MP User Manual.
	 */
	__asm volatile ("sldi %0,%0,32; or %0,%0,%1;"
	    "sync; mtspr 1008,%0;"
	    "mfspr %0,1008; mfspr %0,1008; mfspr %0,1008;"
	    "mfspr %0,1008; mfspr %0,1008; mfspr %0,1008;"
	    "isync" :: "r" (hid0_hi), "r"(hid0_lo));
}

u_int32_t
ppc_mfhid0(void)
{
	u_int32_t ret;

	/* Since the lower 32 bits are reserved, do not expose them. */
	if (ppc_proc_is_64b)
		return ppc64_mfhid0(NULL);

	__asm volatile ("mfspr %0,1008" : "=r" (ret));
	return ret;
}

void
ppc_mthid0(u_int32_t val)
{
	if (ppc_proc_is_64b) {
		u_int32_t lo;

		/* Don't write any garbage in the lower 32 bits. */
		(void)ppc64_mfhid0(&lo);
		ppc64_mthid0(val, lo);
		return;
	}

	__asm volatile ("mtspr 1008,%0; isync" :: "r" (val));
}

u_int64_t
ppc64_mfhid1(void)
{
	u_int64_t ret;

	__asm volatile ("mfspr %0,1009;"
	    " mr %0+1, %0; srdi %0,%0,32" : "=r" (ret));
	return ret;
}

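/*
 * Per the 970 documentation, HID1 (SPR 1009) must be written
 * twice for an update to take effect reliably.
 */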
void
ppc64_mthid1(u_int64_t val)
{
	__asm volatile ("sldi %0,%0,32; or %0,%0,%0+1;"
	    "mtspr 1009,%0; mtspr 1009,%0; isync;" :: "r" (val));
}

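/*
 * HID4 (SPR 1012) and HID5 (SPR 1014); their updates are
 * bracketed by sync/isync.
 */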
u_int64_t
ppc64_mfhid4(void)
{
	u_int64_t ret;

	__asm volatile ("mfspr %0,1012;"
	    " mr %0+1, %0; srdi %0,%0,32" : "=r" (ret));
	return ret;
}

void
ppc64_mthid4(u_int64_t val)
{
	__asm volatile ("sldi %0,%0,32; or %0,%0,%0+1;"
	    "sync; mtspr 1012,%0; isync;" :: "r" (val));
}

u_int64_t
ppc64_mfhid5(void)
{
	u_int64_t ret;

	__asm volatile ("mfspr %0,1014;"
	    " mr %0+1, %0; srdi %0,%0,32" : "=r" (ret));
	return ret;
}

void
ppc64_mthid5(u_int64_t val)
{
	__asm volatile ("sldi %0,%0,32; or %0,%0,%0+1;"
	    "sync; mtspr 1014,%0; isync;" :: "r" (val));
254 }
255