1 #ifndef __PROCESSOR_H__
2 #define __PROCESSOR_H__
3
4 #include <gctypes.h>
5 #include "asm.h"
6
/* Expand a token into a string literal (used to splice SPR/DCR names
 * into inline-asm templates). */
#define __stringify(rn) #rn

/* Request a minimum alignment of v bytes for a variable or type. */
#define ATTRIBUTE_ALIGN(v) __attribute__((aligned(v)))

// courtesy of Marcan
/* Declare an on-stack buffer of cnt elements of 'type' whose start is
 * aligned to 'alignment' bytes.  A raw byte array is over-allocated by
 * up to 'alignment' extra bytes, and 'name' is set to point at the first
 * suitably aligned byte inside it.  'alignment' must be a power of two. */
#define STACK_ALIGN(type, name, cnt, alignment) \
	u8 _al__##name[((sizeof(type)*(cnt)) + (alignment) + (((sizeof(type)*(cnt))%(alignment)) > 0 ? ((alignment) - ((sizeof(type)*(cnt))%(alignment))) : 0))]; \
	type *name = (type*)(((u32)(_al__##name)) + ((alignment) - (((u32)(_al__##name))&((alignment)-1))))
12
/* Wait until all previously issued storage accesses have completed. */
#define _sync() asm volatile("sync")
/* Single no-operation instruction. */
#define _nop() asm volatile("nop")
/* Issue a system-call (sc) instruction. */
#define ppcsync() asm volatile("sc")
/* Drain outstanding stores, then spin forever — halts the CPU. */
#define ppchalt() ({ \
	asm volatile("sync"); \
	while(1) { \
		asm volatile("nop"); \
		asm volatile("li 3,0"); \
		asm volatile("nop"); \
	} \
})
24
/* Read the Processor Version Register. */
#define mfpvr() ({register u32 _rval; \
	asm volatile("mfpvr %0" : "=r"(_rval)); _rval;})

/* Read/write a Device Control Register; _rn/rn must be a compile-time
 * DCR name or number (spliced into the instruction via __stringify). */
#define mfdcr(_rn) ({register u32 _rval; \
	asm volatile("mfdcr %0," __stringify(_rn) \
	: "=r" (_rval)); _rval;})
#define mtdcr(rn, val) asm volatile("mtdcr " __stringify(rn) ",%0" : : "r" (val))

/* Read/write the Machine State Register. */
#define mfmsr() ({register u32 _rval; \
	asm volatile("mfmsr %0" : "=r" (_rval)); _rval;})
#define mtmsr(val) asm volatile("mtmsr %0" : : "r" (val))

/* Read/write the Decrementer register. */
#define mfdec() ({register u32 _rval; \
	asm volatile("mfdec %0" : "=r" (_rval)); _rval;})
#define mtdec(_val) asm volatile("mtdec %0" : : "r" (_val))

/* Read a Special Purpose Register; _rn must be a compile-time SPR
 * name or number (spliced into the instruction via __stringify). */
#define mfspr(_rn) \
	({ register u32 _rval = 0; \
	asm volatile("mfspr %0," __stringify(_rn) \
	: "=r" (_rval));\
	_rval; \
	})

/* Write a Special Purpose Register. */
#define mtspr(_rn, _val) asm volatile("mtspr " __stringify(_rn) ",%0" : : "r" (_val))
49
/* WPAR: write-gather pipe address register. */
#define mfwpar() mfspr(WPAR)
#define mtwpar(_val) mtspr(WPAR,_val)

/* MMCR0/MMCR1: performance-monitor control registers. */
#define mfmmcr0() mfspr(MMCR0)
#define mtmmcr0(_val) mtspr(MMCR0,_val)
#define mfmmcr1() mfspr(MMCR1)
#define mtmmcr1(_val) mtspr(MMCR1,_val)

/* PMC1..PMC4: performance-monitor counters. */
#define mfpmc1() mfspr(PMC1)
#define mtpmc1(_val) mtspr(PMC1,_val)
#define mfpmc2() mfspr(PMC2)
#define mtpmc2(_val) mtspr(PMC2,_val)
#define mfpmc3() mfspr(PMC3)
#define mtpmc3(_val) mtspr(PMC3,_val)
#define mfpmc4() mfspr(PMC4)
#define mtpmc4(_val) mtspr(PMC4,_val)

/* HID0/HID1/HID2/HID4: hardware implementation-dependent registers. */
#define mfhid0() mfspr(HID0)
#define mthid0(_val) mtspr(HID0,_val)
#define mfhid1() mfspr(HID1)
#define mthid1(_val) mtspr(HID1,_val)
#define mfhid2() mfspr(HID2)
#define mthid2(_val) mtspr(HID2,_val)
#define mfhid4() mfspr(HID4)
#define mthid4(_val) mtspr(HID4,_val)
75
/* Load a halfword from base+index with its two bytes reversed (lhbrx). */
#define __lhbrx(base,index) \
	({ register u16 res; \
	__asm__ volatile ("lhbrx %0,%1,%2" : "=r"(res) : "b%"(index), "r"(base) : "memory"); \
	res; })

/* Load a word from base+index with its four bytes reversed (lwbrx). */
#define __lwbrx(base,index) \
	({ register u32 res; \
	__asm__ volatile ("lwbrx %0,%1,%2" : "=r"(res) : "b%"(index), "r"(base) : "memory"); \
	res; })

/* Store a halfword to base+index with its two bytes reversed (sthbrx). */
#define __sthbrx(base,index,value) \
	__asm__ volatile ("sthbrx %0,%1,%2" : : "r"(value), "b%"(index), "r"(base) : "memory")

/* Store a word to base+index with its four bytes reversed (stwbrx). */
#define __stwbrx(base,index,value) \
	__asm__ volatile ("stwbrx %0,%1,%2" : : "r"(value), "b%"(index), "r"(base) : "memory")

/* Count leading zero bits of a 32-bit value (result 0..32). */
#define cntlzw(_val) ({register u32 _rval; \
	asm volatile("cntlzw %0, %1" : "=r"((_rval)) : "r"((_val))); _rval;})
94
/* Read the MSR into _msr_value (statement form, usable as one stmt). */
#define _CPU_MSR_GET( _msr_value ) \
	do { \
		_msr_value = 0; \
		asm volatile ("mfmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); \
	} while (0)

/* Write _msr_value into the MSR. */
#define _CPU_MSR_SET( _msr_value ) \
	{ asm volatile ("mtmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); }

/* Set MSR[EE] (mask 0x8000): enable external interrupts. */
#define _CPU_ISR_Enable() \
	{ register u32 _val = 0; \
		__asm__ __volatile__ ( \
			"mfmsr %0\n" \
			"ori %0,%0,0x8000\n" \
			"mtmsr %0" \
			: "=&r" ((_val)) : "0" ((_val)) \
		); \
	}
113
/* Disable external interrupts, saving the previous enable state.
 * rlwinm %1,%0,0,17,15 keeps every MSR bit except bit 16 (EE = 0x8000);
 * extrwi %0,%0,1,16 extracts the old EE bit (0 or 1) into _isr_cookie,
 * which is later handed to _CPU_ISR_Restore / _CPU_ISR_Flash. */
#define _CPU_ISR_Disable( _isr_cookie ) \
	{ register u32 _disable_mask = 0; \
		_isr_cookie = 0; \
		__asm__ __volatile__ ( \
			"mfmsr %0\n" \
			"rlwinm %1,%0,0,17,15\n" \
			"mtmsr %1\n" \
			"extrwi %0,%0,1,16" \
			: "=&r" ((_isr_cookie)), "=&r" ((_disable_mask)) \
			: "0" ((_isr_cookie)), "1" ((_disable_mask)) \
		); \
	}
126
/* Undo a matching _CPU_ISR_Disable: if the saved cookie is non-zero
 * (interrupts were enabled before), set MSR[EE] again; otherwise
 * leave the MSR untouched. */
#define _CPU_ISR_Restore( _isr_cookie )  \
	{ register u32 _enable_mask = 0; \
		__asm__ __volatile__ ( \
			"    cmpwi %0,0\n" \
			"    beq 1f\n" \
			"    mfmsr %1\n" \
			"    ori %1,%1,0x8000\n" \
			"    mtmsr %1\n" \
			"1:" \
			: "=r"((_isr_cookie)),"=&r" ((_enable_mask)) \
			: "0"((_isr_cookie)),"1" ((_enable_mask)) \
		); \
	}
140
/* Briefly open an interrupt window inside a disabled section: if the
 * cookie from _CPU_ISR_Disable is non-zero, set MSR[EE] and then clear
 * it again, giving pending interrupts a chance to be taken. */
#define _CPU_ISR_Flash( _isr_cookie ) \
	{ register u32 _flash_mask = 0; \
		__asm__ __volatile__ ( \
			"    cmpwi %0,0\n" \
			"    beq 1f\n" \
			"    mfmsr %1\n" \
			"    ori %1,%1,0x8000\n" \
			"    mtmsr %1\n" \
			"    rlwinm %1,%1,0,17,15\n" \
			"    mtmsr %1\n" \
			"1:" \
			: "=r" ((_isr_cookie)), "=&r" ((_flash_mask)) \
			: "0" ((_isr_cookie)), "1" ((_flash_mask)) \
		); \
	}
156
/* Set MSR[FP] (mask 0x2000): enable floating-point instructions. */
#define _CPU_FPR_Enable() \
	{ register u32 _val = 0; \
		asm volatile ("mfmsr %0; ori %0,%0,0x2000; mtmsr %0" : \
		"=&r" (_val) : "0" (_val));\
	}

/* Clear MSR[FP]: rlwinm ...,0,19,17 keeps every bit except bit 18
 * (FP = 0x2000), disabling floating-point instructions. */
#define _CPU_FPR_Disable() \
	{ register u32 _val = 0; \
		asm volatile ("mfmsr %0; rlwinm %0,%0,0,19,17; mtmsr %0" : \
		"=&r" (_val) : "0" (_val));\
	}
168
169 #ifdef __cplusplus
170 extern "C" {
171 #endif /* __cplusplus */
172
/* Byte-swap a 16-bit value via a byte-reversed load (lhbrx) from a
 * stack temporary. */
static inline u16 bswap16(u16 val)
{
	u16 tmp = val;
	return __lhbrx(&tmp,0);
}
178
/* Byte-swap a 32-bit value via a byte-reversed load (lwbrx) from a
 * stack temporary. */
static inline u32 bswap32(u32 val)
{
	u32 tmp = val;
	return __lwbrx(&tmp,0);
}
184
/* Byte-swap a 64-bit value: byte-reverse each 32-bit half with lwbrx
 * and swap the two halves (low word of the input becomes the
 * byte-reversed high word of the result, and vice versa). */
static inline u64 bswap64(u64 val)
{
	union ullc {
		u64 ull;
		u32 ul[2];
	} outv;
	u64 tmp = val;

	outv.ul[0] = __lwbrx(&tmp,4);
	outv.ul[1] = __lwbrx(&tmp,0);

	return outv.ull;
}
198
199 // Basic I/O
200
/* Read a 32-bit word from a hardware register.  The address is ORed
 * with 0xC0000000 to go through the uncached window; the trailing
 * sync completes the access before returning.  The "memory" clobber
 * stops the compiler from reordering or caching ordinary memory
 * accesses across the MMIO read. */
static inline u32 read32(u32 addr)
{
	u32 x;
	asm volatile("lwz %0,0(%1) ; sync" : "=r"(x) : "b"(0xc0000000 | addr) : "memory");
	return x;
}
207
/* Write a 32-bit word to a hardware register through the uncached
 * window (addr | 0xC0000000); eieio orders the store against later
 * I/O accesses.  volatile + "memory" keep the compiler from removing
 * or reordering the store. */
static inline void write32(u32 addr, u32 x)
{
	asm volatile("stw %0,0(%1) ; eieio" : : "r"(x), "b"(0xc0000000 | addr) : "memory");
}
212
/* Read-modify-write a hardware register: clear the bits in 'clear',
 * then set the bits in 'set'.  Not atomic — callers must provide any
 * required locking. */
static inline void mask32(u32 addr, u32 clear, u32 set)
{
	write32(addr, (read32(addr)&(~clear)) | set);
}
217
read16(u32 addr)218 static inline u16 read16(u32 addr)
219 {
220 u16 x;
221 asm volatile("lhz %0,0(%1) ; sync" : "=r"(x) : "b"(0xc0000000 | addr));
222 return x;
223 }
224
/* Write a 16-bit halfword to a hardware register through the uncached
 * window (addr | 0xC0000000); eieio orders the store against later
 * I/O accesses. */
static inline void write16(u32 addr, u16 x)
{
	asm volatile("sth %0,0(%1) ; eieio" : : "r"(x), "b"(0xc0000000 | addr) : "memory");
}
229
read8(u32 addr)230 static inline u8 read8(u32 addr)
231 {
232 u8 x;
233 asm volatile("lbz %0,0(%1) ; sync" : "=r"(x) : "b"(0xc0000000 | addr));
234 return x;
235 }
236
/* Write an 8-bit byte to a hardware register through the uncached
 * window (addr | 0xC0000000); eieio orders the store against later
 * I/O accesses. */
static inline void write8(u32 addr, u8 x)
{
	asm volatile("stb %0,0(%1) ; eieio" : : "r"(x), "b"(0xc0000000 | addr) : "memory");
}
241
/* Store a single-precision float to a hardware register through the
 * uncached window (addr | 0xC0000000); eieio orders the store against
 * later I/O accesses. */
static inline void writef32(u32 addr, f32 x)
{
	asm volatile("stfs %0,0(%1) ; eieio" : : "f"(x), "b"(0xc0000000 | addr) : "memory");
}
246
247 #ifdef __cplusplus
248 }
249 #endif /* __cplusplus */
250
251 #endif
252