/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2013 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_PPC_VM_BYTES_PPC_HPP
#define CPU_PPC_VM_BYTES_PPC_HPP

#include "memory/allocation.hpp"

class Bytes: AllStatic {
 public:
  // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering.
  // PowerPC needs to check for alignment.

  // Can I count on address always being a pointer to an unsigned char? Yes.
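
  // Illustrative usage sketch (hypothetical caller, not part of this interface): a typical
  // client reads a multi-byte value in Java (big-endian) order from a possibly unaligned
  // position in a buffer, e.g.
  //
  //   u4 operand = Bytes::get_Java_u4(bcp + 1);   // 'bcp' is a hypothetical buffer pointer
  //   Bytes::put_Java_u4(bcp + 1, operand);       // write it back in the same ordering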

#if defined(VM_LITTLE_ENDIAN)

  // Returns true if the byte ordering used by Java differs from the native byte ordering
  // of the underlying machine. For example, true on Intel x86, false on Solaris/SPARC.
  static inline bool is_Java_byte_ordering_different() { return true; }

  // Forward declarations of the compiler-dependent implementation
  static inline u2 swap_u2(u2 x);
  static inline u4 swap_u4(u4 x);
  static inline u8 swap_u8(u8 x);
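
  // The swap routines above are provided by the platform-specific inline header pulled in
  // at the bottom of this file (e.g. bytes_linux_ppc.inline.hpp); the exact implementation
  // (byte-reversing loads, compiler builtins, etc.) is platform-dependent.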

  static inline u2   get_native_u2(address p) {
    return (intptr_t(p) & 1) == 0
             ?   *(u2*)p
             :   ( u2(p[1]) << 8 )
               | ( u2(p[0])      );
  }

  static inline u4   get_native_u4(address p) {
    switch (intptr_t(p) & 3) {
     case 0:  return *(u4*)p;

     case 2:  return (  u4( ((u2*)p)[1] ) << 16  )
                   | (  u4( ((u2*)p)[0] )        );

    default:  return ( u4(p[3]) << 24 )
                   | ( u4(p[2]) << 16 )
                   | ( u4(p[1]) <<  8 )
                   |   u4(p[0]);
    }
  }

  static inline u8   get_native_u8(address p) {
    switch (intptr_t(p) & 7) {
      case 0:  return *(u8*)p;

      case 4:  return (  u8( ((u4*)p)[1] ) << 32  )
                    | (  u8( ((u4*)p)[0] )        );

      case 2:  return (  u8( ((u2*)p)[3] ) << 48  )
                    | (  u8( ((u2*)p)[2] ) << 32  )
                    | (  u8( ((u2*)p)[1] ) << 16  )
                    | (  u8( ((u2*)p)[0] )        );

     default:  return ( u8(p[7]) << 56 )
                    | ( u8(p[6]) << 48 )
                    | ( u8(p[5]) << 40 )
                    | ( u8(p[4]) << 32 )
                    | ( u8(p[3]) << 24 )
                    | ( u8(p[2]) << 16 )
                    | ( u8(p[1]) <<  8 )
                    |   u8(p[0]);
    }
  }
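
  // Worked example (illustrative): for p with (intptr_t(p) & 3) == 2 and bytes
  // p[0..3] = {0x44, 0x33, 0x22, 0x11}, get_native_u4 takes the "case 2" path and
  // assembles (0x1122 << 16) | 0x3344 == 0x11223344, the same value an aligned
  // little-endian u4 load of those bytes would produce.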


  static inline void put_native_u2(address p, u2 x) {
    if ( (intptr_t(p) & 1) == 0 )  *(u2*)p = x;
    else {
      p[1] = x >> 8;
      p[0] = x;
    }
  }

  static inline void put_native_u4(address p, u4 x) {
    switch ( intptr_t(p) & 3 ) {
    case 0:  *(u4*)p = x;
             break;

    case 2:  ((u2*)p)[1] = x >> 16;
             ((u2*)p)[0] = x;
             break;

    default: ((u1*)p)[3] = x >> 24;
             ((u1*)p)[2] = x >> 16;
             ((u1*)p)[1] = x >>  8;
             ((u1*)p)[0] = x;
             break;
    }
  }

  static inline void put_native_u8(address p, u8 x) {
    switch ( intptr_t(p) & 7 ) {
    case 0:  *(u8*)p = x;
             break;

    case 4:  ((u4*)p)[1] = x >> 32;
             ((u4*)p)[0] = x;
             break;

    case 2:  ((u2*)p)[3] = x >> 48;
             ((u2*)p)[2] = x >> 32;
             ((u2*)p)[1] = x >> 16;
             ((u2*)p)[0] = x;
             break;

    default: ((u1*)p)[7] = x >> 56;
             ((u1*)p)[6] = x >> 48;
             ((u1*)p)[5] = x >> 40;
             ((u1*)p)[4] = x >> 32;
             ((u1*)p)[3] = x >> 24;
             ((u1*)p)[2] = x >> 16;
             ((u1*)p)[1] = x >>  8;
             ((u1*)p)[0] = x;
    }
  }
  // Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering).
  // Byte-order reversal is needed here, since this is the little-endian configuration.
  static inline u2   get_Java_u2(address p) { return swap_u2(get_native_u2(p)); }
  static inline u4   get_Java_u4(address p) { return swap_u4(get_native_u4(p)); }
  static inline u8   get_Java_u8(address p) { return swap_u8(get_native_u8(p)); }

  static inline void put_Java_u2(address p, u2 x)     { put_native_u2(p, swap_u2(x)); }
  static inline void put_Java_u4(address p, u4 x)     { put_native_u4(p, swap_u4(x)); }
  static inline void put_Java_u8(address p, u8 x)     { put_native_u8(p, swap_u8(x)); }
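
  // Worked example (illustrative): on a little-endian host, bytes {0x12, 0x34} at p yield
  // get_native_u2(p) == 0x3412; get_Java_u2(p) applies swap_u2 and returns 0x1234, i.e.
  // the value as Java's big-endian class-file format defines it.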

#else // !defined(VM_LITTLE_ENDIAN)

  // Returns true if the byte ordering used by Java differs from the native byte ordering
  // of the underlying machine. For example, true on Intel x86, false on Solaris/SPARC.
  static inline bool is_Java_byte_ordering_different() { return false; }

  // Thus, a swap between native and Java ordering is always a no-op:
  static inline u2   swap_u2(u2 x)  { return x; }
  static inline u4   swap_u4(u4 x)  { return x; }
  static inline u8   swap_u8(u8 x)  { return x; }

  static inline u2   get_native_u2(address p) {
    return (intptr_t(p) & 1) == 0
             ?   *(u2*)p
             :   ( u2(p[0]) << 8 )
               | ( u2(p[1])      );
  }

  static inline u4   get_native_u4(address p) {
    switch (intptr_t(p) & 3) {
     case 0:  return *(u4*)p;

     case 2:  return (  u4( ((u2*)p)[0] ) << 16  )
                   | (  u4( ((u2*)p)[1] )        );

    default:  return ( u4(p[0]) << 24 )
                   | ( u4(p[1]) << 16 )
                   | ( u4(p[2]) <<  8 )
                   |   u4(p[3]);
    }
  }

  static inline u8   get_native_u8(address p) {
    switch (intptr_t(p) & 7) {
      case 0:  return *(u8*)p;

      case 4:  return (  u8( ((u4*)p)[0] ) << 32  )
                    | (  u8( ((u4*)p)[1] )        );

      case 2:  return (  u8( ((u2*)p)[0] ) << 48  )
                    | (  u8( ((u2*)p)[1] ) << 32  )
                    | (  u8( ((u2*)p)[2] ) << 16  )
                    | (  u8( ((u2*)p)[3] )        );

     default:  return ( u8(p[0]) << 56 )
                    | ( u8(p[1]) << 48 )
                    | ( u8(p[2]) << 40 )
                    | ( u8(p[3]) << 32 )
                    | ( u8(p[4]) << 24 )
                    | ( u8(p[5]) << 16 )
                    | ( u8(p[6]) <<  8 )
                    |   u8(p[7]);
    }
  }


  static inline void put_native_u2(address p, u2 x) {
    if ( (intptr_t(p) & 1) == 0 ) { *(u2*)p = x; }
    else {
      p[0] = x >> 8;
      p[1] = x;
    }
  }

  static inline void put_native_u4(address p, u4 x) {
    switch ( intptr_t(p) & 3 ) {
    case 0:  *(u4*)p = x;
             break;

    case 2:  ((u2*)p)[0] = x >> 16;
             ((u2*)p)[1] = x;
             break;

    default: ((u1*)p)[0] = x >> 24;
             ((u1*)p)[1] = x >> 16;
             ((u1*)p)[2] = x >>  8;
             ((u1*)p)[3] = x;
             break;
    }
  }

  static inline void put_native_u8(address p, u8 x) {
    switch ( intptr_t(p) & 7 ) {
    case 0:  *(u8*)p = x;
             break;

    case 4:  ((u4*)p)[0] = x >> 32;
             ((u4*)p)[1] = x;
             break;

    case 2:  ((u2*)p)[0] = x >> 48;
             ((u2*)p)[1] = x >> 32;
             ((u2*)p)[2] = x >> 16;
             ((u2*)p)[3] = x;
             break;

    default: ((u1*)p)[0] = x >> 56;
             ((u1*)p)[1] = x >> 48;
             ((u1*)p)[2] = x >> 40;
             ((u1*)p)[3] = x >> 32;
             ((u1*)p)[4] = x >> 24;
             ((u1*)p)[5] = x >> 16;
             ((u1*)p)[6] = x >>  8;
             ((u1*)p)[7] = x;
    }
  }
  // Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering).
  // No byte-order reversal is needed here, since this big-endian configuration already matches Java's ordering.
  static inline u2   get_Java_u2(address p) { return get_native_u2(p); }
  static inline u4   get_Java_u4(address p) { return get_native_u4(p); }
  static inline u8   get_Java_u8(address p) { return get_native_u8(p); }

  static inline void put_Java_u2(address p, u2 x)     { put_native_u2(p, x); }
  static inline void put_Java_u4(address p, u4 x)     { put_native_u4(p, x); }
  static inline void put_Java_u8(address p, u8 x)     { put_native_u8(p, x); }
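
  // Worked example (illustrative): on a big-endian host, bytes {0x12, 0x34} at p already
  // read as 0x1234 via get_native_u2(p), so get_Java_u2(p) returns that value unchanged.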

#endif // VM_LITTLE_ENDIAN
};

#if defined(TARGET_OS_ARCH_linux_ppc)
#include "bytes_linux_ppc.inline.hpp"
#endif
#if defined(TARGET_OS_ARCH_bsd_ppc)
#include "bytes_bsd_ppc.inline.hpp"
#endif


#endif // CPU_PPC_VM_BYTES_PPC_HPP