// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include <errno.h>
#include <signal.h>
#include <unistd.h>

#if defined(__i386__) || defined(__x86_64__)
#include <cpuid.h>
#endif

#ifdef __linux__
#include <syscall.h>
#endif

#include "config.h"

#include "runtime.h"
#include "arch.h"
#include "array.h"

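// runtime_atoi parses a non-negative decimal integer from the first
// len bytes of p, stopping at the first non-digit byte.  The result
// is not checked for overflow.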
int32
runtime_atoi(const byte *p, intgo len)
{
	int32 n;

	n = 0;
	while(len > 0 && '0' <= *p && *p <= '9') {
		n = n*10 + *p++ - '0';
		len--;
	}
	return n;
}

// A random number from the GNU/Linux auxv array.
static uint32 randomNumber;

// Set the random number from Go code.

void
setRandomNumber(uint32 r)
{
	randomNumber = r;
}

#if defined(__i386__) || defined(__x86_64__) || defined (__s390__) || defined (__s390x__)

// When cputicks is just asm instructions, skip the split stack
// prologue for speed.

int64 runtime_cputicks(void) __attribute__((no_split_stack));

#endif

// Whether the processor supports SSE2.
#if defined (__i386__)
static _Bool hasSSE2;

// Force appropriate CPU level so that we can call the lfence/mfence
// builtins.

#pragma GCC push_options
#pragma GCC target("sse2")

#elif defined(__x86_64__)
#define hasSSE2 true
#endif

#if defined(__i386__) || defined(__x86_64__)
// Whether to use lfence, as opposed to mfence.
// Set based on cpuid.
static _Bool lfenceBeforeRdtsc;
#endif // defined(__i386__) || defined(__x86_64__)

int64
runtime_cputicks(void)
{
#if defined(__i386__) || defined(__x86_64__)
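  // RDTSC is not a serializing instruction, so issue a fence first to
  // keep the timestamp read from being reordered before earlier
  // memory operations.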
  if (hasSSE2) {
    if (lfenceBeforeRdtsc) {
      __builtin_ia32_lfence();
    } else {
      __builtin_ia32_mfence();
    }
  }
  return __builtin_ia32_rdtsc();
#elif defined (__s390__) || defined (__s390x__)
  uint64 clock = 0;
  /* stckf may not write the return variable in case of a clock error, so make
     the operand read-write to prevent the initialisation from being optimised
     out.  Note: Targets below z9-109 will crash when executing store clock
     fast, i.e. we don't support Go on machines older than that.  */
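  /* The .insn encoding 0xb27c0000 is STCKF (STORE CLOCK FAST), which
     stores the 8-byte TOD clock value into the memory operand.  */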
  asm volatile(".insn s,0xb27c0000,%0" /* stckf */ : "+Q" (clock) : : "cc" );
  return (int64)clock;
#else
  // Currently cputicks() is used in the blocking profiler and to seed
  // runtime·fastrand().  runtime·nanotime() is a poor approximation of CPU
  // ticks, but it is good enough for the profiler.  randomNumber provides
  // better seeding of fastrand.
  return runtime_nanotime() + randomNumber;
#endif
}

#if defined(__i386__)
#pragma GCC pop_options
#endif

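// runtime_signalstack installs p as the alternate signal stack (or
// disables the alternate stack when p is nil).  If sigaltstack fails,
// the deliberate write to address 0xf1 crashes the program.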
void
runtime_signalstack(byte *p, uintptr n)
{
	stack_t st;

	st.ss_sp = p;
	st.ss_size = n;
	st.ss_flags = 0;
	if(p == nil)
		st.ss_flags = SS_DISABLE;
	if(sigaltstack(&st, nil) < 0)
		*(int *)0xf1 = 0xf1;
}

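// Thin wrappers that export the low-level file operations to Go under
// their runtime.* symbol names (via GOSYM_PREFIX), simply forwarding
// to the corresponding runtime_* C functions.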
int32 go_open(char *, int32, int32)
  __asm__ (GOSYM_PREFIX "runtime.open");

int32
go_open(char *name, int32 mode, int32 perm)
{
  return runtime_open(name, mode, perm);
}

int32 go_read(int32, void *, int32)
  __asm__ (GOSYM_PREFIX "runtime.read");

int32
go_read(int32 fd, void *p, int32 n)
{
  return runtime_read(fd, p, n);
}

int32 go_write(uintptr, void *, int32)
  __asm__ (GOSYM_PREFIX "runtime.write");

int32
go_write(uintptr fd, void *p, int32 n)
{
  return runtime_write(fd, p, n);
}

int32 go_closefd(int32)
  __asm__ (GOSYM_PREFIX "runtime.closefd");

int32
go_closefd(int32 fd)
{
  return runtime_close(fd);
}

intgo go_errno(void)
  __asm__ (GOSYM_PREFIX "runtime.errno");

intgo
go_errno()
{
  return (intgo)errno;
}

uintptr getEnd(void)
  __asm__ (GOSYM_PREFIX "runtime.getEnd");

uintptr
getEnd()
{
#ifdef _AIX
  // mmap address ranges start at 0x30000000 on AIX for 32-bit processes.
  uintptr end = 0x30000000U;
#else
  uintptr end = 0;
  uintptr *pend;

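  // __go_end is presumably declared weak elsewhere (e.g. in runtime.h);
  // taking its address yields nil when the linker did not provide the
  // symbol, in which case getEnd reports 0.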
  pend = &__go_end;
  if (pend != nil) {
    end = *pend;
  }
#endif

  return end;
}

// CPU-specific initialization.
// Fetch CPUID info on x86.

void
runtime_cpuinit()
{
#if defined(__i386__) || defined(__x86_64__)
	unsigned int eax, ebx, ecx, edx;

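	// CPUID leaf 0 returns the vendor string "GenuineIntel" in EBX,
	// EDX, ECX (spelling out "Genu", "ineI", "ntel").  On Intel
	// processors an LFENCE is sufficient to order RDTSC; other vendors
	// fall back to MFENCE.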
	if (__get_cpuid(0, &eax, &ebx, &ecx, &edx)) {
		if (eax != 0
		    && ebx == 0x756E6547    // "Genu"
		    && edx == 0x49656E69    // "ineI"
		    && ecx == 0x6C65746E) { // "ntel"
			lfenceBeforeRdtsc = true;
		}
	}
	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
#if defined(__i386__)
		if ((edx & bit_SSE2) != 0) {
			hasSSE2 = true;
		}
#endif
	}

#if defined(HAVE_AS_X86_AES)
	setSupportAES(true);
#endif
#endif
}

// A publication barrier: a store/store barrier.

void publicationBarrier(void)
  __asm__ (GOSYM_PREFIX "runtime.publicationBarrier");

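// A release fence orders all earlier loads and stores before any later
// store, which subsumes the store/store ordering needed to publish an
// initialized object by storing a pointer to it.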
void
publicationBarrier()
{
  __atomic_thread_fence(__ATOMIC_RELEASE);
}

#ifdef __linux__

/* Currently sbrk0 is only called on GNU/Linux.  */

uintptr sbrk0(void)
  __asm__ (GOSYM_PREFIX "runtime.sbrk0");

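/* sbrk0 returns the current program break.  Passing 0 to the brk
   system call requests an invalid break, so the kernel leaves the
   break unchanged and returns its current value.  */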
uintptr
sbrk0()
{
  return syscall(SYS_brk, (uintptr)(0));
}

#endif /* __linux__ */