1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 #include <errno.h>
6 #include <signal.h>
7 #include <unistd.h>
8
9 #if defined(__i386__) || defined(__x86_64__)
10 #include <cpuid.h>
11 #endif
12
13 #ifdef __linux__
14 #include <syscall.h>
15 #endif
16
17 #include "config.h"
18
19 #include "runtime.h"
20 #include "arch.h"
21 #include "array.h"
22
23 int32
runtime_atoi(const byte * p,intgo len)24 runtime_atoi(const byte *p, intgo len)
25 {
26 int32 n;
27
28 n = 0;
29 while(len > 0 && '0' <= *p && *p <= '9') {
30 n = n*10 + *p++ - '0';
31 len--;
32 }
33 return n;
34 }
35
36 #if defined(__i386__) || defined(__x86_64__) || defined (__s390__) || defined (__s390x__)
37
38 // When cputicks is just asm instructions, skip the split stack
39 // prologue for speed.
40
41 int64 runtime_cputicks(void) __attribute__((no_split_stack));
42
43 #endif
44
45 // Whether the processor supports SSE2.
46 #if defined (__i386__)
47 static _Bool hasSSE2;
48
49 // Force appropriate CPU level so that we can call the lfence/mfence
50 // builtins.
51
52 #pragma GCC push_options
53 #pragma GCC target("sse2")
54
55 #elif defined(__x86_64__)
56 #define hasSSE2 true
57 #endif
58
59 #if defined(__i386__) || defined(__x86_64__)
60 // Whether to use lfence, as opposed to mfence.
61 // Set based on cpuid.
62 static _Bool lfenceBeforeRdtsc;
63 #endif // defined(__i386__) || defined(__x86_64__)
64
// runtime_cputicks returns a fast, monotonically-increasing CPU tick
// value used to seed fastrand and to timestamp blocking-profile events.
// On x86 it reads the time-stamp counter; on s390x it uses store clock
// fast; elsewhere it falls back to nanotime.
int64
runtime_cputicks(void)
{
#if defined(__i386__) || defined(__x86_64__)
	// rdtsc is not ordered with respect to surrounding memory
	// operations, so issue a fence first.  runtime_cpuinit sets
	// lfenceBeforeRdtsc on GenuineIntel CPUs, where the cheaper
	// lfence suffices; otherwise use the heavier mfence.
	if (hasSSE2) {
		if (lfenceBeforeRdtsc) {
			__builtin_ia32_lfence();
		} else {
			__builtin_ia32_mfence();
		}
	}
	return __builtin_ia32_rdtsc();
#elif defined (__s390__) || defined (__s390x__)
	uint64 clock = 0;
	/* stckf may not write the return variable in case of a clock error, so make
	   it read-write to prevent that the initialisation is optimised out.
	   Note: Targets below z9-109 will crash when executing store clock fast, i.e.
	   we don't support Go for machines older than that. */
	asm volatile(".insn s,0xb27c0000,%0" /* stckf */ : "+Q" (clock) : : "cc" );
	return (int64)clock;
#else
	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
	// TODO: need more entropy to better seed fastrand.
	return runtime_nanotime();
#endif
}
92
93 #if defined(__i386__)
94 #pragma GCC pop_options
95 #endif
96
97 void
runtime_signalstack(byte * p,uintptr n)98 runtime_signalstack(byte *p, uintptr n)
99 {
100 stack_t st;
101
102 st.ss_sp = p;
103 st.ss_size = n;
104 st.ss_flags = 0;
105 if(p == nil)
106 st.ss_flags = SS_DISABLE;
107 if(sigaltstack(&st, nil) < 0)
108 *(int *)0xf1 = 0xf1;
109 }
110
111 int32 go_open(char *, int32, int32)
112 __asm__ (GOSYM_PREFIX "runtime.open");
113
114 int32
go_open(char * name,int32 mode,int32 perm)115 go_open(char *name, int32 mode, int32 perm)
116 {
117 return runtime_open(name, mode, perm);
118 }
119
120 int32 go_read(int32, void *, int32)
121 __asm__ (GOSYM_PREFIX "runtime.read");
122
123 int32
go_read(int32 fd,void * p,int32 n)124 go_read(int32 fd, void *p, int32 n)
125 {
126 return runtime_read(fd, p, n);
127 }
128
129 int32 go_write(uintptr, void *, int32)
130 __asm__ (GOSYM_PREFIX "runtime.write");
131
132 int32
go_write(uintptr fd,void * p,int32 n)133 go_write(uintptr fd, void *p, int32 n)
134 {
135 return runtime_write(fd, p, n);
136 }
137
138 int32 go_closefd(int32)
139 __asm__ (GOSYM_PREFIX "runtime.closefd");
140
141 int32
go_closefd(int32 fd)142 go_closefd(int32 fd)
143 {
144 return runtime_close(fd);
145 }
146
147 intgo go_errno(void)
148 __asm__ (GOSYM_PREFIX "runtime.errno");
149
150 intgo
go_errno()151 go_errno()
152 {
153 return (intgo)errno;
154 }
155
// getEnd, exported to Go as runtime.getEnd, returns the value of the
// __go_end symbol (the end of the program's static data, per its name —
// presumably the linker-provided "end" symbol; confirm in runtime.h),
// or 0 when it is not available.
uintptr getEnd(void)
  __asm__ (GOSYM_PREFIX "runtime.getEnd");

uintptr
getEnd()
{
#ifdef _AIX
	// mmap addresses range start at 0x30000000 on AIX for 32 bits processes
	uintptr end = 0x30000000U;
#else
	uintptr end = 0;
	uintptr *pend;

	// Take the address first and test it: if __go_end is a weak
	// reference left undefined by the linker, its address is nil and
	// we must not dereference it.  NOTE(review): assumes a weak
	// declaration elsewhere — TODO confirm.
	pend = &__go_end;
	if (pend != nil) {
		end = *pend;
	}
#endif

	return end;
}
177
178 // CPU-specific initialization.
179 // Fetch CPUID info on x86.
180
181 void
runtime_cpuinit()182 runtime_cpuinit()
183 {
184 #if defined(__i386__) || defined(__x86_64__)
185 unsigned int eax, ebx, ecx, edx;
186
187 if (__get_cpuid(0, &eax, &ebx, &ecx, &edx)) {
188 if (eax != 0
189 && ebx == 0x756E6547 // "Genu"
190 && edx == 0x49656E69 // "ineI"
191 && ecx == 0x6C65746E) { // "ntel"
192 lfenceBeforeRdtsc = true;
193 }
194 }
195 if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
196 setCpuidECX(ecx);
197 #if defined(__i386__)
198 if ((edx & bit_SSE2) != 0) {
199 hasSSE2 = true;
200 }
201 #endif
202 }
203
204 #if defined(HAVE_AS_X86_AES)
205 setSupportAES(true);
206 #endif
207 #endif
208 }
209
210 // A publication barrier: a store/store barrier.
211
212 void publicationBarrier(void)
213 __asm__ (GOSYM_PREFIX "runtime.publicationBarrier");
214
215 void
publicationBarrier()216 publicationBarrier()
217 {
218 __atomic_thread_fence(__ATOMIC_RELEASE);
219 }
220
221 #ifdef __linux__
222
223 /* Currently sbrk0 is only called on GNU/Linux. */
224
225 uintptr sbrk0(void)
226 __asm__ (GOSYM_PREFIX "runtime.sbrk0");
227
228 uintptr
sbrk0()229 sbrk0()
230 {
231 return syscall(SYS_brk, (uintptr)(0));
232 }
233
234 #endif /* __linux__ */
235