// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

//var Fadd64 = fadd64
//var Fsub64 = fsub64
//var Fmul64 = fmul64
//var Fdiv64 = fdiv64
//var F64to32 = f64to32
//var F32to64 = f32to64
//var Fcmp64 = fcmp64
//var Fintto64 = fintto64
//var F64toint = f64toint
//var Sqrt = sqrt

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var FuncPC = funcPC

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}
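
// A rough usage sketch (illustrative only; the runtime's lfstack tests
// manage node allocation and layout more carefully):
//
//	var head uint64
//	LFStackPush(&head, node)    // node is an *LFNode supplied by the test
//	popped := LFStackPop(&head) // LIFO: returns the most recently pushed node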

func GCMask(x interface{}) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}

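// RunSchedLocalQueueTest exercises a P's local run queue in isolation:
// for each length i it verifies the queue starts empty, pushes the same
// g i times with runqput, pops it back i times with runqget, and checks
// that the queue is empty again afterwards.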
func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

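// RunSchedLocalQueueStealTest puts i goroutines on p1's local run queue,
// steals from it into p2 with runqsteal, and checks that every g is
// drained exactly once and that roughly half of them (i/2 or i/2+1)
// ended up on p2.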
func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for an arbitrarily long time).
	done := make(chan bool, 1)
	_p_ := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(_p_, &gs[0], next0)
		go func(done chan bool, p *p, ready *uint32, next0, next1 bool) {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}(done, _p_, ready, next0, next1)
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(_p_, &gs[1], next1)
		runqget(_p_)
		<-done
		runqget(_p_)
	}
}

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

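// MemclrBytes zeroes the backing array of b by calling
// memclrNoHeapPointers directly.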
func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

var HashLoad = &hashLoad

// entry point for testing
//func GostringW(w []uint16) (s string) {
//	s = gostringw(&w[0])
//	return
//}

type Uintreg sys.Uintreg

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

//var BigEndian = sys.BigEndian

// For benchmarking.

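// BenchSetType calls heapBitsSetType n times for the pointee (or slice
// element) type of x, so benchmarks can measure the cost of writing heap
// type bitmaps for a given type.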
func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}
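
// A sketch of how this is typically driven from a benchmark in the
// runtime_test package (the benchmark name and node type below are
// illustrative):
//
//	func BenchmarkSetTypeNode(b *testing.B) {
//		type node struct{ left, right *node }
//		runtime.BenchSetType(b.N, new(node))
//	}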

const PtrSize = sys.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

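// CountPagesInUse returns the heap's pagesInUse counter together with an
// independent count obtained by walking allspans, so tests can verify the
// two agree. It stops the world while counting.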
func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}

func Fastrand() uint32          { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

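// ProfBuf exposes the runtime's profiling ring buffer (profBuf) so its
// write, read, and close paths can be driven directly from tests.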
type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}
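
// An illustrative round trip through the buffer (the sizes and the tag,
// hdr, and stk variables below are placeholders, not part of this file):
//
//	b := NewProfBuf(2, 1000, 10)          // 2 header words, 1000 data words, 10 tag slots
//	b.Write(&tag, nanotime(), hdr, stk)   // append one record
//	data, tags, eof := b.Read(ProfBufNonBlocking)
//	b.Close()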

// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees. readmemstats_m flushed the cached stats, so
		// these are up-to-date.
		var smallFree uint64
		slow.Frees = mheap_.nlargefree
		for i := range mheap_.nsmallfree {
			slow.Frees += mheap_.nsmallfree[i]
			bySize[i].Frees = mheap_.nsmallfree[i]
			bySize[i].Mallocs += mheap_.nsmallfree[i]
			smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
		}
		slow.Frees += memstats.tinyallocs
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

// deadlock is never unlocked, so the second lock call above blocks forever.
var deadlock mutex

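// RWMutex wraps the runtime-internal rwmutex so it can be exercised by
// tests outside the runtime package.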
type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}

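// MapBucketsCount returns the number of buckets currently allocated for
// m, computed as 1<<B from the map header.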
func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

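// MapBucketsPointerIsNil reports whether m's bucket array has not been
// allocated yet.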
func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}

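// LockOSCounts returns the current goroutine's external and internal
// LockOSThread counts, panicking if those counts disagree with the g's
// lockedm state.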
func LockOSCounts() (external, internal uint32) {
	g := getg()
	if g.m.lockedExt+g.m.lockedInt == 0 {
		if g.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if g.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return g.m.lockedExt, g.m.lockedInt
}