1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Runtime type representation.
6
7package runtime
8
9import "unsafe"
10
// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
//	internal/reflectlite/type.go
type tflag uint8

const (
	tflagUncommon      tflag = 1 << 0 // an uncommontype struct directly follows the kind-specific type data (see (*_type).uncommon)
	tflagExtraStar     tflag = 1 << 1 // the stored name has a spurious leading "*" that (*_type).string strips
	tflagNamed         tflag = 1 << 2 // the type has a name (see (*_type).name)
	tflagRegularMemory tflag = 1 << 3 // equal and hash can treat values of this type as a single region of t.size bytes
)
26
// _type is the runtime representation of a Go type.
//
// Needs to be in sync with ../cmd/link/internal/ld/decodesym.go:/^func.commonsize,
// ../cmd/compile/internal/gc/reflect.go:/^func.dcommontype and
// ../reflect/type.go:/^type.rtype.
// ../internal/reflectlite/type.go:/^type.rtype.
type _type struct {
	size    uintptr // size of a value of this type, in bytes
	ptrdata uintptr // size of memory prefix holding all pointers
	hash    uint32  // hash of the type; used to bucket candidates in typelinksinit
	tflag   tflag   // extra type information flags (see tflag constants above)
	align   uint8   // alignment of a value of this type -- presumably in bytes; confirm against reflect/type.go
	fieldAlign uint8 // alignment when used as a struct field -- confirm against reflect/type.go
	kind       uint8 // low bits (kind & kindMask) are the kind enumeration; remaining bits are flags (e.g. KindGCProg)
	// function for comparing objects of this type
	// (ptr to object A, ptr to object B) -> ==?
	equal func(unsafe.Pointer, unsafe.Pointer) bool
	// gcdata stores the GC type data for the garbage collector.
	// If the KindGCProg bit is set in kind, gcdata is a GC program.
	// Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
	gcdata    *byte
	str       nameOff // offset of this type's encoded name (resolved by (*_type).nameOff)
	ptrToThis typeOff // offset of the type *T for this type T -- NOTE(review): presumed; confirm against reflect rtype
}
49
50func (t *_type) string() string {
51	s := t.nameOff(t.str).name()
52	if t.tflag&tflagExtraStar != 0 {
53		return s[1:]
54	}
55	return s
56}
57
// uncommon returns the uncommontype that trails t's kind-specific type
// structure in memory, or nil if t has none (tflagUncommon clear).
//
// The compiler lays the uncommontype out directly after the outer type
// structure (structtype, ptrtype, ...), so each case below declares an
// anonymous struct with exactly that layout and reinterprets t's memory
// through it. The layouts must match what the compiler emits; see the
// sync comment on _type.
func (t *_type) uncommon() *uncommontype {
	if t.tflag&tflagUncommon == 0 {
		return nil
	}
	switch t.kind & kindMask {
	case kindStruct:
		type u struct {
			structtype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindPtr:
		type u struct {
			ptrtype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindFunc:
		type u struct {
			functype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindSlice:
		type u struct {
			slicetype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindArray:
		type u struct {
			arraytype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindChan:
		type u struct {
			chantype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindMap:
		type u struct {
			maptype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindInterface:
		type u struct {
			interfacetype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	default:
		// Kinds with no extra type data: the uncommontype follows the
		// bare _type.
		type u struct {
			_type
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	}
}
119
120func (t *_type) name() string {
121	if t.tflag&tflagNamed == 0 {
122		return ""
123	}
124	s := t.string()
125	i := len(s) - 1
126	for i >= 0 && s[i] != '.' {
127		i--
128	}
129	return s[i+1:]
130}
131
132// pkgpath returns the path of the package where t was defined, if
133// available. This is not the same as the reflect package's PkgPath
134// method, in that it returns the package path for struct and interface
135// types, not just named types.
136func (t *_type) pkgpath() string {
137	if u := t.uncommon(); u != nil {
138		return t.nameOff(u.pkgpath).name()
139	}
140	switch t.kind & kindMask {
141	case kindStruct:
142		st := (*structtype)(unsafe.Pointer(t))
143		return st.pkgPath.name()
144	case kindInterface:
145		it := (*interfacetype)(unsafe.Pointer(t))
146		return it.pkgpath.name()
147	}
148	return ""
149}
150
// reflectOffs holds type offsets defined at run time by the reflect package.
//
// When a type is defined at run time, its *rtype data lives on the heap.
// There are a wide range of possible addresses the heap may use, that
// may not be representable as a 32-bit offset. Moreover the GC may
// one day start moving heap memory, in which case there is no stable
// offset that can be defined.
//
// To provide stable offsets, we pin *rtype objects in a global map
// and treat the offset as an identifier. We use negative offsets that
// do not overlap with any compile-time module offsets.
//
// Entries are created by reflect.addReflectOff.
var reflectOffs struct {
	lock mutex
	next int32                    // next offset identifier to hand out -- presumably always negative; confirm in reflect.addReflectOff
	m    map[int32]unsafe.Pointer // offset -> pinned pointer
	minv map[unsafe.Pointer]int32 // pointer -> offset (inverse of m)
}
170
// reflectOffsLock acquires the reflectOffs lock. Under the race
// detector it also records an acquire on the lock's address so that
// accesses guarded by the lock are ordered for the race detector.
func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}
177
// reflectOffsUnlock releases the reflectOffs lock, recording a release
// on the lock's address for the race detector first (mirror of
// reflectOffsLock).
func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}
184
185func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
186	if off == 0 {
187		return name{}
188	}
189	base := uintptr(ptrInModule)
190	for md := &firstmoduledata; md != nil; md = md.next {
191		if base >= md.types && base < md.etypes {
192			res := md.types + uintptr(off)
193			if res > md.etypes {
194				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
195				throw("runtime: name offset out of range")
196			}
197			return name{(*byte)(unsafe.Pointer(res))}
198		}
199	}
200
201	// No module found. see if it is a run time name.
202	reflectOffsLock()
203	res, found := reflectOffs.m[int32(off)]
204	reflectOffsUnlock()
205	if !found {
206		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
207		for next := &firstmoduledata; next != nil; next = next.next {
208			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
209		}
210		throw("runtime: name offset base pointer out of range")
211	}
212	return name{(*byte)(res)}
213}
214
// nameOff resolves the name offset off relative to t's module.
func (t *_type) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t), off)
}
218
// resolveTypeOff resolves the type offset off relative to the module
// whose type data contains ptrInModule. Offsets for types created at
// run time by the reflect package are looked up in reflectOffs
// instead. A zero offset yields nil; an unresolvable offset throws.
func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
	if off == 0 {
		return nil
	}
	base := uintptr(ptrInModule)
	var md *moduledata
	// Locate the module whose type section contains base.
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		// Not in any module: must be a run-time (reflect-created) type.
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: type offset base pointer out of range")
		}
		return (*_type)(res)
	}
	// Prefer the de-duplicated type recorded by typelinksinit (used
	// with buildmode=shared), if any.
	if t := md.typemap[off]; t != nil {
		return t
	}
	res := md.types + uintptr(off)
	if res > md.etypes {
		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
		throw("runtime: type offset out of range")
	}
	return (*_type)(unsafe.Pointer(res))
}
254
// typeOff resolves the type offset off relative to t's module.
func (t *_type) typeOff(off typeOff) *_type {
	return resolveTypeOff(unsafe.Pointer(t), off)
}
258
// textOff translates a method's text offset into an executable code
// pointer. The receiver t determines the module whose text the offset
// is relative to; offsets for reflect-created types are looked up in
// reflectOffs instead. Throws if the resulting address is out of the
// module's text range.
func (t *_type) textOff(off textOff) unsafe.Pointer {
	base := uintptr(unsafe.Pointer(t))
	var md *moduledata
	// Locate the module whose type section contains t.
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		// Not in any module: must be a run-time (reflect-created) type.
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	res := uintptr(0)

	// The text, or instruction stream is generated as one large buffer.  The off (offset) for a method is
	// its offset within this buffer.  If the total text size gets too large, there can be issues on platforms like ppc64 if
	// the target of calls are too far for the call instruction.  To resolve the large text issue, the text is split
	// into multiple text sections to allow the linker to generate long calls when necessary.  When this happens, the vaddr
	// for each text section is set to its offset within the text.  Each method's offset is compared against the section
	// vaddrs and sizes to determine the containing section.  Then the section relative offset is added to the section's
	// relocated baseaddr to compute the method address.

	if len(md.textsectmap) > 1 {
		for i := range md.textsectmap {
			sectaddr := md.textsectmap[i].vaddr
			sectlen := md.textsectmap[i].length
			if uintptr(off) >= sectaddr && uintptr(off) < sectaddr+sectlen {
				res = md.textsectmap[i].baseaddr + uintptr(off) - uintptr(md.textsectmap[i].vaddr)
				break
			}
		}
	} else {
		// single text section
		res = md.text + uintptr(off)
	}

	if res > md.etext && GOARCH != "wasm" { // on wasm, functions do not live in the same address space as the linear memory
		println("runtime: textOff", hex(off), "out of range", hex(md.text), "-", hex(md.etext))
		throw("runtime: text offset out of range")
	}
	return unsafe.Pointer(res)
}
311
// in returns the function type's input parameter types. The parameter
// type pointers are stored directly after the functype header — and
// after the uncommontype, when tflagUncommon is set.
func (t *functype) in() []*_type {
	// See funcType in reflect/type.go for details on data layout.
	uadd := uintptr(unsafe.Sizeof(functype{}))
	if t.typ.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommontype{})
	}
	// The [1 << 20] bound is an arbitrary large cap for the cast; only
	// the first inCount entries are actually present.
	return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[:t.inCount]
}
320
// out returns the function type's result types, which follow the
// inCount input types in the trailing parameter-type array (see in).
func (t *functype) out() []*_type {
	// See funcType in reflect/type.go for details on data layout.
	uadd := uintptr(unsafe.Sizeof(functype{}))
	if t.typ.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommontype{})
	}
	// Mask off the high bit of outCount: it flags variadic functions
	// (see dotdotdot); the low 15 bits are the real result count.
	outCount := t.outCount & (1<<15 - 1)
	return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
}
330
331func (t *functype) dotdotdot() bool {
332	return t.outCount&(1<<15) != 0
333}
334
// nameOff, typeOff and textOff are signed 32-bit offsets resolved by
// resolveNameOff, resolveTypeOff and (*_type).textOff respectively.
// Negative values identify run-time entries in reflectOffs.
type nameOff int32
type typeOff int32
type textOff int32

// method describes a method in the trailing [mcount]method array of an
// uncommontype.
type method struct {
	name nameOff // name of the method
	mtyp typeOff // method type (without receiver)
	ifn  textOff // fn used in interface call -- NOTE(review): presumed; confirm against reflect/type.go
	tfn  textOff // fn used for normal method call -- NOTE(review): presumed; confirm against reflect/type.go
}
345
// uncommontype holds data for types with methods or a package path; it
// trails the kind-specific type structure in memory (see
// (*_type).uncommon).
type uncommontype struct {
	pkgpath nameOff
	mcount  uint16 // number of methods
	xcount  uint16 // number of exported methods
	moff    uint32 // offset from this uncommontype to [mcount]method
	_       uint32 // unused
}

// imethod describes one method in an interface's method set. Its
// offsets are resolved relative to the containing module, which may
// differ from the interface type's module (see typesEqual).
type imethod struct {
	name nameOff
	ityp typeOff
}

// interfacetype is the runtime representation of an interface type.
type interfacetype struct {
	typ     _type
	pkgpath name      // package path for unexported method names
	mhdr    []imethod // method set
}
364
// maptype is the runtime representation of a map type.
type maptype struct {
	typ    _type
	key    *_type
	elem   *_type
	bucket *_type // internal type representing a hash bucket
	// function for hashing keys (ptr to key, seed) -> hash
	hasher     func(unsafe.Pointer, uintptr) uintptr
	keysize    uint8  // size of key slot
	elemsize   uint8  // size of elem slot
	bucketsize uint16 // size of bucket
	flags      uint32 // per-map flag bits; see the accessor methods below
}
377
// Accessors for the flag bits stored in maptype.flags.
//
// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/gc/reflect.go:dtypesym.
func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself
	return mt.flags&1 != 0
}
func (mt *maptype) indirectelem() bool { // store ptr to elem instead of elem itself
	return mt.flags&2 != 0
}
func (mt *maptype) reflexivekey() bool { // true if k==k for all keys
	return mt.flags&4 != 0
}
func (mt *maptype) needkeyupdate() bool { // true if we need to update key on an overwrite
	return mt.flags&8 != 0
}
func (mt *maptype) hashMightPanic() bool { // true if hash function might panic
	return mt.flags&16 != 0
}
395
// arraytype is the runtime representation of an array type.
type arraytype struct {
	typ   _type
	elem  *_type  // array element type
	slice *_type  // type of []elem -- NOTE(review): presumed; confirm against reflect arrayType
	len   uintptr // number of elements
}

// chantype is the runtime representation of a channel type.
type chantype struct {
	typ  _type
	elem *_type  // channel element type
	dir  uintptr // channel direction -- encoding defined elsewhere; confirm against reflect chanType
}

// slicetype is the runtime representation of a slice type.
type slicetype struct {
	typ  _type
	elem *_type // slice element type
}

// functype is the runtime representation of a function type. The
// parameter and result type pointers follow it in memory (see in/out).
type functype struct {
	typ      _type
	inCount  uint16
	outCount uint16 // high bit flags variadic (see dotdotdot)
}

// ptrtype is the runtime representation of a pointer type.
type ptrtype struct {
	typ  _type
	elem *_type // pointed-to type
}
424
// structfield describes one field of a struct type.
type structfield struct {
	name       name
	typ        *_type
	offsetAnon uintptr // byte offset within the struct, shifted left one bit; the low bit is a flag -- presumably "embedded"; confirm against reflect structField
}

// offset returns the field's byte offset within the struct,
// discarding the flag bit stored in the low bit of offsetAnon.
func (f *structfield) offset() uintptr {
	return f.offsetAnon >> 1
}
434
// structtype is the runtime representation of a struct type.
type structtype struct {
	typ     _type
	pkgPath name // package path for unexported field names
	fields  []structfield
}

// name is an encoded type name with optional extra data.
// See reflect/type.go for details.
type name struct {
	bytes *byte
}
446
// data returns a pointer to the byte at offset off within the name's
// encoded data.
func (n name) data(off int) *byte {
	return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off)))
}
450
451func (n name) isExported() bool {
452	return (*n.bytes)&(1<<0) != 0
453}
454
455func (n name) nameLen() int {
456	return int(uint16(*n.data(1))<<8 | uint16(*n.data(2)))
457}
458
459func (n name) tagLen() int {
460	if *n.data(0)&(1<<1) == 0 {
461		return 0
462	}
463	off := 3 + n.nameLen()
464	return int(uint16(*n.data(off))<<8 | uint16(*n.data(off + 1)))
465}
466
// name returns the name string. The string data starts at byte 3 of
// the encoded data (after the flag byte and the two length bytes). The
// returned string aliases the encoded data rather than copying it.
func (n name) name() (s string) {
	if n.bytes == nil {
		return ""
	}
	nl := n.nameLen()
	if nl == 0 {
		return ""
	}
	// Build the string header in place to avoid a copy.
	hdr := (*stringStruct)(unsafe.Pointer(&s))
	hdr.str = unsafe.Pointer(n.data(3))
	hdr.len = nl
	return s
}
480
// tag returns the tag string, or "" if the name has no tag. The tag
// data follows the name string and the tag's own two-byte length
// prefix; like name, the returned string aliases the encoded data.
func (n name) tag() (s string) {
	tl := n.tagLen()
	if tl == 0 {
		return ""
	}
	nl := n.nameLen()
	hdr := (*stringStruct)(unsafe.Pointer(&s))
	// 3 header bytes + name + 2 tag-length bytes.
	hdr.str = unsafe.Pointer(n.data(3 + nl + 2))
	hdr.len = tl
	return s
}
492
// pkgPath returns the package path stored with the name, or "" if none
// (bit 2 of the flag byte clear). The package path is encoded as a
// nameOff placed after the name and optional tag, and is resolved
// relative to the module containing n.bytes.
func (n name) pkgPath() string {
	if n.bytes == nil || *n.data(0)&(1<<2) == 0 {
		return ""
	}
	// Skip the 3 header bytes and the name string...
	off := 3 + n.nameLen()
	// ...and the tag with its two-byte length prefix, if present.
	if tl := n.tagLen(); tl > 0 {
		off += 2 + tl
	}
	var nameOff nameOff
	// The offset may be unaligned, so assemble it byte-by-byte.
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.bytes), nameOff)
	return pkgPathName.name()
}
506
507func (n name) isBlank() bool {
508	if n.bytes == nil {
509		return false
510	}
511	if n.nameLen() != 1 {
512		return false
513	}
514	return *n.data(3) == '_'
515}
516
// typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
//
// Modules are processed in activeModules order: each module's typemap
// maps its typelinks to the canonical *_type from the earliest module
// that defines a structurally equal type.
func typelinksinit() {
	// With a single module every *_type is already unique.
	if firstmoduledata.next == nil {
		return
	}
	// typehash buckets canonical types by their type hash so candidate
	// matches can be found cheaply.
	typehash := make(map[uint32][]*_type, len(firstmoduledata.typelinks))

	modules := activeModules()
	prev := modules[0]
	for _, md := range modules[1:] {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prev.typelinks {
			var t *_type
			if prev.typemap == nil {
				t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
			} else {
				// prev was itself de-duplicated; use its canonical type.
				t = prev.typemap[typeOff(tl)]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.hash] = append(tlist, t)
		}

		if md.typemap == nil {
			// If any of this module's typelinks match a type from a
			// prior module, prefer that prior type by adding the offset
			// to this module's typemap.
			tm := make(map[typeOff]*_type, len(md.typelinks))
			// Keep the map reachable so the GC does not collect it.
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, tl := range md.typelinks {
				t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
				for _, candidate := range typehash[t.hash] {
					seen := map[_typePair]struct{}{}
					if typesEqual(t, candidate, seen) {
						t = candidate
						break
					}
				}
				md.typemap[typeOff(tl)] = t
			}
		}

		prev = md
	}
}
570
// _typePair is a pair of types already being compared; used as the key
// of the seen set that breaks recursion in typesEqual.
type _typePair struct {
	t1 *_type
	t2 *_type
}
575
// typesEqual reports whether two types are equal.
//
// Everywhere in the runtime and reflect packages, it is assumed that
// there is exactly one *_type per Go type, so that pointer equality
// can be used to test if types are equal. There is one place that
// breaks this assumption: buildmode=shared. In this case a type can
// appear as two different pieces of memory. This is hidden from the
// runtime and reflect package by the per-module typemap built in
// typelinksinit. It uses typesEqual to map types from later modules
// back into earlier ones.
//
// Only typelinksinit needs this function.
func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
	tp := _typePair{t, v}
	if _, ok := seen[tp]; ok {
		return true
	}

	// mark these types as seen, and thus equivalent which prevents an infinite loop if
	// the two types are identical, but recursively defined and loaded from
	// different modules
	seen[tp] = struct{}{}

	if t == v {
		return true
	}
	// Kind, string representation, and package path must all agree
	// before comparing kind-specific structure.
	kind := t.kind & kindMask
	if kind != v.kind&kindMask {
		return false
	}
	if t.string() != v.string() {
		return false
	}
	ut := t.uncommon()
	uv := v.uncommon()
	if ut != nil || uv != nil {
		if ut == nil || uv == nil {
			return false
		}
		pkgpatht := t.nameOff(ut.pkgpath).name()
		pkgpathv := v.nameOff(uv.pkgpath).name()
		if pkgpatht != pkgpathv {
			return false
		}
	}
	// Primitive kinds carry no further structure to compare.
	if kindBool <= kind && kind <= kindComplex128 {
		return true
	}
	switch kind {
	case kindString, kindUnsafePointer:
		return true
	case kindArray:
		at := (*arraytype)(unsafe.Pointer(t))
		av := (*arraytype)(unsafe.Pointer(v))
		return typesEqual(at.elem, av.elem, seen) && at.len == av.len
	case kindChan:
		ct := (*chantype)(unsafe.Pointer(t))
		cv := (*chantype)(unsafe.Pointer(v))
		return ct.dir == cv.dir && typesEqual(ct.elem, cv.elem, seen)
	case kindFunc:
		ft := (*functype)(unsafe.Pointer(t))
		fv := (*functype)(unsafe.Pointer(v))
		if ft.outCount != fv.outCount || ft.inCount != fv.inCount {
			return false
		}
		tin, vin := ft.in(), fv.in()
		for i := 0; i < len(tin); i++ {
			if !typesEqual(tin[i], vin[i], seen) {
				return false
			}
		}
		tout, vout := ft.out(), fv.out()
		for i := 0; i < len(tout); i++ {
			if !typesEqual(tout[i], vout[i], seen) {
				return false
			}
		}
		return true
	case kindInterface:
		it := (*interfacetype)(unsafe.Pointer(t))
		iv := (*interfacetype)(unsafe.Pointer(v))
		if it.pkgpath.name() != iv.pkgpath.name() {
			return false
		}
		if len(it.mhdr) != len(iv.mhdr) {
			return false
		}
		// Methods must match pairwise in name, package path, and type.
		for i := range it.mhdr {
			tm := &it.mhdr[i]
			vm := &iv.mhdr[i]
			// Note the mhdr array can be relocated from
			// another module. See #17724.
			tname := resolveNameOff(unsafe.Pointer(tm), tm.name)
			vname := resolveNameOff(unsafe.Pointer(vm), vm.name)
			if tname.name() != vname.name() {
				return false
			}
			if tname.pkgPath() != vname.pkgPath() {
				return false
			}
			tityp := resolveTypeOff(unsafe.Pointer(tm), tm.ityp)
			vityp := resolveTypeOff(unsafe.Pointer(vm), vm.ityp)
			if !typesEqual(tityp, vityp, seen) {
				return false
			}
		}
		return true
	case kindMap:
		mt := (*maptype)(unsafe.Pointer(t))
		mv := (*maptype)(unsafe.Pointer(v))
		return typesEqual(mt.key, mv.key, seen) && typesEqual(mt.elem, mv.elem, seen)
	case kindPtr:
		pt := (*ptrtype)(unsafe.Pointer(t))
		pv := (*ptrtype)(unsafe.Pointer(v))
		return typesEqual(pt.elem, pv.elem, seen)
	case kindSlice:
		st := (*slicetype)(unsafe.Pointer(t))
		sv := (*slicetype)(unsafe.Pointer(v))
		return typesEqual(st.elem, sv.elem, seen)
	case kindStruct:
		st := (*structtype)(unsafe.Pointer(t))
		sv := (*structtype)(unsafe.Pointer(v))
		if len(st.fields) != len(sv.fields) {
			return false
		}
		if st.pkgPath.name() != sv.pkgPath.name() {
			return false
		}
		// Fields must match pairwise in name, type, tag, and
		// offset/embedded flag.
		for i := range st.fields {
			tf := &st.fields[i]
			vf := &sv.fields[i]
			if tf.name.name() != vf.name.name() {
				return false
			}
			if !typesEqual(tf.typ, vf.typ, seen) {
				return false
			}
			if tf.name.tag() != vf.name.tag() {
				return false
			}
			if tf.offsetAnon != vf.offsetAnon {
				return false
			}
		}
		return true
	default:
		println("runtime: impossible type kind", kind)
		throw("runtime: impossible type kind")
		return false
	}
}
727