1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package reflect implements run-time reflection, allowing a program to
6// manipulate objects with arbitrary types. The typical use is to take a value
7// with static type interface{} and extract its dynamic type information by
8// calling TypeOf, which returns a Type.
9//
10// A call to ValueOf returns a Value representing the run-time data.
11// Zero takes a Type and returns a Value representing a zero value
12// for that type.
13//
14// See "The Laws of Reflection" for an introduction to reflection in Go:
15// https://golang.org/doc/articles/laws_of_reflection.html
16package reflect
17
18import (
19	"strconv"
20	"sync"
21	"unicode"
22	"unicode/utf8"
23	"unsafe"
24)
25
26// Type is the representation of a Go type.
27//
28// Not all methods apply to all kinds of types. Restrictions,
29// if any, are noted in the documentation for each method.
30// Use the Kind method to find out the kind of type before
31// calling kind-specific methods. Calling a method
32// inappropriate to the kind of type causes a run-time panic.
33//
34// Type values are comparable, such as with the == operator,
35// so they can be used as map keys.
36// Two Type values are equal if they represent identical types.
37type Type interface {
38	// Methods applicable to all types.
39
40	// Align returns the alignment in bytes of a value of
41	// this type when allocated in memory.
42	Align() int
43
44	// FieldAlign returns the alignment in bytes of a value of
45	// this type when used as a field in a struct.
46	FieldAlign() int
47
48	// Method returns the i'th method in the type's method set.
49	// It panics if i is not in the range [0, NumMethod()).
50	//
51	// For a non-interface type T or *T, the returned Method's Type and Func
52	// fields describe a function whose first argument is the receiver,
53	// and only exported methods are accessible.
54	//
55	// For an interface type, the returned Method's Type field gives the
56	// method signature, without a receiver, and the Func field is nil.
57	//
58	// Methods are sorted in lexicographic order.
59	Method(int) Method
60
61	// MethodByName returns the method with that name in the type's
62	// method set and a boolean indicating if the method was found.
63	//
64	// For a non-interface type T or *T, the returned Method's Type and Func
65	// fields describe a function whose first argument is the receiver.
66	//
67	// For an interface type, the returned Method's Type field gives the
68	// method signature, without a receiver, and the Func field is nil.
69	MethodByName(string) (Method, bool)
70
71	// NumMethod returns the number of methods accessible using Method.
72	//
73	// Note that NumMethod counts unexported methods only for interface types.
74	NumMethod() int
75
76	// Name returns the type's name within its package for a defined type.
77	// For other (non-defined) types it returns the empty string.
78	Name() string
79
80	// PkgPath returns a defined type's package path, that is, the import path
81	// that uniquely identifies the package, such as "encoding/base64".
82	// If the type was predeclared (string, error) or not defined (*T, struct{},
83	// []int, or A where A is an alias for a non-defined type), the package path
84	// will be the empty string.
85	PkgPath() string
86
87	// Size returns the number of bytes needed to store
88	// a value of the given type; it is analogous to unsafe.Sizeof.
89	Size() uintptr
90
91	// String returns a string representation of the type.
92	// The string representation may use shortened package names
93	// (e.g., base64 instead of "encoding/base64") and is not
94	// guaranteed to be unique among types. To test for type identity,
95	// compare the Types directly.
96	String() string
97
	// Used internally by gccgo--the string form, retaining quoting.
99	rawString() string
100
101	// Kind returns the specific kind of this type.
102	Kind() Kind
103
104	// Implements reports whether the type implements the interface type u.
105	Implements(u Type) bool
106
107	// AssignableTo reports whether a value of the type is assignable to type u.
108	AssignableTo(u Type) bool
109
110	// ConvertibleTo reports whether a value of the type is convertible to type u.
111	// Even if ConvertibleTo returns true, the conversion may still panic.
112	// For example, a slice of type []T is convertible to *[N]T,
113	// but the conversion will panic if its length is less than N.
114	ConvertibleTo(u Type) bool
115
116	// Comparable reports whether values of this type are comparable.
117	// Even if Comparable returns true, the comparison may still panic.
118	// For example, values of interface type are comparable,
119	// but the comparison will panic if their dynamic type is not comparable.
120	Comparable() bool
121
122	// Methods applicable only to some types, depending on Kind.
123	// The methods allowed for each kind are:
124	//
125	//	Int*, Uint*, Float*, Complex*: Bits
126	//	Array: Elem, Len
127	//	Chan: ChanDir, Elem
128	//	Func: In, NumIn, Out, NumOut, IsVariadic.
129	//	Map: Key, Elem
130	//	Ptr: Elem
131	//	Slice: Elem
132	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
133
134	// Bits returns the size of the type in bits.
135	// It panics if the type's Kind is not one of the
136	// sized or unsized Int, Uint, Float, or Complex kinds.
137	Bits() int
138
139	// ChanDir returns a channel type's direction.
140	// It panics if the type's Kind is not Chan.
141	ChanDir() ChanDir
142
143	// IsVariadic reports whether a function type's final input parameter
144	// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
145	// implicit actual type []T.
146	//
	// For concreteness, if t represents func(x int, y ...float64), then
148	//
149	//	t.NumIn() == 2
150	//	t.In(0) is the reflect.Type for "int"
151	//	t.In(1) is the reflect.Type for "[]float64"
152	//	t.IsVariadic() == true
153	//
154	// IsVariadic panics if the type's Kind is not Func.
155	IsVariadic() bool
156
157	// Elem returns a type's element type.
158	// It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
159	Elem() Type
160
161	// Field returns a struct type's i'th field.
162	// It panics if the type's Kind is not Struct.
163	// It panics if i is not in the range [0, NumField()).
164	Field(i int) StructField
165
166	// FieldByIndex returns the nested field corresponding
167	// to the index sequence. It is equivalent to calling Field
168	// successively for each index i.
169	// It panics if the type's Kind is not Struct.
170	FieldByIndex(index []int) StructField
171
172	// FieldByName returns the struct field with the given name
173	// and a boolean indicating if the field was found.
174	FieldByName(name string) (StructField, bool)
175
176	// FieldByNameFunc returns the struct field with a name
177	// that satisfies the match function and a boolean indicating if
178	// the field was found.
179	//
180	// FieldByNameFunc considers the fields in the struct itself
181	// and then the fields in any embedded structs, in breadth first order,
182	// stopping at the shallowest nesting depth containing one or more
183	// fields satisfying the match function. If multiple fields at that depth
184	// satisfy the match function, they cancel each other
185	// and FieldByNameFunc returns no match.
186	// This behavior mirrors Go's handling of name lookup in
187	// structs containing embedded fields.
188	FieldByNameFunc(match func(string) bool) (StructField, bool)
189
190	// In returns the type of a function type's i'th input parameter.
191	// It panics if the type's Kind is not Func.
192	// It panics if i is not in the range [0, NumIn()).
193	In(i int) Type
194
195	// Key returns a map type's key type.
196	// It panics if the type's Kind is not Map.
197	Key() Type
198
199	// Len returns an array type's length.
200	// It panics if the type's Kind is not Array.
201	Len() int
202
203	// NumField returns a struct type's field count.
204	// It panics if the type's Kind is not Struct.
205	NumField() int
206
207	// NumIn returns a function type's input parameter count.
208	// It panics if the type's Kind is not Func.
209	NumIn() int
210
211	// NumOut returns a function type's output parameter count.
212	// It panics if the type's Kind is not Func.
213	NumOut() int
214
215	// Out returns the type of a function type's i'th output parameter.
216	// It panics if the type's Kind is not Func.
217	// It panics if i is not in the range [0, NumOut()).
218	Out(i int) Type
219
220	common() *rtype
221	uncommon() *uncommonType
222}
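
// typeUsageSketch is an illustrative sketch, not part of the reflect API, of
// the usage pattern described in the Type documentation above: check Kind
// before calling a kind-specific method, and rely on Type values being
// comparable in order to use them as map keys.
func typeUsageSketch(i interface{}, seen map[Type]int) {
	t := TypeOf(i)
	if t == nil {
		return // i was a nil interface value
	}
	seen[t]++ // Type values are comparable, so Type works as a map key.
	if t.Kind() == Slice {
		// Elem is valid only for Array, Chan, Map, Ptr, and Slice kinds.
		seen[t.Elem()]++
	}
}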
223
224// BUG(rsc): FieldByName and related functions consider struct field names to be equal
225// if the names are equal, even if they are unexported names originating
226// in different packages. The practical effect of this is that the result of
227// t.FieldByName("x") is not well defined if the struct type t contains
228// multiple fields named x (embedded from different packages).
229// FieldByName may return one of the fields named x or may report that there are none.
230// See https://golang.org/issue/4876 for more details.
231
232/*
233 * These data structures are known to the compiler (../../cmd/internal/reflectdata/reflect.go).
234 * A few are known to ../runtime/type.go to convey to debuggers.
235 * They are also known to ../runtime/type.go.
236 */
237
238// A Kind represents the specific kind of type that a Type represents.
239// The zero Kind is not a valid kind.
240type Kind uint
241
242const (
243	Invalid Kind = iota
244	Bool
245	Int
246	Int8
247	Int16
248	Int32
249	Int64
250	Uint
251	Uint8
252	Uint16
253	Uint32
254	Uint64
255	Uintptr
256	Float32
257	Float64
258	Complex64
259	Complex128
260	Array
261	Chan
262	Func
263	Interface
264	Map
265	Ptr
266	Slice
267	String
268	Struct
269	UnsafePointer
270)
271
272// tflag is used by an rtype to signal what extra type information is
273// available in the memory directly following the rtype value.
274//
275// tflag values must be kept in sync with copies in:
276//	go/types.cc
277//	runtime/type.go
278type tflag uint8
279
280const (
281	// tflagRegularMemory means that equal and hash functions can treat
282	// this type as a single region of t.size bytes.
283	tflagRegularMemory tflag = 1 << 3
284)
285
286// rtype is the common implementation of most values.
287// It is embedded in other struct types.
288//
289// rtype must be kept in sync with ../runtime/type.go:/^type._type.
290type rtype struct {
291	size       uintptr
292	ptrdata    uintptr // size of memory prefix holding all pointers
293	hash       uint32  // hash of type; avoids computation in hash tables
294	tflag      tflag   // extra type information flags
295	align      uint8   // alignment of variable with this type
296	fieldAlign uint8   // alignment of struct field with this type
297	kind       uint8   // enumeration for C
298	// function for comparing objects of this type
299	// (ptr to object A, ptr to object B) -> ==?
300	equal         func(unsafe.Pointer, unsafe.Pointer) bool
301	gcdata        *byte   // garbage collection data
302	string        *string // string form; unnecessary but undeniably useful
303	*uncommonType         // (relatively) uncommon fields
304	ptrToThis     *rtype  // type for pointer to this type, if used in binary or has methods
305}
306
307// Method on non-interface type
308type method struct {
309	name    *string        // name of method
310	pkgPath *string        // nil for exported Names; otherwise import path
311	mtyp    *rtype         // method type (without receiver)
312	typ     *rtype         // .(*FuncType) underneath (with receiver)
313	tfn     unsafe.Pointer // fn used for normal method call
314}
315
316// uncommonType is present only for defined types or types with methods
317// (if T is a defined type, the uncommonTypes for T and *T have methods).
318// Using a pointer to this struct reduces the overall size required
319// to describe a non-defined type with no methods.
320type uncommonType struct {
321	name    *string  // name of type
322	pkgPath *string  // import path; nil for built-in types like int, string
323	methods []method // methods associated with type
324}
325
326// ChanDir represents a channel type's direction.
327type ChanDir int
328
329const (
330	RecvDir ChanDir             = 1 << iota // <-chan
331	SendDir                                 // chan<-
332	BothDir = RecvDir | SendDir             // chan
333)
334
335// arrayType represents a fixed array type.
336type arrayType struct {
337	rtype
338	elem  *rtype // array element type
339	slice *rtype // slice type
340	len   uintptr
341}
342
343// chanType represents a channel type.
344type chanType struct {
345	rtype
346	elem *rtype  // channel element type
347	dir  uintptr // channel direction (ChanDir)
348}
349
350// funcType represents a function type.
351type funcType struct {
352	rtype
353	dotdotdot bool     // last input parameter is ...
354	in        []*rtype // input parameter types
355	out       []*rtype // output parameter types
356}
357
358// imethod represents a method on an interface type
359type imethod struct {
360	name    *string // name of method
361	pkgPath *string // nil for exported Names; otherwise import path
362	typ     *rtype  // .(*FuncType) underneath
363}
364
365// interfaceType represents an interface type.
366type interfaceType struct {
367	rtype
368	methods []imethod // sorted by hash
369}
370
371// mapType represents a map type.
372type mapType struct {
373	rtype
374	key    *rtype // map key type
375	elem   *rtype // map element (value) type
376	bucket *rtype // internal bucket structure
377	// function for hashing keys (ptr to key, seed) -> hash
378	hasher     func(unsafe.Pointer, uintptr) uintptr
379	keysize    uint8  // size of key slot
380	valuesize  uint8  // size of value slot
381	bucketsize uint16 // size of bucket
382	flags      uint32
383}
384
385// ptrType represents a pointer type.
386type ptrType struct {
387	rtype
388	elem *rtype // pointer element (pointed at) type
389}
390
391// sliceType represents a slice type.
392type sliceType struct {
393	rtype
394	elem *rtype // slice element type
395}
396
397// Struct field
398type structField struct {
399	name        *string // name is always non-empty
400	pkgPath     *string // nil for exported Names; otherwise import path
401	typ         *rtype  // type of field
402	tag         *string // nil if no tag
403	offsetEmbed uintptr // byte offset of field<<1 | isAnonymous
404}
405
406func (f *structField) offset() uintptr {
407	return f.offsetEmbed >> 1
408}
409
410func (f *structField) embedded() bool {
411	return f.offsetEmbed&1 != 0
412}
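
// offsetEmbedSketch is an illustrative sketch of the packing that offset and
// embedded decode above: the field's byte offset occupies the high bits of
// offsetEmbed and the low bit records whether the field is embedded.
func offsetEmbedSketch(offset uintptr, embedded bool) uintptr {
	oe := offset << 1
	if embedded {
		oe |= 1
	}
	return oe
}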
413
414// structType represents a struct type.
415type structType struct {
416	rtype
417	fields []structField // sorted by offset
418}
419
420/*
421 * The compiler knows the exact layout of all the data structures above.
422 * The compiler does not know about the data structures and methods below.
423 */
424
425// Method represents a single method.
426type Method struct {
427	// Name is the method name.
428	Name string
429
430	// PkgPath is the package path that qualifies a lower case (unexported)
431	// method name. It is empty for upper case (exported) method names.
432	// The combination of PkgPath and Name uniquely identifies a method
433	// in a method set.
434	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
435	PkgPath string
436
437	Type  Type  // method type
438	Func  Value // func with receiver as first argument
439	Index int   // index for Type.Method
440}
441
442// IsExported reports whether the method is exported.
443func (m Method) IsExported() bool {
444	return m.PkgPath == ""
445}
446
447const (
448	kindDirectIface = 1 << 5
449	kindGCProg      = 1 << 6 // Type.gc points to GC program
450	kindMask        = (1 << 5) - 1
451)
452
453// String returns the name of k.
454func (k Kind) String() string {
455	if int(k) < len(kindNames) {
456		return kindNames[k]
457	}
458	return "kind" + strconv.Itoa(int(k))
459}
460
461var kindNames = []string{
462	Invalid:       "invalid",
463	Bool:          "bool",
464	Int:           "int",
465	Int8:          "int8",
466	Int16:         "int16",
467	Int32:         "int32",
468	Int64:         "int64",
469	Uint:          "uint",
470	Uint8:         "uint8",
471	Uint16:        "uint16",
472	Uint32:        "uint32",
473	Uint64:        "uint64",
474	Uintptr:       "uintptr",
475	Float32:       "float32",
476	Float64:       "float64",
477	Complex64:     "complex64",
478	Complex128:    "complex128",
479	Array:         "array",
480	Chan:          "chan",
481	Func:          "func",
482	Interface:     "interface",
483	Map:           "map",
484	Ptr:           "ptr",
485	Slice:         "slice",
486	String:        "string",
487	Struct:        "struct",
488	UnsafePointer: "unsafe.Pointer",
489}
490
491func (t *uncommonType) uncommon() *uncommonType {
492	return t
493}
494
495func (t *uncommonType) PkgPath() string {
496	if t == nil || t.pkgPath == nil {
497		return ""
498	}
499	return *t.pkgPath
500}
501
502func (t *uncommonType) Name() string {
503	if t == nil || t.name == nil {
504		return ""
505	}
506	return *t.name
507}
508
509var methodCache sync.Map // map[*uncommonType][]method
510
511func (t *uncommonType) exportedMethods() []method {
512	methodsi, found := methodCache.Load(t)
513	if found {
514		return methodsi.([]method)
515	}
516
517	allm := t.methods
518	allExported := true
519	for _, m := range allm {
520		if m.pkgPath != nil {
521			allExported = false
522			break
523		}
524	}
525	var methods []method
526	if allExported {
527		methods = allm
528	} else {
529		methods = make([]method, 0, len(allm))
530		for _, m := range allm {
531			if m.pkgPath == nil {
532				methods = append(methods, m)
533			}
534		}
535		methods = methods[:len(methods):len(methods)]
536	}
537
538	methodsi, _ = methodCache.LoadOrStore(t, methods)
539	return methodsi.([]method)
540}
541
542func (t *rtype) rawString() string { return *t.string }
543
544func (t *rtype) String() string {
545	// For gccgo, strip out quoted strings.
546	s := *t.string
547	var q bool
548	r := make([]byte, len(s))
549	j := 0
550	for i := 0; i < len(s); i++ {
551		if s[i] == '\t' {
552			q = !q
553		} else if !q {
554			r[j] = s[i]
555			j++
556		}
557	}
558	return string(r[:j])
559}
560
561func (t *rtype) Size() uintptr { return t.size }
562
563func (t *rtype) Bits() int {
564	if t == nil {
565		panic("reflect: Bits of nil Type")
566	}
567	k := t.Kind()
568	if k < Int || k > Complex128 {
569		panic("reflect: Bits of non-arithmetic Type " + t.String())
570	}
571	return int(t.size) * 8
572}
573
574func (t *rtype) Align() int { return int(t.align) }
575
576func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
577
578func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
579
580func (t *rtype) pointers() bool { return t.ptrdata != 0 }
581
582func (t *rtype) common() *rtype { return t }
583
584func (t *rtype) exportedMethods() []method {
585	ut := t.uncommon()
586	if ut == nil {
587		return nil
588	}
589	return ut.exportedMethods()
590}
591
592func (t *rtype) NumMethod() int {
593	if t.Kind() == Interface {
594		tt := (*interfaceType)(unsafe.Pointer(t))
595		return tt.NumMethod()
596	}
597	return len(t.exportedMethods())
598}
599
600func (t *rtype) Method(i int) (m Method) {
601	if t.Kind() == Interface {
602		tt := (*interfaceType)(unsafe.Pointer(t))
603		return tt.Method(i)
604	}
605	methods := t.exportedMethods()
606	if i < 0 || i >= len(methods) {
607		panic("reflect: Method index out of range")
608	}
609	p := methods[i]
610	if p.name != nil {
611		m.Name = *p.name
612	}
613	fl := flag(Func)
614	mt := p.typ
615	m.Type = toType(mt)
616	x := new(unsafe.Pointer)
617	*x = unsafe.Pointer(&p.tfn)
618	m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
619	m.Index = i
620	return m
621}
622
623func (t *rtype) MethodByName(name string) (m Method, ok bool) {
624	if t.Kind() == Interface {
625		tt := (*interfaceType)(unsafe.Pointer(t))
626		return tt.MethodByName(name)
627	}
628	ut := t.uncommon()
629	if ut == nil {
630		return Method{}, false
631	}
632	utmethods := ut.methods
633	var eidx int
634	for i := 0; i < len(utmethods); i++ {
635		p := utmethods[i]
636		if p.pkgPath == nil {
637			if p.name != nil && *p.name == name {
638				return t.Method(eidx), true
639			}
640			eidx++
641		}
642	}
643	return Method{}, false
644}
645
646func (t *rtype) PkgPath() string {
647	return t.uncommonType.PkgPath()
648}
649
650func (t *rtype) hasName() bool {
651	return t.uncommonType != nil && t.uncommonType.name != nil
652}
653
654func (t *rtype) Name() string {
655	return t.uncommonType.Name()
656}
657
658func (t *rtype) ChanDir() ChanDir {
659	if t.Kind() != Chan {
660		panic("reflect: ChanDir of non-chan type " + t.String())
661	}
662	tt := (*chanType)(unsafe.Pointer(t))
663	return ChanDir(tt.dir)
664}
665
666func (t *rtype) IsVariadic() bool {
667	if t.Kind() != Func {
668		panic("reflect: IsVariadic of non-func type " + t.String())
669	}
670	tt := (*funcType)(unsafe.Pointer(t))
671	return tt.dotdotdot
672}
673
674func (t *rtype) Elem() Type {
675	switch t.Kind() {
676	case Array:
677		tt := (*arrayType)(unsafe.Pointer(t))
678		return toType(tt.elem)
679	case Chan:
680		tt := (*chanType)(unsafe.Pointer(t))
681		return toType(tt.elem)
682	case Map:
683		tt := (*mapType)(unsafe.Pointer(t))
684		return toType(tt.elem)
685	case Ptr:
686		tt := (*ptrType)(unsafe.Pointer(t))
687		return toType(tt.elem)
688	case Slice:
689		tt := (*sliceType)(unsafe.Pointer(t))
690		return toType(tt.elem)
691	}
692	panic("reflect: Elem of invalid type " + t.String())
693}
694
695func (t *rtype) Field(i int) StructField {
696	if t.Kind() != Struct {
697		panic("reflect: Field of non-struct type " + t.String())
698	}
699	tt := (*structType)(unsafe.Pointer(t))
700	return tt.Field(i)
701}
702
703func (t *rtype) FieldByIndex(index []int) StructField {
704	if t.Kind() != Struct {
705		panic("reflect: FieldByIndex of non-struct type " + t.String())
706	}
707	tt := (*structType)(unsafe.Pointer(t))
708	return tt.FieldByIndex(index)
709}
710
711func (t *rtype) FieldByName(name string) (StructField, bool) {
712	if t.Kind() != Struct {
713		panic("reflect: FieldByName of non-struct type " + t.String())
714	}
715	tt := (*structType)(unsafe.Pointer(t))
716	return tt.FieldByName(name)
717}
718
719func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
720	if t.Kind() != Struct {
721		panic("reflect: FieldByNameFunc of non-struct type " + t.String())
722	}
723	tt := (*structType)(unsafe.Pointer(t))
724	return tt.FieldByNameFunc(match)
725}
726
727func (t *rtype) In(i int) Type {
728	if t.Kind() != Func {
729		panic("reflect: In of non-func type " + t.String())
730	}
731	tt := (*funcType)(unsafe.Pointer(t))
732	return toType(tt.in[i])
733}
734
735func (t *rtype) Key() Type {
736	if t.Kind() != Map {
737		panic("reflect: Key of non-map type " + t.String())
738	}
739	tt := (*mapType)(unsafe.Pointer(t))
740	return toType(tt.key)
741}
742
743func (t *rtype) Len() int {
744	if t.Kind() != Array {
745		panic("reflect: Len of non-array type " + t.String())
746	}
747	tt := (*arrayType)(unsafe.Pointer(t))
748	return int(tt.len)
749}
750
751func (t *rtype) NumField() int {
752	if t.Kind() != Struct {
753		panic("reflect: NumField of non-struct type " + t.String())
754	}
755	tt := (*structType)(unsafe.Pointer(t))
756	return len(tt.fields)
757}
758
759func (t *rtype) NumIn() int {
760	if t.Kind() != Func {
761		panic("reflect: NumIn of non-func type " + t.String())
762	}
763	tt := (*funcType)(unsafe.Pointer(t))
764	return len(tt.in)
765}
766
767func (t *rtype) NumOut() int {
768	if t.Kind() != Func {
769		panic("reflect: NumOut of non-func type " + t.String())
770	}
771	tt := (*funcType)(unsafe.Pointer(t))
772	return len(tt.out)
773}
774
775func (t *rtype) Out(i int) Type {
776	if t.Kind() != Func {
777		panic("reflect: Out of non-func type " + t.String())
778	}
779	tt := (*funcType)(unsafe.Pointer(t))
780	return toType(tt.out[i])
781}
782
783// add returns p+x.
784//
785// The whySafe string is ignored, so that the function still inlines
786// as efficiently as p+x, but all call sites should use the string to
787// record why the addition is safe, which is to say why the addition
788// does not cause x to advance to the very end of p's allocation
789// and therefore point incorrectly at the next block in memory.
790func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
791	return unsafe.Pointer(uintptr(p) + x)
792}
793
794func (d ChanDir) String() string {
795	switch d {
796	case SendDir:
797		return "chan<-"
798	case RecvDir:
799		return "<-chan"
800	case BothDir:
801		return "chan"
802	}
803	return "ChanDir" + strconv.Itoa(int(d))
804}
805
806// Method returns the i'th method in the type's method set.
807func (t *interfaceType) Method(i int) (m Method) {
808	if i < 0 || i >= len(t.methods) {
809		return
810	}
811	p := &t.methods[i]
812	m.Name = *p.name
813	if p.pkgPath != nil {
814		m.PkgPath = *p.pkgPath
815	}
816	m.Type = toType(p.typ)
817	m.Index = i
818	return
819}
820
821// NumMethod returns the number of interface methods in the type's method set.
822func (t *interfaceType) NumMethod() int { return len(t.methods) }
823
// MethodByName returns the method with the given name in the type's method
// set and a boolean indicating whether the method was found.
825func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
826	if t == nil {
827		return
828	}
829	var p *imethod
830	for i := range t.methods {
831		p = &t.methods[i]
832		if *p.name == name {
833			return t.Method(i), true
834		}
835	}
836	return
837}
838
839// A StructField describes a single field in a struct.
840type StructField struct {
841	// Name is the field name.
842	Name string
843
844	// PkgPath is the package path that qualifies a lower case (unexported)
845	// field name. It is empty for upper case (exported) field names.
846	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
847	PkgPath string
848
849	Type      Type      // field type
850	Tag       StructTag // field tag string
851	Offset    uintptr   // offset within struct, in bytes
852	Index     []int     // index sequence for Type.FieldByIndex
853	Anonymous bool      // is an embedded field
854}
855
856// IsExported reports whether the field is exported.
857func (f StructField) IsExported() bool {
858	return f.PkgPath == ""
859}
860
861// A StructTag is the tag string in a struct field.
862//
863// By convention, tag strings are a concatenation of
864// optionally space-separated key:"value" pairs.
865// Each key is a non-empty string consisting of non-control
866// characters other than space (U+0020 ' '), quote (U+0022 '"'),
867// and colon (U+003A ':').  Each value is quoted using U+0022 '"'
868// characters and Go string literal syntax.
869type StructTag string
870
871// Get returns the value associated with key in the tag string.
872// If there is no such key in the tag, Get returns the empty string.
873// If the tag does not have the conventional format, the value
874// returned by Get is unspecified. To determine whether a tag is
875// explicitly set to the empty string, use Lookup.
876func (tag StructTag) Get(key string) string {
877	v, _ := tag.Lookup(key)
878	return v
879}
880
881// Lookup returns the value associated with key in the tag string.
882// If the key is present in the tag the value (which may be empty)
883// is returned. Otherwise the returned value will be the empty string.
884// The ok return value reports whether the value was explicitly set in
885// the tag string. If the tag does not have the conventional format,
886// the value returned by Lookup is unspecified.
887func (tag StructTag) Lookup(key string) (value string, ok bool) {
888	// When modifying this code, also update the validateStructTag code
889	// in cmd/vet/structtag.go.
890
891	for tag != "" {
892		// Skip leading space.
893		i := 0
894		for i < len(tag) && tag[i] == ' ' {
895			i++
896		}
897		tag = tag[i:]
898		if tag == "" {
899			break
900		}
901
902		// Scan to colon. A space, a quote or a control character is a syntax error.
903		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
904		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
905		// as it is simpler to inspect the tag's bytes than the tag's runes.
906		i = 0
907		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
908			i++
909		}
910		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
911			break
912		}
913		name := string(tag[:i])
914		tag = tag[i+1:]
915
916		// Scan quoted string to find value.
917		i = 1
918		for i < len(tag) && tag[i] != '"' {
919			if tag[i] == '\\' {
920				i++
921			}
922			i++
923		}
924		if i >= len(tag) {
925			break
926		}
927		qvalue := string(tag[:i+1])
928		tag = tag[i+1:]
929
930		if key == name {
931			value, err := strconv.Unquote(qvalue)
932			if err != nil {
933				break
934			}
935			return value, true
936		}
937	}
938	return "", false
939}
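
// structTagSketch is a minimal sketch of the Get and Lookup behavior
// documented above, using a hypothetical tag with json and xml keys.
func structTagSketch() (string, bool) {
	tag := StructTag(`json:"name,omitempty" xml:"name"`)
	v := tag.Get("json")        // "name,omitempty"
	_, ok := tag.Lookup("yaml") // ok is false: the key is absent entirely
	return v, ok
}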
940
941// Field returns the i'th struct field.
942func (t *structType) Field(i int) (f StructField) {
943	if i < 0 || i >= len(t.fields) {
944		panic("reflect: Field index out of bounds")
945	}
946	p := &t.fields[i]
947	f.Type = toType(p.typ)
948	f.Name = *p.name
949	f.Anonymous = p.embedded()
950	if p.pkgPath != nil {
951		f.PkgPath = *p.pkgPath
952	}
953	if p.tag != nil {
954		f.Tag = StructTag(*p.tag)
955	}
956	f.Offset = p.offset()
957
958	// NOTE(rsc): This is the only allocation in the interface
959	// presented by a reflect.Type. It would be nice to avoid,
960	// at least in the common cases, but we need to make sure
961	// that misbehaving clients of reflect cannot affect other
962	// uses of reflect. One possibility is CL 5371098, but we
963	// postponed that ugliness until there is a demonstrated
964	// need for the performance. This is issue 2320.
965	f.Index = []int{i}
966	return
967}
968
969// TODO(gri): Should there be an error/bool indicator if the index
970//            is wrong for FieldByIndex?
971
972// FieldByIndex returns the nested field corresponding to index.
973func (t *structType) FieldByIndex(index []int) (f StructField) {
974	f.Type = toType(&t.rtype)
975	for i, x := range index {
976		if i > 0 {
977			ft := f.Type
978			if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
979				ft = ft.Elem()
980			}
981			f.Type = ft
982		}
983		f = f.Type.Field(x)
984	}
985	return
986}
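
// fieldByIndexSketch is an illustrative sketch of how an index sequence is
// interpreted by FieldByIndex above: {0, 1} selects the second field of the
// first (embedded) field. The types here are hypothetical.
func fieldByIndexSketch() StructField {
	type inner struct{ A, B int }
	type outer struct{ inner }
	f, _ := TypeOf(outer{}).FieldByName("B") // promoted field; f.Index is []int{0, 1}
	return TypeOf(outer{}).FieldByIndex(f.Index)
}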
987
988// A fieldScan represents an item on the fieldByNameFunc scan work list.
989type fieldScan struct {
990	typ   *structType
991	index []int
992}
993
994// FieldByNameFunc returns the struct field with a name that satisfies the
995// match function and a boolean to indicate if the field was found.
996func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
997	// This uses the same condition that the Go language does: there must be a unique instance
998	// of the match at a given depth level. If there are multiple instances of a match at the
999	// same depth, they annihilate each other and inhibit any possible match at a lower level.
1000	// The algorithm is breadth first search, one depth level at a time.
1001
1002	// The current and next slices are work queues:
1003	// current lists the fields to visit on this depth level,
1004	// and next lists the fields on the next lower level.
1005	current := []fieldScan{}
1006	next := []fieldScan{{typ: t}}
1007
1008	// nextCount records the number of times an embedded type has been
1009	// encountered and considered for queueing in the 'next' slice.
1010	// We only queue the first one, but we increment the count on each.
1011	// If a struct type T can be reached more than once at a given depth level,
1012	// then it annihilates itself and need not be considered at all when we
1013	// process that next depth level.
1014	var nextCount map[*structType]int
1015
1016	// visited records the structs that have been considered already.
1017	// Embedded pointer fields can create cycles in the graph of
1018	// reachable embedded types; visited avoids following those cycles.
1019	// It also avoids duplicated effort: if we didn't find the field in an
1020	// embedded type T at level 2, we won't find it in one at level 4 either.
1021	visited := map[*structType]bool{}
1022
1023	for len(next) > 0 {
1024		current, next = next, current[:0]
1025		count := nextCount
1026		nextCount = nil
1027
1028		// Process all the fields at this depth, now listed in 'current'.
1029		// The loop queues embedded fields found in 'next', for processing during the next
1030		// iteration. The multiplicity of the 'current' field counts is recorded
1031		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
1032		for _, scan := range current {
1033			t := scan.typ
1034			if visited[t] {
1035				// We've looked through this type before, at a higher level.
1036				// That higher level would shadow the lower level we're now at,
1037				// so this one can't be useful to us. Ignore it.
1038				continue
1039			}
1040			visited[t] = true
1041			for i := range t.fields {
1042				f := &t.fields[i]
1043				// Find name and (for embedded field) type for field f.
1044				fname := *f.name
1045				var ntyp *rtype
1046				if f.embedded() {
1047					// Embedded field of type T or *T.
1048					ntyp = f.typ
1049					if ntyp.Kind() == Ptr {
1050						ntyp = ntyp.Elem().common()
1051					}
1052				}
1053
1054				// Does it match?
1055				if match(fname) {
1056					// Potential match
1057					if count[t] > 1 || ok {
1058						// Name appeared multiple times at this level: annihilate.
1059						return StructField{}, false
1060					}
1061					result = t.Field(i)
1062					result.Index = nil
1063					result.Index = append(result.Index, scan.index...)
1064					result.Index = append(result.Index, i)
1065					ok = true
1066					continue
1067				}
1068
1069				// Queue embedded struct fields for processing with next level,
1070				// but only if we haven't seen a match yet at this level and only
1071				// if the embedded types haven't already been queued.
1072				if ok || ntyp == nil || ntyp.Kind() != Struct {
1073					continue
1074				}
1075				ntyp = toType(ntyp).common()
1076				styp := (*structType)(unsafe.Pointer(ntyp))
1077				if nextCount[styp] > 0 {
1078					nextCount[styp] = 2 // exact multiple doesn't matter
1079					continue
1080				}
1081				if nextCount == nil {
1082					nextCount = map[*structType]int{}
1083				}
1084				nextCount[styp] = 1
1085				if count[t] > 1 {
1086					nextCount[styp] = 2 // exact multiple doesn't matter
1087				}
1088				var index []int
1089				index = append(index, scan.index...)
1090				index = append(index, i)
1091				next = append(next, fieldScan{styp, index})
1092			}
1093		}
1094		if ok {
1095			break
1096		}
1097	}
1098	return
1099}
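
// fieldByNameFuncSketch is an illustrative sketch of the annihilation rule
// implemented above: two embedded structs at the same depth both provide a
// field named C, so the lookup reports no match. The types are hypothetical.
func fieldByNameFuncSketch() bool {
	type A struct{ C int }
	type B struct{ C int }
	type outer struct {
		A
		B
	}
	_, ok := TypeOf(outer{}).FieldByNameFunc(func(s string) bool { return s == "C" })
	return ok // false: C is ambiguous at depth 1
}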
1100
1101// FieldByName returns the struct field with the given name
1102// and a boolean to indicate if the field was found.
1103func (t *structType) FieldByName(name string) (f StructField, present bool) {
1104	// Quick check for top-level name, or struct without embedded fields.
1105	hasEmbeds := false
1106	if name != "" {
1107		for i := range t.fields {
1108			tf := &t.fields[i]
1109			if *tf.name == name {
1110				return t.Field(i), true
1111			}
1112			if tf.embedded() {
1113				hasEmbeds = true
1114			}
1115		}
1116	}
1117	if !hasEmbeds {
1118		return
1119	}
1120	return t.FieldByNameFunc(func(s string) bool { return s == name })
1121}
1122
1123// TypeOf returns the reflection Type that represents the dynamic type of i.
1124// If i is a nil interface value, TypeOf returns nil.
1125func TypeOf(i interface{}) Type {
1126	eface := *(*emptyInterface)(unsafe.Pointer(&i))
1127	return toType(eface.typ)
1128}
1129
1130// ptrMap is the cache for PtrTo.
1131var ptrMap sync.Map // map[*rtype]*ptrType
1132
1133// PtrTo returns the pointer type with element t.
1134// For example, if t represents type Foo, PtrTo(t) represents *Foo.
1135func PtrTo(t Type) Type {
1136	return t.(*rtype).ptrTo()
1137}
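
// ptrToSketch is a sketch of the canonicalization performed below: because
// Type values for identical types compare equal, PtrTo applied to int should
// yield the same Type value as TypeOf applied to a *int.
func ptrToSketch() bool {
	return PtrTo(TypeOf(0)) == TypeOf((*int)(nil))
}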
1138
1139func (t *rtype) ptrTo() *rtype {
1140	if p := t.ptrToThis; p != nil {
1141		return p
1142	}
1143
1144	// Check the cache.
1145	if pi, ok := ptrMap.Load(t); ok {
1146		return &pi.(*ptrType).rtype
1147	}
1148
1149	// Look in known types.
1150	s := "*" + *t.string
1151	if tt := lookupType(s); tt != nil {
1152		p := (*ptrType)(unsafe.Pointer(toType(tt).(*rtype)))
1153		if p.elem == t {
1154			pi, _ := ptrMap.LoadOrStore(t, p)
1155			return &pi.(*ptrType).rtype
1156		}
1157	}
1158
1159	// Create a new ptrType starting with the description
1160	// of an *unsafe.Pointer.
1161	var iptr interface{} = (*unsafe.Pointer)(nil)
1162	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
1163	pp := *prototype
1164
1165	pp.string = &s
1166	pp.ptrToThis = nil
1167
1168	// For the type structures linked into the binary, the
1169	// compiler provides a good hash of the string.
1170	// Create a good hash for the new string by using
1171	// the FNV-1 hash's mixing function to combine the
1172	// old hash and the new "*".
1173	// p.hash = fnv1(t.hash, '*')
1174	// This is the gccgo version.
1175	pp.hash = (t.hash << 4) + 9
1176
1177	pp.uncommonType = nil
1178	pp.ptrToThis = nil
1179	pp.elem = t
1180
1181	q := toType(&pp.rtype).(*rtype)
1182	p := (*ptrType)(unsafe.Pointer(q))
1183	pi, _ := ptrMap.LoadOrStore(t, p)
1184	return &pi.(*ptrType).rtype
1185}
1186
1187// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
1188func fnv1(x uint32, list ...byte) uint32 {
1189	for _, b := range list {
1190		x = x*16777619 ^ uint32(b)
1191	}
1192	return x
1193}
1194
1195func (t *rtype) Implements(u Type) bool {
1196	if u == nil {
1197		panic("reflect: nil type passed to Type.Implements")
1198	}
1199	if u.Kind() != Interface {
1200		panic("reflect: non-interface type passed to Type.Implements")
1201	}
1202	return implements(u.(*rtype), t)
1203}
1204
1205func (t *rtype) AssignableTo(u Type) bool {
1206	if u == nil {
1207		panic("reflect: nil type passed to Type.AssignableTo")
1208	}
1209	uu := u.(*rtype)
1210	return directlyAssignable(uu, t) || implements(uu, t)
1211}
1212
1213func (t *rtype) ConvertibleTo(u Type) bool {
1214	if u == nil {
1215		panic("reflect: nil type passed to Type.ConvertibleTo")
1216	}
1217	uu := u.(*rtype)
1218	return convertOp(uu, t) != nil
1219}
1220
1221func (t *rtype) Comparable() bool {
1222	return t.equal != nil
1223}
1224
1225// implements reports whether the type V implements the interface type T.
1226func implements(T, V *rtype) bool {
1227	if T.Kind() != Interface {
1228		return false
1229	}
1230	t := (*interfaceType)(unsafe.Pointer(T))
1231	if len(t.methods) == 0 {
1232		return true
1233	}
1234
1235	// The same algorithm applies in both cases, but the
1236	// method tables for an interface type and a concrete type
1237	// are different, so the code is duplicated.
1238	// In both cases the algorithm is a linear scan over the two
1239	// lists - T's methods and V's methods - simultaneously.
1240	// Since method tables are stored in a unique sorted order
1241	// (alphabetical, with no duplicate method names), the scan
1242	// through V's methods must hit a match for each of T's
1243	// methods along the way, or else V does not implement T.
1244	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
1246	// See also ../runtime/iface.go.
1247	if V.Kind() == Interface {
1248		v := (*interfaceType)(unsafe.Pointer(V))
1249		i := 0
1250		for j := 0; j < len(v.methods); j++ {
1251			tm := &t.methods[i]
1252			vm := &v.methods[j]
1253			if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.typ).common() == toType(tm.typ).common() {
1254				if i++; i >= len(t.methods) {
1255					return true
1256				}
1257			}
1258		}
1259		return false
1260	}
1261
1262	v := V.uncommon()
1263	if v == nil {
1264		return false
1265	}
1266	i := 0
1267	for j := 0; j < len(v.methods); j++ {
1268		tm := &t.methods[i]
1269		vm := &v.methods[j]
1270		if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.mtyp).common() == toType(tm.typ).common() {
1271			if i++; i >= len(t.methods) {
1272				return true
1273			}
1274		}
1275	}
1276	return false
1277}
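
// implementsSketch is a sketch of the exported entry point for the scan
// above. A common way to obtain an interface Type is to take the Elem of a
// nil pointer to the interface.
func implementsSketch(v interface{}) bool {
	errType := TypeOf((*error)(nil)).Elem()
	t := TypeOf(v)
	return t != nil && t.Implements(errType)
}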
1278
1279// specialChannelAssignability reports whether a value x of channel type V
1280// can be directly assigned (using memmove) to another channel type T.
1281// https://golang.org/doc/go_spec.html#Assignability
1282// T and V must be both of Chan kind.
1283func specialChannelAssignability(T, V *rtype) bool {
1284	// Special case:
1285	// x is a bidirectional channel value, T is a channel type,
1286	// x's type V and T have identical element types,
1287	// and at least one of V or T is not a defined type.
1288	return V.ChanDir() == BothDir && (T.Name() == "" || V.Name() == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
1289}
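
// channelAssignabilitySketch is an illustrative sketch of the special case
// above: a value of the bidirectional type chan int is assignable to the
// non-defined type <-chan int because the element types are identical.
func channelAssignabilitySketch() bool {
	var c chan int
	return TypeOf(c).AssignableTo(TypeOf((<-chan int)(nil)))
}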
1290
1291// directlyAssignable reports whether a value x of type V can be directly
1292// assigned (using memmove) to a value of type T.
1293// https://golang.org/doc/go_spec.html#Assignability
1294// Ignoring the interface rules (implemented elsewhere)
1295// and the ideal constant rules (no ideal constants at run time).
1296func directlyAssignable(T, V *rtype) bool {
1297	// x's type V is identical to T?
1298	if rtypeEqual(T, V) {
1299		return true
1300	}
1301
1302	// Otherwise at least one of T and V must not be defined
1303	// and they must have the same kind.
1304	if T.hasName() && V.hasName() || T.Kind() != V.Kind() {
1305		return false
1306	}
1307
1308	if T.Kind() == Chan && specialChannelAssignability(T, V) {
1309		return true
1310	}
1311
1312	// x's type T and V must have identical underlying types.
1313	return haveIdenticalUnderlyingType(T, V, true)
1314}
1315
1316func haveIdenticalType(T, V Type, cmpTags bool) bool {
1317	if cmpTags {
1318		return T == V
1319	}
1320
1321	if T.Name() != V.Name() || T.Kind() != V.Kind() || T.PkgPath() != V.PkgPath() {
1322		return false
1323	}
1324
1325	return haveIdenticalUnderlyingType(T.common(), V.common(), false)
1326}
1327
1328func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
1329	if rtypeEqual(T, V) {
1330		return true
1331	}
1332
1333	kind := T.Kind()
1334	if kind != V.Kind() {
1335		return false
1336	}
1337
1338	// Non-composite types of equal kind have same underlying type
1339	// (the predefined instance of the type).
1340	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
1341		return true
1342	}
1343
1344	// Composite types.
1345	switch kind {
1346	case Array:
1347		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1348
1349	case Chan:
1350		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1351
1352	case Func:
1353		t := (*funcType)(unsafe.Pointer(T))
1354		v := (*funcType)(unsafe.Pointer(V))
1355		if t.dotdotdot != v.dotdotdot || len(t.in) != len(v.in) || len(t.out) != len(v.out) {
1356			return false
1357		}
1358		for i, typ := range t.in {
1359			if !haveIdenticalType(typ, v.in[i], cmpTags) {
1360				return false
1361			}
1362		}
1363		for i, typ := range t.out {
1364			if !haveIdenticalType(typ, v.out[i], cmpTags) {
1365				return false
1366			}
1367		}
1368		return true
1369
1370	case Interface:
1371		t := (*interfaceType)(unsafe.Pointer(T))
1372		v := (*interfaceType)(unsafe.Pointer(V))
1373		if len(t.methods) == 0 && len(v.methods) == 0 {
1374			return true
1375		}
1376		// Might have the same methods but still
1377		// need a run time conversion.
1378		return false
1379
1380	case Map:
1381		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1382
1383	case Ptr, Slice:
1384		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1385
1386	case Struct:
1387		t := (*structType)(unsafe.Pointer(T))
1388		v := (*structType)(unsafe.Pointer(V))
1389		if len(t.fields) != len(v.fields) {
1390			return false
1391		}
1392		for i := range t.fields {
1393			tf := &t.fields[i]
1394			vf := &v.fields[i]
1395			if tf.name != vf.name && (tf.name == nil || vf.name == nil || *tf.name != *vf.name) {
1396				return false
1397			}
1398			if tf.pkgPath != vf.pkgPath && (tf.pkgPath == nil || vf.pkgPath == nil || *tf.pkgPath != *vf.pkgPath) {
1399				return false
1400			}
1401			if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
1402				return false
1403			}
1404			if cmpTags && tf.tag != vf.tag && (tf.tag == nil || vf.tag == nil || *tf.tag != *vf.tag) {
1405				return false
1406			}
1407			if tf.offsetEmbed != vf.offsetEmbed {
1408				return false
1409			}
1410		}
1411		return true
1412	}
1413
1414	return false
1415}
1416
1417// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
1418var lookupCache sync.Map // map[cacheKey]*rtype
1419
1420// A cacheKey is the key for use in the lookupCache.
1421// Four values describe any of the types we are looking for:
1422// type kind, one or two subtypes, and an extra integer.
1423type cacheKey struct {
1424	kind  Kind
1425	t1    *rtype
1426	t2    *rtype
1427	extra uintptr
1428}
1429
1430// The funcLookupCache caches FuncOf lookups.
1431// FuncOf does not share the common lookupCache since cacheKey is not
1432// sufficient to represent functions unambiguously.
1433var funcLookupCache struct {
1434	sync.Mutex // Guards stores (but not loads) on m.
1435
1436	// m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
1437	// Elements of m are append-only and thus safe for concurrent reading.
1438	m sync.Map
1439}
1440
1441// ChanOf returns the channel type with the given direction and element type.
1442// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
1443//
1444// The gc runtime imposes a limit of 64 kB on channel element types.
1445// If t's size is equal to or exceeds this limit, ChanOf panics.
1446func ChanOf(dir ChanDir, t Type) Type {
1447	typ := t.(*rtype)
1448
1449	// Look in cache.
1450	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
1451	if ch, ok := lookupCache.Load(ckey); ok {
1452		return ch.(*rtype)
1453	}
1454
1455	// This restriction is imposed by the gc compiler and the runtime.
1456	if typ.size >= 1<<16 {
1457		panic("reflect.ChanOf: element size too large")
1458	}
1459
1460	// Look in known types.
1461	var s string
1462	switch dir {
1463	default:
1464		panic("reflect.ChanOf: invalid dir")
1465	case SendDir:
1466		s = "chan<- " + *typ.string
1467	case RecvDir:
1468		s = "<-chan " + *typ.string
1469	case BothDir:
1470		typeStr := *typ.string
1471		if typeStr[0] == '<' {
1472			// typ is recv chan, need parentheses as "<-" associates with leftmost
1473			// chan possible, see:
1474			// * https://golang.org/ref/spec#Channel_types
1475			// * https://github.com/golang/go/issues/39897
1476			s = "chan (" + typeStr + ")"
1477		} else {
1478			s = "chan " + typeStr
1479		}
1480	}
1481	if tt := lookupType(s); tt != nil {
1482		ch := (*chanType)(unsafe.Pointer(toType(tt).(*rtype)))
1483		if ch.elem == typ && ch.dir == uintptr(dir) {
1484			ti, _ := lookupCache.LoadOrStore(ckey, tt)
1485			return ti.(Type)
1486		}
1487	}
1488
1489	// Make a channel type.
1490	var ichan interface{} = (chan unsafe.Pointer)(nil)
1491	prototype := *(**chanType)(unsafe.Pointer(&ichan))
1492	ch := *prototype
1493	ch.tflag = tflagRegularMemory
1494	ch.dir = uintptr(dir)
1495	ch.string = &s
1496
1497	// gccgo uses a different hash.
1498	// ch.hash = fnv1(typ.hash, 'c', byte(dir))
1499	ch.hash = 0
1500	if dir&SendDir != 0 {
1501		ch.hash += 1
1502	}
1503	if dir&RecvDir != 0 {
1504		ch.hash += 2
1505	}
1506	ch.hash += typ.hash << 2
1507	ch.hash <<= 3
1508	ch.hash += 15
1509
1510	ch.elem = typ
1511	ch.uncommonType = nil
1512	ch.ptrToThis = nil
1513
1514	ti, _ := lookupCache.LoadOrStore(ckey, toType(&ch.rtype).(*rtype))
1515	return ti.(Type)
1516}
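
// chanOfSketch is an illustrative sketch of the string forms constructed
// above, including the parenthesized form required when the element type is
// itself a receive-only channel.
func chanOfSketch() (string, string) {
	recvInt := ChanOf(RecvDir, TypeOf(0)) // "<-chan int"
	outer := ChanOf(BothDir, recvInt)     // "chan (<-chan int)"
	return recvInt.String(), outer.String()
}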
1517
1518// MapOf returns the map type with the given key and element types.
1519// For example, if k represents int and e represents string,
1520// MapOf(k, e) represents map[int]string.
1521//
1522// If the key type is not a valid map key type (that is, if it does
1523// not implement Go's == operator), MapOf panics.
1524func MapOf(key, elem Type) Type {
1525	ktyp := key.(*rtype)
1526	etyp := elem.(*rtype)
1527
1528	if ktyp.equal == nil {
1529		panic("reflect.MapOf: invalid key type " + ktyp.String())
1530	}
1531
1532	// Look in cache.
1533	ckey := cacheKey{Map, ktyp, etyp, 0}
1534	if mt, ok := lookupCache.Load(ckey); ok {
1535		return mt.(Type)
1536	}
1537
1538	// Look in known types.
1539	s := "map[" + *ktyp.string + "]" + *etyp.string
1540	if tt := lookupType(s); tt != nil {
1541		mt := (*mapType)(unsafe.Pointer(toType(tt).(*rtype)))
1542		if mt.key == ktyp && mt.elem == etyp {
1543			ti, _ := lookupCache.LoadOrStore(ckey, tt)
1544			return ti.(Type)
1545		}
1546	}
1547
1548	// Make a map type.
1549	// Note: flag values must match those used in the TMAP case
1550	// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
1551	var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
1552	mt := **(**mapType)(unsafe.Pointer(&imap))
1553	mt.string = &s
1554
1555	// gccgo uses a different hash
1556	// mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
1557	mt.hash = ktyp.hash + etyp.hash + 2 + 14
1558
1559	mt.key = ktyp
1560	mt.elem = etyp
1561	mt.uncommonType = nil
1562	mt.ptrToThis = nil
1563
1564	mt.bucket = bucketOf(ktyp, etyp)
1565	mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
1566		return typehash(ktyp, p, seed)
1567	}
1568	mt.flags = 0
1569	if ktyp.size > maxKeySize {
1570		mt.keysize = uint8(ptrSize)
1571		mt.flags |= 1 // indirect key
1572	} else {
1573		mt.keysize = uint8(ktyp.size)
1574	}
1575	if etyp.size > maxValSize {
1576		mt.valuesize = uint8(ptrSize)
1577		mt.flags |= 2 // indirect value
1578	} else {
1579		mt.valuesize = uint8(etyp.size)
1580	}
1581	mt.bucketsize = uint16(mt.bucket.size)
1582	if isReflexive(ktyp) {
1583		mt.flags |= 4
1584	}
1585	if needKeyUpdate(ktyp) {
1586		mt.flags |= 8
1587	}
1588	if hashMightPanic(ktyp) {
1589		mt.flags |= 16
1590	}
1591
1592	ti, _ := lookupCache.LoadOrStore(ckey, toType(&mt.rtype).(*rtype))
1593	return ti.(Type)
1594}
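
// mapOfSketch is a minimal sketch of the MapOf contract described above:
// string is a valid key type, while a func type has no == operator and
// would make MapOf panic.
func mapOfSketch() Type {
	m := MapOf(TypeOf(""), TypeOf(0)) // map[string]int
	// MapOf(TypeOf(func() {}), TypeOf(0)) would panic: func is not a valid key type.
	return m
}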
1595
1596// FuncOf returns the function type with the given argument and result types.
1597// For example if k represents int and e represents string,
1598// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
1599//
1600// The variadic argument controls whether the function is variadic. FuncOf
1601// panics if the in[len(in)-1] does not represent a slice and variadic is
1602// true.
1603func FuncOf(in, out []Type, variadic bool) Type {
1604	if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
1605		panic("reflect.FuncOf: last arg of variadic func must be slice")
1606	}
1607
1608	// Make a func type.
1609	var ifunc interface{} = (func())(nil)
1610	prototype := *(**funcType)(unsafe.Pointer(&ifunc))
1611	ft := new(funcType)
1612	*ft = *prototype
1613
1614	// Build a hash and minimally populate ft.
1615	var hash uint32
1616	var fin, fout []*rtype
1617	shift := uint(1)
1618	for _, in := range in {
1619		t := in.(*rtype)
1620		fin = append(fin, t)
1621		hash += t.hash << shift
1622		shift++
1623	}
1624	shift = 2
1625	for _, out := range out {
1626		t := out.(*rtype)
1627		fout = append(fout, t)
1628		hash += t.hash << shift
1629		shift++
1630	}
1631	if variadic {
1632		hash++
1633	}
1634	hash <<= 4
1635	hash += 8
1636	ft.hash = hash
1637	ft.in = fin
1638	ft.out = fout
1639	ft.dotdotdot = variadic
1640
1641	// Look in cache.
1642	if ts, ok := funcLookupCache.m.Load(hash); ok {
1643		for _, t := range ts.([]*rtype) {
1644			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
1645				return t
1646			}
1647		}
1648	}
1649
1650	// Not in cache, lock and retry.
1651	funcLookupCache.Lock()
1652	defer funcLookupCache.Unlock()
1653	if ts, ok := funcLookupCache.m.Load(hash); ok {
1654		for _, t := range ts.([]*rtype) {
1655			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
1656				return t
1657			}
1658		}
1659	}
1660
1661	addToCache := func(tt *rtype) Type {
1662		var rts []*rtype
1663		if rti, ok := funcLookupCache.m.Load(hash); ok {
1664			rts = rti.([]*rtype)
1665		}
1666		funcLookupCache.m.Store(hash, append(rts, tt))
1667		return tt
1668	}
1669
1670	str := funcStr(ft)
1671	if tt := lookupType(str); tt != nil {
1672		if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
1673			return addToCache(tt)
1674		}
1675	}
1676
1677	// Populate the remaining fields of ft and store in cache.
1678	ft.string = &str
1679	ft.uncommonType = nil
1680	ft.ptrToThis = nil
1681	return addToCache(toType(&ft.rtype).(*rtype))
1682}
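
// funcOfSketch is an illustrative sketch of the FuncOf contract above: the
// final variadic parameter must be supplied as a slice type, here []int
// standing in for ...int.
func funcOfSketch() Type {
	in := []Type{TypeOf(""), TypeOf([]int(nil))}
	out := []Type{TypeOf(true)}
	return FuncOf(in, out, true) // func(string, ...int) bool
}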
1683
1684// funcStr builds a string representation of a funcType.
1685func funcStr(ft *funcType) string {
1686	repr := make([]byte, 0, 64)
1687	repr = append(repr, "func("...)
1688	for i, t := range ft.in {
1689		if i > 0 {
1690			repr = append(repr, ", "...)
1691		}
1692		if ft.dotdotdot && i == len(ft.in)-1 {
1693			repr = append(repr, "..."...)
1694			repr = append(repr, *(*sliceType)(unsafe.Pointer(t)).elem.string...)
1695		} else {
1696			repr = append(repr, *t.string...)
1697		}
1698	}
1699	repr = append(repr, ')')
1700	if l := len(ft.out); l == 1 {
1701		repr = append(repr, ' ')
1702	} else if l > 1 {
1703		repr = append(repr, " ("...)
1704	}
1705	for i, t := range ft.out {
1706		if i > 0 {
1707			repr = append(repr, ", "...)
1708		}
1709		repr = append(repr, *t.string...)
1710	}
1711	if len(ft.out) > 1 {
1712		repr = append(repr, ')')
1713	}
1714	return string(repr)
1715}
1716
1717// isReflexive reports whether the == operation on the type is reflexive.
1718// That is, x == x for all values x of type t.
1719func isReflexive(t *rtype) bool {
1720	switch t.Kind() {
1721	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
1722		return true
1723	case Float32, Float64, Complex64, Complex128, Interface:
1724		return false
1725	case Array:
1726		tt := (*arrayType)(unsafe.Pointer(t))
1727		return isReflexive(tt.elem)
1728	case Struct:
1729		tt := (*structType)(unsafe.Pointer(t))
1730		for _, f := range tt.fields {
1731			if !isReflexive(f.typ) {
1732				return false
1733			}
1734		}
1735		return true
1736	default:
1737		// Func, Map, Slice, Invalid
1738		panic("isReflexive called on non-key type " + t.String())
1739	}
1740}
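
// isReflexiveSketch is a sketch of the motivating case for the Float rules
// above: NaN is not equal to itself, so float keys are not reflexive.
func isReflexiveSketch() bool {
	z := 0.0
	nan := z / z      // NaN at run time; not a constant expression, so no compile error
	return nan == nan // false
}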
1741
1742// needKeyUpdate reports whether map overwrites require the key to be copied.
1743func needKeyUpdate(t *rtype) bool {
1744	switch t.Kind() {
1745	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
1746		return false
1747	case Float32, Float64, Complex64, Complex128, Interface, String:
1748		// Float keys can be updated from +0 to -0.
1749		// String keys can be updated to use a smaller backing store.
		// Interfaces might have floats or strings in them.
1751		return true
1752	case Array:
1753		tt := (*arrayType)(unsafe.Pointer(t))
1754		return needKeyUpdate(tt.elem)
1755	case Struct:
1756		tt := (*structType)(unsafe.Pointer(t))
1757		for _, f := range tt.fields {
1758			if needKeyUpdate(f.typ) {
1759				return true
1760			}
1761		}
1762		return false
1763	default:
1764		// Func, Map, Slice, Invalid
1765		panic("needKeyUpdate called on non-key type " + t.String())
1766	}
1767}
1768
1769// hashMightPanic reports whether the hash of a map key of type t might panic.
1770func hashMightPanic(t *rtype) bool {
1771	switch t.Kind() {
1772	case Interface:
1773		return true
1774	case Array:
1775		tt := (*arrayType)(unsafe.Pointer(t))
1776		return hashMightPanic(tt.elem)
1777	case Struct:
1778		tt := (*structType)(unsafe.Pointer(t))
1779		for _, f := range tt.fields {
1780			if hashMightPanic(f.typ) {
1781				return true
1782			}
1783		}
1784		return false
1785	default:
1786		return false
1787	}
1788}
1789
1790// Make sure these routines stay in sync with ../../runtime/map.go!
1791// These types exist only for GC, so we only fill out GC relevant info.
1792// Currently, that's just size and the GC program. We also fill in string
1793// for possible debugging use.
1794const (
1795	bucketSize uintptr = 8
1796	maxKeySize uintptr = 128
1797	maxValSize uintptr = 128
1798)
1799
1800func bucketOf(ktyp, etyp *rtype) *rtype {
1801	if ktyp.size > maxKeySize {
1802		ktyp = PtrTo(ktyp).(*rtype)
1803	}
1804	if etyp.size > maxValSize {
1805		etyp = PtrTo(etyp).(*rtype)
1806	}
1807
1808	// Prepare GC data if any.
1809	// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
1810	// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
1811	// Note that since the key and value are known to be <= 128 bytes,
1812	// they're guaranteed to have bitmaps instead of GC programs.
1813	var gcdata *byte
1814	var ptrdata uintptr
1815
1816	size := bucketSize
1817	size = align(size, uintptr(ktyp.fieldAlign))
1818	size += bucketSize * ktyp.size
1819	size = align(size, uintptr(etyp.fieldAlign))
1820	size += bucketSize * etyp.size
1821
1822	maxAlign := uintptr(ktyp.fieldAlign)
1823	if maxAlign < uintptr(etyp.fieldAlign) {
1824		maxAlign = uintptr(etyp.fieldAlign)
1825	}
1826	if maxAlign > ptrSize {
1827		size = align(size, maxAlign)
1828		size += align(ptrSize, maxAlign) - ptrSize
1829	} else if maxAlign < ptrSize {
1830		size = align(size, ptrSize)
1831		maxAlign = ptrSize
1832	}
1833
1834	ovoff := size
1835	size += ptrSize
1836
1837	if ktyp.ptrdata != 0 || etyp.ptrdata != 0 {
1838		nptr := size / ptrSize
1839		mask := make([]byte, (nptr+7)/8)
1840		psize := bucketSize
1841		psize = align(psize, uintptr(ktyp.fieldAlign))
1842		base := psize / ptrSize
1843
1844		if ktyp.ptrdata != 0 {
1845			emitGCMask(mask, base, ktyp, bucketSize)
1846		}
1847		psize += bucketSize * ktyp.size
1848		psize = align(psize, uintptr(etyp.fieldAlign))
1849		base = psize / ptrSize
1850
1851		if etyp.ptrdata != 0 {
1852			emitGCMask(mask, base, etyp, bucketSize)
1853		}
1854
1855		word := ovoff / ptrSize
1856		mask[word/8] |= 1 << (word % 8)
1857		gcdata = &mask[0]
1858		ptrdata = (word + 1) * ptrSize
1859
1860		// overflow word must be last
1861		if ptrdata != size {
1862			panic("reflect: bad layout computation in MapOf")
1863		}
1864	}
1865
1866	b := &rtype{
1867		align:      uint8(maxAlign),
1868		fieldAlign: uint8(maxAlign),
1869		size:       size,
1870		kind:       uint8(Struct),
1871		ptrdata:    ptrdata,
1872		gcdata:     gcdata,
1873	}
1874	s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
1875	b.string = &s
1876	return b
1877}
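
// As a worked example of the layout computed above (assuming a 64-bit
// target, so ptrSize == 8), the bucket for map[int64]int64 is:
//
//	8 tophash bytes            -> size = 8
//	8 keys   * 8 bytes each    -> size = 72
//	8 elems  * 8 bytes each    -> size = 136
//	1 overflow pointer         -> ovoff = 136, size = 144
//
// Neither the key nor the element contains pointers, so gcdata stays nil
// and ptrdata stays 0 for this bucket type.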
1878
1879func (t *rtype) gcSlice(begin, end uintptr) []byte {
1880	return (*[1 << 30]byte)(unsafe.Pointer(t.gcdata))[begin:end:end]
1881}
1882
1883// emitGCMask writes the GC mask for [n]typ into out, starting at bit
1884// offset base.
1885func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) {
1886	if typ.kind&kindGCProg != 0 {
1887		panic("reflect: unexpected GC program")
1888	}
1889	ptrs := typ.ptrdata / ptrSize
1890	words := typ.size / ptrSize
1891	mask := typ.gcSlice(0, (ptrs+7)/8)
1892	for j := uintptr(0); j < ptrs; j++ {
1893		if (mask[j/8]>>(j%8))&1 != 0 {
1894			for i := uintptr(0); i < n; i++ {
1895				k := base + i*words + j
1896				out[k/8] |= 1 << (k % 8)
1897			}
1898		}
1899	}
1900}
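
// For example, if typ is a two-word struct whose first word is a pointer
// (ptrs = 1, words = 2) and n = 3, then with base = 0 the bits set in out
// are 0, 2 and 4, so out[0] ends up 0b00010101 (assuming out starts zeroed):
// one pointer word at the start of each of the three replicas.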
1901
1902// appendGCProg appends the GC program for the first ptrdata bytes of
1903// typ to dst and returns the extended slice.
1904func appendGCProg(dst []byte, typ *rtype) []byte {
1905	if typ.kind&kindGCProg != 0 {
1906		// Element has GC program; emit one element.
1907		n := uintptr(*(*uint32)(unsafe.Pointer(typ.gcdata)))
1908		prog := typ.gcSlice(4, 4+n-1)
1909		return append(dst, prog...)
1910	}
1911
1912	// Element is small with pointer mask; use as literal bits.
1913	ptrs := typ.ptrdata / ptrSize
1914	mask := typ.gcSlice(0, (ptrs+7)/8)
1915
1916	// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
1917	for ; ptrs > 120; ptrs -= 120 {
1918		dst = append(dst, 120)
1919		dst = append(dst, mask[:15]...)
1920		mask = mask[15:]
1921	}
1922
1923	dst = append(dst, byte(ptrs))
1924	dst = append(dst, mask...)
1925	return dst
1926}
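
// For example, a pointer-mask type covering three words with pointers in
// words 0 and 2 (ptrs = 3, mask = []byte{0b101}) is appended as the two
// bytes 0x03, 0x05: a literal-bits instruction of length 3 followed by the
// mask bits themselves.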
1927
1928// SliceOf returns the slice type with element type t.
1929// For example, if t represents int, SliceOf(t) represents []int.
1930func SliceOf(t Type) Type {
1931	typ := t.(*rtype)
1932
1933	// Look in cache.
1934	ckey := cacheKey{Slice, typ, nil, 0}
1935	if slice, ok := lookupCache.Load(ckey); ok {
1936		return slice.(Type)
1937	}
1938
1939	// Look in known types.
1940	s := "[]" + *typ.string
1941	if tt := lookupType(s); tt != nil {
1942		slice := (*sliceType)(unsafe.Pointer(toType(tt).(*rtype)))
1943		if slice.elem == typ {
1944			ti, _ := lookupCache.LoadOrStore(ckey, tt)
1945			return ti.(Type)
1946		}
1947	}
1948
1949	// Make a slice type.
1950	var islice interface{} = ([]unsafe.Pointer)(nil)
1951	prototype := *(**sliceType)(unsafe.Pointer(&islice))
1952	slice := *prototype
1953	slice.string = &s
1954
1955	// gccgo uses a different hash.
1956	// slice.hash = fnv1(typ.hash, '[')
1957	slice.hash = typ.hash + 1 + 13
1958
1959	slice.elem = typ
1960	slice.uncommonType = nil
1961	slice.ptrToThis = nil
1962
1963	ti, _ := lookupCache.LoadOrStore(ckey, toType(&slice.rtype).(*rtype))
1964	return ti.(Type)
1965}
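
// A minimal sketch of how a caller composes SliceOf with the rest of the
// package (imports of fmt and reflect assumed):
//
//	t := reflect.SliceOf(reflect.TypeOf(int(0))) // represents []int
//	s := reflect.MakeSlice(t, 0, 4)
//	s = reflect.Append(s, reflect.ValueOf(7))
//	fmt.Println(s.Interface()) // [7]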
1966
1967// The structLookupCache caches StructOf lookups.
1968// StructOf does not share the common lookupCache since we need to pin
1969// the memory associated with *structTypeFixedN.
1970var structLookupCache struct {
1971	sync.Mutex // Guards stores (but not loads) on m.
1972
1973	// m is a map[uint32][]Type keyed by the hash calculated in StructOf.
1974	// Elements in m are append-only and thus safe for concurrent reading.
1975	m sync.Map
1976}
1977
1978// isLetter reports whether a given 'rune' is classified as a Letter.
1979func isLetter(ch rune) bool {
1980	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
1981}
1982
1983// isValidFieldName reports whether a string is a valid (struct) field name.
1984//
1985// According to the language spec, a field name should be an identifier.
1986//
1987// identifier = letter { letter | unicode_digit } .
1988// letter = unicode_letter | "_" .
1989func isValidFieldName(fieldName string) bool {
1990	for i, c := range fieldName {
1991		if i == 0 && !isLetter(c) {
1992			return false
1993		}
1994
1995		if !(isLetter(c) || unicode.IsDigit(c)) {
1996			return false
1997		}
1998	}
1999
2000	return len(fieldName) > 0
2001}
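
// For example, "Name", "_x", "värde" and "f1" are accepted, while "",
// "1f" and "a-b" are rejected.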
2002
2003// StructOf returns the struct type containing fields.
2004// The Offset and Index fields are ignored and computed as they would be
2005// by the compiler.
2006//
2007// StructOf currently does not generate wrapper methods for embedded
2008// fields and panics if passed unexported StructFields.
2009// These limitations may be lifted in a future version.
2010func StructOf(fields []StructField) Type {
2011	var (
2012		hash       = uint32(12)
2013		size       uintptr
2014		typalign   uint8
2015		comparable = true
2016
2017		fs   = make([]structField, len(fields))
2018		repr = make([]byte, 0, 64)
2019		fset = map[string]struct{}{} // fields' names
2020
2021		hasGCProg = false // records whether a struct-field type has a GCProg
2022	)
2023
2024	lastzero := uintptr(0)
2025	repr = append(repr, "struct {"...)
2026	pkgpath := ""
2027	for i, field := range fields {
2028		if field.Name == "" {
2029			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
2030		}
2031		if !isValidFieldName(field.Name) {
2032			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
2033		}
2034		if field.Type == nil {
2035			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
2036		}
2037		f, fpkgpath := runtimeStructField(field)
2038		ft := f.typ
2039		if ft.kind&kindGCProg != 0 {
2040			hasGCProg = true
2041		}
2042		if fpkgpath != "" {
2043			if pkgpath == "" {
2044				pkgpath = fpkgpath
2045			} else if pkgpath != fpkgpath {
2046				panic("reflect.StructOf: fields with different PkgPath " + pkgpath + " and " + fpkgpath)
2047			}
2048		}
2049
2050		// Update string and hash
2051		name := *f.name
2052		hash = (hash << 1) + ft.hash
2053		if !f.embedded() {
2054			repr = append(repr, (" " + name)...)
2055		} else {
2056			// Embedded field
2057			repr = append(repr, " ?"...)
2058			if f.typ.Kind() == Ptr {
2059				// Embedded ** and *interface{} are illegal
2060				elem := ft.Elem()
2061				if k := elem.Kind(); k == Ptr || k == Interface {
2062					panic("reflect.StructOf: illegal embedded field type " + ft.String())
2063				}
2064				name = elem.String()
2065			} else {
2066				name = ft.String()
2067			}
2068
2069			switch f.typ.Kind() {
2070			case Interface:
2071				ift := (*interfaceType)(unsafe.Pointer(ft))
2072				if len(ift.methods) > 0 {
2073					panic("reflect.StructOf: embedded field with methods not implemented")
2074				}
2075			case Ptr:
2076				ptr := (*ptrType)(unsafe.Pointer(ft))
2077				if unt := ptr.uncommon(); unt != nil {
2078					if len(unt.methods) > 0 {
2079						panic("reflect.StructOf: embedded field with methods not implemented")
2080					}
2081				}
2082				if unt := ptr.elem.uncommon(); unt != nil {
2083					if len(unt.methods) > 0 {
2084						panic("reflect.StructOf: embedded field with methods not implemented")
2085					}
2086				}
2087			default:
2088				if unt := ft.uncommon(); unt != nil {
2089					if len(unt.methods) > 0 {
2090						panic("reflect.StructOf: embedded field with methods not implemented")
2091					}
2092				}
2093			}
2094		}
2095		if _, dup := fset[name]; dup {
2096			panic("reflect.StructOf: duplicate field " + name)
2097		}
2098		fset[name] = struct{}{}
2099
2100		repr = append(repr, (" " + *ft.string)...)
2101		if f.tag != nil {
2102			repr = append(repr, (" " + strconv.Quote(*f.tag))...)
2103		}
2104		if i < len(fields)-1 {
2105			repr = append(repr, ';')
2106		}
2107
2108		comparable = comparable && (ft.equal != nil)
2109
2110		offset := align(size, uintptr(ft.fieldAlign))
2111		if ft.fieldAlign > typalign {
2112			typalign = ft.fieldAlign
2113		}
2114		size = offset + ft.size
2115		f.offsetEmbed |= offset << 1
2116
2117		if ft.size == 0 {
2118			lastzero = size
2119		}
2120
2121		fs[i] = f
2122	}
2123
2124	if size > 0 && lastzero == size {
2125		// This is a non-zero sized struct that ends in a
2126		// zero-sized field. We add an extra byte of padding,
2127		// to ensure that taking the address of the final
2128		// zero-sized field can't manufacture a pointer to the
2129		// next object in the heap. See issue 9401.
2130		size++
2131	}
2132
2133	if len(fs) > 0 {
2134		repr = append(repr, ' ')
2135	}
2136	repr = append(repr, '}')
2137	hash <<= 2
2138	str := string(repr)
2139
2140	// Round the size up to be a multiple of the alignment.
2141	size = align(size, uintptr(typalign))
2142
2143	// Make the struct type.
2144	var istruct interface{} = struct{}{}
2145	prototype := *(**structType)(unsafe.Pointer(&istruct))
2146	typ := new(structType)
2147	*typ = *prototype
2148	typ.fields = fs
2149
2150	// Look in cache.
2151	if ts, ok := structLookupCache.m.Load(hash); ok {
2152		for _, st := range ts.([]Type) {
2153			t := st.common()
2154			if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2155				return t
2156			}
2157		}
2158	}
2159
2160	// Not in cache, lock and retry.
2161	structLookupCache.Lock()
2162	defer structLookupCache.Unlock()
2163	if ts, ok := structLookupCache.m.Load(hash); ok {
2164		for _, st := range ts.([]Type) {
2165			t := st.common()
2166			if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2167				return t
2168			}
2169		}
2170	}
2171
2172	addToCache := func(t Type) Type {
2173		var ts []Type
2174		if ti, ok := structLookupCache.m.Load(hash); ok {
2175			ts = ti.([]Type)
2176		}
2177		structLookupCache.m.Store(hash, append(ts, t))
2178		return t
2179	}
2180
2181	// Look in known types.
2182	if tt := lookupType(str); tt != nil {
2183		if haveIdenticalUnderlyingType(&typ.rtype, tt, true) {
2184			return addToCache(tt)
2185		}
2186	}
2187
2188	typ.string = &str
2189	typ.tflag = 0 // TODO: set tflagRegularMemory
2190	typ.hash = hash
2191	typ.size = size
2192	typ.ptrdata = typeptrdata(typ.common())
2193	typ.align = typalign
2194	typ.fieldAlign = typalign
2195
2196	if hasGCProg {
2197		lastPtrField := 0
2198		for i, ft := range fs {
2199			if ft.typ.pointers() {
2200				lastPtrField = i
2201			}
2202		}
2203		prog := []byte{0, 0, 0, 0} // will be length of prog
2204		var off uintptr
2205		for i, ft := range fs {
2206			if i > lastPtrField {
2207				// gcprog should not include anything for any field after
2208				// the last field that contains pointer data
2209				break
2210			}
2211			if !ft.typ.pointers() {
2212				// Ignore pointerless fields.
2213				continue
2214			}
2215			// Pad to start of this field with zeros.
2216			if ft.offset() > off {
2217				n := (ft.offset() - off) / ptrSize
2218				prog = append(prog, 0x01, 0x00) // emit a 0 bit
2219				if n > 1 {
2220					prog = append(prog, 0x81)      // repeat previous bit
2221					prog = appendVarint(prog, n-1) // n-1 times
2222				}
2223				off = ft.offset()
2224			}
2225
2226			prog = appendGCProg(prog, ft.typ)
2227			off += ft.typ.ptrdata
2228		}
2229		prog = append(prog, 0)
2230		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2231		typ.kind |= kindGCProg
2232		typ.gcdata = &prog[0]
2233	} else {
2234		typ.kind &^= kindGCProg
2235		bv := new(bitVector)
2236		addTypeBits(bv, 0, typ.common())
2237		if len(bv.data) > 0 {
2238			typ.gcdata = &bv.data[0]
2239		}
2240	}
2241	typ.ptrdata = typeptrdata(typ.common())
2242
2243	typ.equal = nil
2244	if comparable {
2245		typ.equal = func(p, q unsafe.Pointer) bool {
2246			for _, ft := range typ.fields {
2247				pi := add(p, ft.offset(), "&x.field safe")
2248				qi := add(q, ft.offset(), "&x.field safe")
2249				if !ft.typ.equal(pi, qi) {
2250					return false
2251				}
2252			}
2253			return true
2254		}
2255	}
2256
2257	switch {
2258	case len(fs) == 1 && !ifaceIndir(fs[0].typ):
2259		// structs of 1 direct iface type can be direct
2260		typ.kind |= kindDirectIface
2261	default:
2262		typ.kind &^= kindDirectIface
2263	}
2264
2265	typ.uncommonType = nil
2266	typ.ptrToThis = nil
2267	return addToCache(toType(&typ.rtype).(*rtype))
2268}
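
// A minimal sketch of client-side StructOf usage, building the equivalent
// of struct { Name string `json:"name"` } at run time:
//
//	t := reflect.StructOf([]reflect.StructField{{
//		Name: "Name",
//		Type: reflect.TypeOf(""),
//		Tag:  `json:"name"`,
//	}})
//	v := reflect.New(t).Elem()
//	v.Field(0).SetString("gopher")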
2269
2270// runtimeStructField takes a StructField value passed to StructOf and
2271// returns both the corresponding internal representation, of type
2272// structField, and the pkgpath value to use for this field.
2273func runtimeStructField(field StructField) (structField, string) {
2274	if field.Anonymous && field.PkgPath != "" {
2275		panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set")
2276	}
2277
2278	if field.IsExported() {
2279		// Best-effort check for misuse.
2280		// Since this field will be treated as exported, not much harm done if Unicode lowercase slips through.
2281		c := field.Name[0]
2282		if 'a' <= c && c <= 'z' || c == '_' {
2283			panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
2284		}
2285	}
2286
2287	offsetEmbed := uintptr(0)
2288	if field.Anonymous {
2289		offsetEmbed |= 1
2290	}
2291
2292	s := field.Name
2293	name := &s
2294
2295	var tag *string
2296	if field.Tag != "" {
2297		st := string(field.Tag)
2298		tag = &st
2299	}
2300
2301	var pkgPath *string
2302	if field.PkgPath != "" {
2303		s := field.PkgPath
2304		pkgPath = &s
2305	}
2306	f := structField{
2307		name:        name,
2308		pkgPath:     pkgPath,
2309		typ:         field.Type.common(),
2310		tag:         tag,
2311		offsetEmbed: offsetEmbed,
2312	}
2313	return f, field.PkgPath
2314}
2315
2316// typeptrdata returns the length in bytes of the prefix of t
2317// containing pointer data. Anything after this offset is scalar data.
2318// keep in sync with ../cmd/compile/internal/reflectdata/reflect.go
2319func typeptrdata(t *rtype) uintptr {
2320	switch t.Kind() {
2321	case Struct:
2322		st := (*structType)(unsafe.Pointer(t))
2323		// find the last field that has pointers.
2324		field := -1
2325		for i := range st.fields {
2326			ft := st.fields[i].typ
2327			if ft.pointers() {
2328				field = i
2329			}
2330		}
2331		if field == -1 {
2332			return 0
2333		}
2334		f := st.fields[field]
2335		return f.offset() + f.typ.ptrdata
2336
2337	default:
2338		panic("reflect.typeptrdata: unexpected type, " + t.String())
2339	}
2340}
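
// For example, for struct { a *int; b int } on a 64-bit target the only
// pointer-bearing field is a, at offset 0 with ptrdata 8, so typeptrdata
// returns 8: the trailing int is scalar data and need not be scanned.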
2341
2342// See cmd/compile/internal/reflectdata/reflect.go for derivation of constant.
2343const maxPtrmaskBytes = 2048
2344
2345// ArrayOf returns the array type with the given length and element type.
2346// For example, if t represents int, ArrayOf(5, t) represents [5]int.
2347//
2348// If the resulting type would be larger than the available address space,
2349// ArrayOf panics.
2350func ArrayOf(length int, elem Type) Type {
2351	if length < 0 {
2352		panic("reflect: negative length passed to ArrayOf")
2353	}
2354
2355	typ := elem.(*rtype)
2356
2357	// Look in cache.
2358	ckey := cacheKey{Array, typ, nil, uintptr(length)}
2359	if array, ok := lookupCache.Load(ckey); ok {
2360		return array.(Type)
2361	}
2362
2363	// Look in known types.
2364	s := "[" + strconv.Itoa(length) + "]" + *typ.string
2365	if tt := lookupType(s); tt != nil {
2366		array := (*arrayType)(unsafe.Pointer(toType(tt).(*rtype)))
2367		if array.elem == typ {
2368			ti, _ := lookupCache.LoadOrStore(ckey, tt)
2369			return ti.(Type)
2370		}
2371	}
2372
2373	// Make an array type.
2374	var iarray interface{} = [1]unsafe.Pointer{}
2375	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
2376	array := *prototype
2377	array.tflag = typ.tflag & tflagRegularMemory
2378	array.string = &s
2379
2380	// gccgo uses a different hash.
2381	// array.hash = fnv1(typ.hash, '[')
2382	// for n := uint32(length); n > 0; n >>= 8 {
2383	// 	array.hash = fnv1(array.hash, byte(n))
2384	// }
2385	// array.hash = fnv1(array.hash, ']')
2386	array.hash = typ.hash + 1 + 13
2387	array.elem = typ
2388	array.ptrToThis = nil
2389	if typ.size > 0 {
2390		max := ^uintptr(0) / typ.size
2391		if uintptr(length) > max {
2392			panic("reflect.ArrayOf: array size would exceed virtual address space")
2393		}
2394	}
2395	array.size = typ.size * uintptr(length)
2396	if length > 0 && typ.ptrdata != 0 {
2397		array.ptrdata = typ.size*uintptr(length-1) + typ.ptrdata
2398	}
2399	array.align = typ.align
2400	array.fieldAlign = typ.fieldAlign
2401	array.uncommonType = nil
2402	array.len = uintptr(length)
2403	array.slice = SliceOf(elem).(*rtype)
2404
2405	switch {
2406	case typ.ptrdata == 0 || array.size == 0:
2407		// No pointers.
2408		array.gcdata = nil
2409		array.ptrdata = 0
2410
2411	case length == 1:
2412		// In memory, a 1-element array looks just like the element.
2413		array.kind |= typ.kind & kindGCProg
2414		array.gcdata = typ.gcdata
2415		array.ptrdata = typ.ptrdata
2416
2417	case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize:
2418		// Element is small with pointer mask; array is still small.
2419		// Create direct pointer mask by turning each 1 bit in elem
2420		// into length 1 bits in larger mask.
2421		mask := make([]byte, (array.ptrdata/ptrSize+7)/8)
2422		emitGCMask(mask, 0, typ, array.len)
2423		array.gcdata = &mask[0]
2424
2425	default:
2426		// Create program that emits one element
2427		// and then repeats to make the array.
2428		prog := []byte{0, 0, 0, 0} // will be length of prog
2429		prog = appendGCProg(prog, typ)
2430		// Pad from ptrdata to size.
2431		elemPtrs := typ.ptrdata / ptrSize
2432		elemWords := typ.size / ptrSize
2433		if elemPtrs < elemWords {
2434			// Emit literal 0 bit, then repeat as needed.
2435			prog = append(prog, 0x01, 0x00)
2436			if elemPtrs+1 < elemWords {
2437				prog = append(prog, 0x81)
2438				prog = appendVarint(prog, elemWords-elemPtrs-1)
2439			}
2440		}
2441		// Repeat length-1 times.
2442		if elemWords < 0x80 {
2443			prog = append(prog, byte(elemWords|0x80))
2444		} else {
2445			prog = append(prog, 0x80)
2446			prog = appendVarint(prog, elemWords)
2447		}
2448		prog = appendVarint(prog, uintptr(length)-1)
2449		prog = append(prog, 0)
2450		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2451		array.kind |= kindGCProg
2452		array.gcdata = &prog[0]
2453		array.ptrdata = array.size // overestimate but ok; must match program
2454	}
2455
2456	etyp := typ.common()
2457	esize := typ.size
2458
2459	array.equal = nil
2460	if eequal := etyp.equal; eequal != nil {
2461		array.equal = func(p, q unsafe.Pointer) bool {
2462			for i := 0; i < length; i++ {
2463				pi := arrayAt(p, i, esize, "i < length")
2464				qi := arrayAt(q, i, esize, "i < length")
2465				if !eequal(pi, qi) {
2466					return false
2467				}
2468			}
2469			return true
2470		}
2471	}
2472
2473	switch {
2474	case length == 1 && !ifaceIndir(typ):
2475		// array of 1 direct iface type can be direct
2476		array.kind |= kindDirectIface
2477	default:
2478		array.kind &^= kindDirectIface
2479	}
2480
2481	ti, _ := lookupCache.LoadOrStore(ckey, toType(&array.rtype).(*rtype))
2482	return ti.(Type)
2483}
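
// A minimal sketch of client-side ArrayOf usage:
//
//	t := reflect.ArrayOf(5, reflect.TypeOf(byte(0))) // represents [5]byte
//	v := reflect.New(t).Elem()
//	v.Index(0).SetUint('g')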
2484
2485func appendVarint(x []byte, v uintptr) []byte {
2486	for ; v >= 0x80; v >>= 7 {
2487		x = append(x, byte(v|0x80))
2488	}
2489	x = append(x, byte(v))
2490	return x
2491}
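
// The encoding is the usual little-endian base-128 varint; for example,
// appendVarint(nil, 300) yields the bytes 0xAC, 0x02 (300 = 0x2C + 0x02<<7).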
2492
2493// Look up a compiler-generated type descriptor.
2494// Implemented in runtime.
2495func lookupType(s string) *rtype
2496
2497// ifaceIndir reports whether t is stored indirectly in an interface value.
2498func ifaceIndir(t *rtype) bool {
2499	return t.kind&kindDirectIface == 0
2500}
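
// For example, a *int is pointer-shaped and is stored directly in the
// interface data word (kindDirectIface set), while a two-field struct is
// stored indirectly, as a pointer to a separate copy of the value.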
2501
2502// Note: this type must agree with runtime.bitvector.
2503type bitVector struct {
2504	n    uint32 // number of bits
2505	data []byte
2506}
2507
2508// append a bit to the bitmap.
2509func (bv *bitVector) append(bit uint8) {
2510	if bv.n%8 == 0 {
2511		bv.data = append(bv.data, 0)
2512	}
2513	bv.data[bv.n/8] |= bit << (bv.n % 8)
2514	bv.n++
2515}
2516
2517func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
2518	if t.ptrdata == 0 {
2519		return
2520	}
2521
2522	switch Kind(t.kind & kindMask) {
2523	case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
2524		// 1 pointer at start of representation
2525		for bv.n < uint32(offset/uintptr(ptrSize)) {
2526			bv.append(0)
2527		}
2528		bv.append(1)
2529
2530	case Interface:
2531		// 2 pointers
2532		for bv.n < uint32(offset/uintptr(ptrSize)) {
2533			bv.append(0)
2534		}
2535		bv.append(1)
2536		bv.append(1)
2537
2538	case Array:
2539		// repeat inner type
2540		tt := (*arrayType)(unsafe.Pointer(t))
2541		for i := 0; i < int(tt.len); i++ {
2542			addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
2543		}
2544
2545	case Struct:
2546		// apply fields
2547		tt := (*structType)(unsafe.Pointer(t))
2548		for i := range tt.fields {
2549			f := &tt.fields[i]
2550			addTypeBits(bv, offset+f.offset(), f.typ)
2551		}
2552	}
2553}
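
// As a worked example, for struct { p *int; n int; s string } on a 64-bit
// target addTypeBits appends the bits 1, 0, 1: a 1 for the pointer p, a 0
// for the scalar n, and a single 1 for the pointer word at the start of the
// string header. Words past the last pointer word are never appended, which
// matches typeptrdata above.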
2554