1// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
2// Use of this source code is governed by a MIT license found in the LICENSE file.
3
4package codec
5
6// Contains code shared by both encode and decode.
7
8// Some shared ideas around encoding/decoding
9// ------------------------------------------
10//
11// If an interface{} is passed, we first do a type assertion to see if it is
12// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
13//
14// If we start with a reflect.Value, we are already in reflect.Value land and
15// will try to grab the function for the underlying Type and directly call that function.
16// This is more performant than calling reflect.Value.Interface().
17//
18// This still helps us bypass many layers of reflection, and give best performance.
19//
20// Containers
21// ------------
22// Containers in the stream are either associative arrays (key-value pairs) or
23// regular arrays (indexed by incrementing integers).
24//
25// Some streams support indefinite-length containers, and use a breaking
26// byte-sequence to denote that the container has come to an end.
27//
28// Some streams also are text-based, and use explicit separators to denote the
29// end/beginning of different values.
30//
31// Philosophy
32// ------------
33// On decode, this codec will update containers appropriately:
34//    - If struct, update fields from stream into fields of struct.
35//      If field in stream not found in struct, handle appropriately (based on option).
36//      If a struct field has no corresponding value in the stream, leave it AS IS.
37//      If nil in stream, set value to nil/zero value.
38//    - If map, update map from stream.
39//      If the stream value is NIL, set the map to nil.
40//    - if slice, try to update up to length of array in stream.
41//      if container len is less than stream array length,
42//      and container cannot be expanded, handled (based on option).
43//      This means you can decode 4-element stream array into 1-element array.
44//
45// ------------------------------------
46// On encode, user can specify omitEmpty. This means that the value will be omitted
47// if the zero value. The problem may occur during decode, where omitted values do not affect
48// the value being decoded into. This means that if decoding into a struct with an
49// int field with current value=5, and the field is omitted in the stream, then after
50// decoding, the value will still be 5 (not 0).
51// omitEmpty only works if you guarantee that you always decode into zero-values.
52//
53// ------------------------------------
54// We could have truncated a map to remove keys not available in the stream,
55// or set values in the struct which are not in the stream to their zero values.
56// We decided against it because there is no efficient way to do it.
57// We may introduce it as an option later.
58// However, that will require enabling it for both runtime and code generation modes.
59//
60// To support truncate, we need to do 2 passes over the container:
61//   map
62//   - first collect all keys (e.g. in k1)
63//   - for each key in stream, mark k1 that the key should not be removed
64//   - after updating map, do second pass and call delete for all keys in k1 which are not marked
65//   struct:
66//   - for each field, track the *typeInfo s1
67//   - iterate through all s1, and for each one not marked, set value to zero
68//   - this involves checking the possible anonymous fields which are nil ptrs.
69//     too much work.
70//
71// ------------------------------------------
72// Error Handling is done within the library using panic.
73//
74// This way, the code doesn't have to keep checking if an error has happened,
75// and we don't have to keep sending the error value along with each call
76// or storing it in the En|Decoder and checking it constantly along the way.
77//
78// We considered storing the error is En|Decoder.
79//   - once it has its err field set, it cannot be used again.
80//   - panicing will be optional, controlled by const flag.
81//   - code should always check error first and return early.
82//
83// We eventually decided against it as it makes the code clumsier to always
84// check for these error conditions.
85//
86// ------------------------------------------
87// We use sync.Pool only for the aid of long-lived objects shared across multiple goroutines.
88// Encoder, Decoder, enc|decDriver, reader|writer, etc do not fall into this bucket.
89//
90// Also, GC is much better now, eliminating some of the reasons to use a shared pool structure.
91// Instead, the short-lived objects use free-lists that live as long as the object exists.
92//
93// ------------------------------------------
94// Performance is affected by the following:
95//    - Bounds Checking
96//    - Inlining
97//    - Pointer chasing
98// This package tries hard to manage the performance impact of these.
99//
100// ------------------------------------------
101// To alleviate performance due to pointer-chasing:
102//    - Prefer non-pointer values in a struct field
103//    - Refer to these directly within helper classes
104//      e.g. json.go refers directly to d.d.decRd
105//
106// We made the changes to embed En/Decoder in en/decDriver,
107// but we had to explicitly reference the fields as opposed to using a function
108// to get the better performance that we were looking for.
109// For example, we explicitly call d.d.decRd.fn() instead of d.d.r().fn().
110//
111// ------------------------------------------
112// Bounds Checking
113//    - Allow bytesDecReader to incur "bounds check error", and
114//      recover that as an io.EOF.
115//      This allows the bounds check branch to always be taken by the branch predictor,
116//      giving better performance (in theory), while ensuring that the code is shorter.
117//
118// ------------------------------------------
119// Escape Analysis
120//    - Prefer to return non-pointers if the value is used right away.
121//      Newly allocated values returned as pointers will be heap-allocated as they escape.
122//
123// Prefer functions and methods that
124//    - take no parameters and
125//    - return no results and
126//    - do not allocate.
127// These are optimized by the runtime.
128// For example, in json, we have dedicated functions for ReadMapElemKey, etc
129// which do not delegate to readDelim, as readDelim takes a parameter.
130// The difference in runtime was as much as 5%.
131
132import (
133	"bytes"
134	"encoding"
135	"encoding/binary"
136	"errors"
137	"fmt"
138	"io"
139	"math"
140	"reflect"
141	"sort"
142	"strconv"
143	"strings"
144	"sync"
145	"sync/atomic"
146	"time"
147)
148
149const (
150	// rvNLen is the length of the array for readn or writen calls
151	rwNLen = 7
152
153	// scratchByteArrayLen = 64
154	// initCollectionCap   = 16 // 32 is defensive. 16 is preferred.
155
156	// Support encoding.(Binary|Text)(Unm|M)arshaler.
157	// This constant flag will enable or disable it.
158	supportMarshalInterfaces = true
159
160	// for debugging, set this to false, to catch panic traces.
161	// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
162	recoverPanicToErr = true
163
164	// arrayCacheLen is the length of the cache used in encoder or decoder for
165	// allowing zero-alloc initialization.
166	// arrayCacheLen = 8
167
168	// size of the cacheline: defaulting to value for archs: amd64, arm64, 386
169	// should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
170	cacheLineSize = 64
171
172	wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize
173	wordSize     = wordSizeBits / 8
174
175	// so structFieldInfo fits into 8 bytes
176	maxLevelsEmbedding = 14
177
178	// xdebug controls whether xdebugf prints any output
179	xdebug = true
180)
181
182var (
183	oneByteArr    [1]byte
184	zeroByteSlice = oneByteArr[:0:0]
185
186	codecgen bool
187
188	panicv panicHdl
189
190	refBitset    bitset32
191	isnilBitset  bitset32
192	scalarBitset bitset32
193)
194
195var (
196	errMapTypeNotMapKind     = errors.New("MapType MUST be of Map Kind")
197	errSliceTypeNotSliceKind = errors.New("SliceType MUST be of Slice Kind")
198)
199
200var pool4tiload = sync.Pool{New: func() interface{} { return new(typeInfoLoadArray) }}
201
202func init() {
203	refBitset = refBitset.
204		set(byte(reflect.Map)).
205		set(byte(reflect.Ptr)).
206		set(byte(reflect.Func)).
207		set(byte(reflect.Chan)).
208		set(byte(reflect.UnsafePointer))
209
210	isnilBitset = isnilBitset.
211		set(byte(reflect.Map)).
212		set(byte(reflect.Ptr)).
213		set(byte(reflect.Func)).
214		set(byte(reflect.Chan)).
215		set(byte(reflect.UnsafePointer)).
216		set(byte(reflect.Interface)).
217		set(byte(reflect.Slice))
218
219	scalarBitset = scalarBitset.
220		set(byte(reflect.Bool)).
221		set(byte(reflect.Int)).
222		set(byte(reflect.Int8)).
223		set(byte(reflect.Int16)).
224		set(byte(reflect.Int32)).
225		set(byte(reflect.Int64)).
226		set(byte(reflect.Uint)).
227		set(byte(reflect.Uint8)).
228		set(byte(reflect.Uint16)).
229		set(byte(reflect.Uint32)).
230		set(byte(reflect.Uint64)).
231		set(byte(reflect.Uintptr)).
232		set(byte(reflect.Float32)).
233		set(byte(reflect.Float64)).
234		set(byte(reflect.Complex64)).
235		set(byte(reflect.Complex128)).
236		set(byte(reflect.String))
237
238}
239
240type handleFlag uint8
241
242const (
243	initedHandleFlag handleFlag = 1 << iota
244	binaryHandleFlag
245	jsonHandleFlag
246)
247
248type clsErr struct {
249	closed    bool  // is it closed?
250	errClosed error // error on closing
251}
252
253type charEncoding uint8
254
255const (
256	_ charEncoding = iota // make 0 unset
257	cUTF8
258	cUTF16LE
259	cUTF16BE
260	cUTF32LE
261	cUTF32BE
262	// Deprecated: not a true char encoding value
263	cRAW charEncoding = 255
264)
265
266// valueType is the stream type
267type valueType uint8
268
269const (
270	valueTypeUnset valueType = iota
271	valueTypeNil
272	valueTypeInt
273	valueTypeUint
274	valueTypeFloat
275	valueTypeBool
276	valueTypeString
277	valueTypeSymbol
278	valueTypeBytes
279	valueTypeMap
280	valueTypeArray
281	valueTypeTime
282	valueTypeExt
283
284	// valueTypeInvalid = 0xff
285)
286
287var valueTypeStrings = [...]string{
288	"Unset",
289	"Nil",
290	"Int",
291	"Uint",
292	"Float",
293	"Bool",
294	"String",
295	"Symbol",
296	"Bytes",
297	"Map",
298	"Array",
299	"Timestamp",
300	"Ext",
301}
302
303func (x valueType) String() string {
304	if int(x) < len(valueTypeStrings) {
305		return valueTypeStrings[x]
306	}
307	return strconv.FormatInt(int64(x), 10)
308}
309
310type seqType uint8
311
312const (
313	_ seqType = iota
314	seqTypeArray
315	seqTypeSlice
316	seqTypeChan
317)
318
319// note that containerMapStart and containerArraySend are not sent.
320// This is because the ReadXXXStart and EncodeXXXStart already does these.
321type containerState uint8
322
323const (
324	_ containerState = iota
325
326	containerMapStart
327	containerMapKey
328	containerMapValue
329	containerMapEnd
330	containerArrayStart
331	containerArrayElem
332	containerArrayEnd
333)
334
335// do not recurse if a containing type refers to an embedded type
336// which refers back to its containing type (via a pointer).
337// The second time this back-reference happens, break out,
338// so as not to cause an infinite loop.
339const rgetMaxRecursion = 2
340
341// Anecdotally, we believe most types have <= 12 fields.
342// - even Java's PMD rules set TooManyFields threshold to 15.
343// However, go has embedded fields, which should be regarded as
344// top level, allowing structs to possibly double or triple.
345// In addition, we don't want to keep creating transient arrays,
346// especially for the sfi index tracking, and the evtypes tracking.
347//
348// So - try to keep typeInfoLoadArray within 2K bytes
349const (
350	typeInfoLoadArraySfisLen   = 16
351	typeInfoLoadArraySfiidxLen = 8 * 112
352	typeInfoLoadArrayEtypesLen = 12
353	typeInfoLoadArrayBLen      = 8 * 4
354)
355
356// typeInfoLoad is a transient object used while loading up a typeInfo.
357type typeInfoLoad struct {
358	etypes []uintptr
359	sfis   []structFieldInfo
360}
361
362// typeInfoLoadArray is a cache object used to efficiently load up a typeInfo without
363// much allocation.
364type typeInfoLoadArray struct {
365	sfis   [typeInfoLoadArraySfisLen]structFieldInfo
366	sfiidx [typeInfoLoadArraySfiidxLen]byte
367	etypes [typeInfoLoadArrayEtypesLen]uintptr
368	b      [typeInfoLoadArrayBLen]byte // scratch - used for struct field names
369}
370
371// mirror json.Marshaler and json.Unmarshaler here,
372// so we don't import the encoding/json package
373
374type jsonMarshaler interface {
375	MarshalJSON() ([]byte, error)
376}
377type jsonUnmarshaler interface {
378	UnmarshalJSON([]byte) error
379}
380
381type isZeroer interface {
382	IsZero() bool
383}
384
385type codecError struct {
386	name string
387	err  interface{}
388}
389
390func (e codecError) Cause() error {
391	switch xerr := e.err.(type) {
392	case nil:
393		return nil
394	case error:
395		return xerr
396	case string:
397		return errors.New(xerr)
398	case fmt.Stringer:
399		return errors.New(xerr.String())
400	default:
401		return fmt.Errorf("%v", e.err)
402	}
403}
404
405func (e codecError) Error() string {
406	return fmt.Sprintf("%s error: %v", e.name, e.err)
407}
408
409var (
410	bigen               = binary.BigEndian
411	structInfoFieldName = "_struct"
412
413	mapStrIntfTyp  = reflect.TypeOf(map[string]interface{}(nil))
414	mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
415	intfSliceTyp   = reflect.TypeOf([]interface{}(nil))
416	intfTyp        = intfSliceTyp.Elem()
417
418	reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem()
419
420	stringTyp     = reflect.TypeOf("")
421	timeTyp       = reflect.TypeOf(time.Time{})
422	rawExtTyp     = reflect.TypeOf(RawExt{})
423	rawTyp        = reflect.TypeOf(Raw{})
424	uintptrTyp    = reflect.TypeOf(uintptr(0))
425	uint8Typ      = reflect.TypeOf(uint8(0))
426	uint8SliceTyp = reflect.TypeOf([]uint8(nil))
427	uintTyp       = reflect.TypeOf(uint(0))
428	intTyp        = reflect.TypeOf(int(0))
429
430	mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
431
432	binaryMarshalerTyp   = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
433	binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
434
435	textMarshalerTyp   = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
436	textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
437
438	jsonMarshalerTyp   = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
439	jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
440
441	selferTyp         = reflect.TypeOf((*Selfer)(nil)).Elem()
442	missingFielderTyp = reflect.TypeOf((*MissingFielder)(nil)).Elem()
443	iszeroTyp         = reflect.TypeOf((*isZeroer)(nil)).Elem()
444
445	uint8TypId      = rt2id(uint8Typ)
446	uint8SliceTypId = rt2id(uint8SliceTyp)
447	rawExtTypId     = rt2id(rawExtTyp)
448	rawTypId        = rt2id(rawTyp)
449	intfTypId       = rt2id(intfTyp)
450	timeTypId       = rt2id(timeTyp)
451	stringTypId     = rt2id(stringTyp)
452
453	mapStrIntfTypId  = rt2id(mapStrIntfTyp)
454	mapIntfIntfTypId = rt2id(mapIntfIntfTyp)
455	intfSliceTypId   = rt2id(intfSliceTyp)
456	// mapBySliceTypId  = rt2id(mapBySliceTyp)
457
458	intBitsize  = uint8(intTyp.Bits())
459	uintBitsize = uint8(uintTyp.Bits())
460
461	// bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
462	bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
463
464	chkOvf checkOverflow
465
466	errNoFieldNameToStructFieldInfo = errors.New("no field name passed to parseStructFieldInfo")
467)
468
469var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
470
471var immutableKindsSet = [32]bool{
472	// reflect.Invalid:  ,
473	reflect.Bool:       true,
474	reflect.Int:        true,
475	reflect.Int8:       true,
476	reflect.Int16:      true,
477	reflect.Int32:      true,
478	reflect.Int64:      true,
479	reflect.Uint:       true,
480	reflect.Uint8:      true,
481	reflect.Uint16:     true,
482	reflect.Uint32:     true,
483	reflect.Uint64:     true,
484	reflect.Uintptr:    true,
485	reflect.Float32:    true,
486	reflect.Float64:    true,
487	reflect.Complex64:  true,
488	reflect.Complex128: true,
489	// reflect.Array
490	// reflect.Chan
491	// reflect.Func: true,
492	// reflect.Interface
493	// reflect.Map
494	// reflect.Ptr
495	// reflect.Slice
496	reflect.String: true,
497	// reflect.Struct
498	// reflect.UnsafePointer
499}
500
501// SelfExt is a sentinel extension signifying that types
502// registered with it SHOULD be encoded and decoded
503// based on the native mode of the format.
504//
505// This allows users to define a tag for an extension,
506// but signify that the types should be encoded/decoded as the native encoding.
507// This way, users need not also define how to encode or decode the extension.
508var SelfExt = &extFailWrapper{}
509
510// Selfer defines methods by which a value can encode or decode itself.
511//
512// Any type which implements Selfer will be able to encode or decode itself.
513// Consequently, during (en|de)code, this takes precedence over
514// (text|binary)(M|Unm)arshal or extension support.
515//
516// By definition, it is not allowed for a Selfer to directly call Encode or Decode on itself.
517// If that is done, Encode/Decode will rightfully fail with a Stack Overflow style error.
518// For example, the snippet below will cause such an error.
519//     type testSelferRecur struct{}
520//     func (s *testSelferRecur) CodecEncodeSelf(e *Encoder) { e.MustEncode(s) }
521//     func (s *testSelferRecur) CodecDecodeSelf(d *Decoder) { d.MustDecode(s) }
522//
523// Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
524// This is because, during each decode, we first check the the next set of bytes
525// represent nil, and if so, we just set the value to nil.
526type Selfer interface {
527	CodecEncodeSelf(*Encoder)
528	CodecDecodeSelf(*Decoder)
529}
530
531// MissingFielder defines the interface allowing structs to internally decode or encode
532// values which do not map to struct fields.
533//
534// We expect that this interface is bound to a pointer type (so the mutation function works).
535//
536// A use-case is if a version of a type unexports a field, but you want compatibility between
537// both versions during encoding and decoding.
538//
539// Note that the interface is completely ignored during codecgen.
540type MissingFielder interface {
541	// CodecMissingField is called to set a missing field and value pair.
542	//
543	// It returns true if the missing field was set on the struct.
544	CodecMissingField(field []byte, value interface{}) bool
545
546	// CodecMissingFields returns the set of fields which are not struct fields
547	CodecMissingFields() map[string]interface{}
548}
549
550// MapBySlice is a tag interface that denotes wrapped slice should encode as a map in the stream.
551// The slice contains a sequence of key-value pairs.
552// This affords storing a map in a specific sequence in the stream.
553//
554// Example usage:
555//    type T1 []string         // or []int or []Point or any other "slice" type
556//    func (_ T1) MapBySlice{} // T1 now implements MapBySlice, and will be encoded as a map
557//    type T2 struct { KeyValues T1 }
558//
559//    var kvs = []string{"one", "1", "two", "2", "three", "3"}
560//    var v2 = T2{ KeyValues: T1(kvs) }
561//    // v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} }
562//
563// The support of MapBySlice affords the following:
564//   - A slice type which implements MapBySlice will be encoded as a map
565//   - A slice can be decoded from a map in the stream
566//   - It MUST be a slice type (not a pointer receiver) that implements MapBySlice
567type MapBySlice interface {
568	MapBySlice()
569}
570
571// BasicHandle encapsulates the common options and extension functions.
572//
573// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
574type BasicHandle struct {
575	// BasicHandle is always a part of a different type.
576	// It doesn't have to fit into it own cache lines.
577
578	// TypeInfos is used to get the type info for any type.
579	//
580	// If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
581	TypeInfos *TypeInfos
582
583	// Note: BasicHandle is not comparable, due to these slices here (extHandle, intf2impls).
584	// If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
585	// Thses slices are used all the time, so keep as slices (not pointers).
586
587	extHandle
588
589	rtidFns      atomicRtidFnSlice
590	rtidFnsNoExt atomicRtidFnSlice
591
592	// ---- cache line
593
594	DecodeOptions
595
596	// ---- cache line
597
598	EncodeOptions
599
600	intf2impls
601
602	mu     sync.Mutex
603	inited uint32 // holds if inited, and also handle flags (binary encoding, json handler, etc)
604
605	RPCOptions
606
607	// TimeNotBuiltin configures whether time.Time should be treated as a builtin type.
608	//
609	// All Handlers should know how to encode/decode time.Time as part of the core
610	// format specification, or as a standard extension defined by the format.
611	//
612	// However, users can elect to handle time.Time as a custom extension, or via the
613	// standard library's encoding.Binary(M|Unm)arshaler or Text(M|Unm)arshaler interface.
614	// To elect this behavior, users can set TimeNotBuiltin=true.
615	//
616	// Note: Setting TimeNotBuiltin=true can be used to enable the legacy behavior
617	// (for Cbor and Msgpack), where time.Time was not a builtin supported type.
618	//
619	// Note: DO NOT CHANGE AFTER FIRST USE.
620	//
621	// Once a Handle has been used, do not modify this option.
622	// It will lead to unexpected behaviour during encoding and decoding.
623	TimeNotBuiltin bool
624
625	// ExplicitRelease configures whether Release() is implicitly called after an encode or
626	// decode call.
627	//
628	// If you will hold onto an Encoder or Decoder for re-use, by calling Reset(...)
629	// on it or calling (Must)Encode repeatedly into a given []byte or io.Writer,
630	// then you do not want it to be implicitly closed after each Encode/Decode call.
631	// Doing so will unnecessarily return resources to the shared pool, only for you to
632	// grab them right after again to do another Encode/Decode call.
633	//
634	// Instead, you configure ExplicitRelease=true, and you explicitly call Release() when
635	// you are truly done.
636	//
637	// As an alternative, you can explicitly set a finalizer - so its resources
638	// are returned to the shared pool before it is garbage-collected. Do it as below:
639	//    runtime.SetFinalizer(e, (*Encoder).Release)
640	//    runtime.SetFinalizer(d, (*Decoder).Release)
641	//
642	// Deprecated: This is not longer used as pools are only used for long-lived objects
643	// which are shared across goroutines.
644	// Setting this value has no effect. It is maintained for backward compatibility.
645	ExplicitRelease bool
646
647	// ---- cache line
648}
649
650// basicHandle returns an initialized BasicHandle from the Handle.
651func basicHandle(hh Handle) (x *BasicHandle) {
652	x = hh.getBasicHandle()
653	// ** We need to simulate once.Do, to ensure no data race within the block.
654	// ** Consequently, below would not work.
655	// if atomic.CompareAndSwapUint32(&x.inited, 0, 1) {
656	// 	x.be = hh.isBinary()
657	// 	_, x.js = hh.(*JsonHandle)
658	// 	x.n = hh.Name()[0]
659	// }
660
661	// simulate once.Do using our own stored flag and mutex as a CompareAndSwap
662	// is not sufficient, since a race condition can occur within init(Handle) function.
663	// init is made noinline, so that this function can be inlined by its caller.
664	if atomic.LoadUint32(&x.inited) == 0 {
665		x.init(hh)
666	}
667	return
668}
669
670func (x *BasicHandle) isJs() bool {
671	return handleFlag(x.inited)&jsonHandleFlag != 0
672}
673
674func (x *BasicHandle) isBe() bool {
675	return handleFlag(x.inited)&binaryHandleFlag != 0
676}
677
678//go:noinline
679func (x *BasicHandle) init(hh Handle) {
680	// make it uninlineable, as it is called at most once
681	x.mu.Lock()
682	if x.inited == 0 {
683		var f = initedHandleFlag
684		if hh.isBinary() {
685			f |= binaryHandleFlag
686		}
687		if _, b := hh.(*JsonHandle); b {
688			f |= jsonHandleFlag
689		}
690		atomic.StoreUint32(&x.inited, uint32(f))
691		// ensure MapType and SliceType are of correct type
692		if x.MapType != nil && x.MapType.Kind() != reflect.Map {
693			panic(errMapTypeNotMapKind)
694		}
695		if x.SliceType != nil && x.SliceType.Kind() != reflect.Slice {
696			panic(errSliceTypeNotSliceKind)
697		}
698	}
699	x.mu.Unlock()
700}
701
702func (x *BasicHandle) getBasicHandle() *BasicHandle {
703	return x
704}
705
706func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
707	if x.TypeInfos == nil {
708		return defTypeInfos.get(rtid, rt)
709	}
710	return x.TypeInfos.get(rtid, rt)
711}
712
713func findFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) {
714	// binary search. adapted from sort/search.go.
715	// Note: we use goto (instead of for loop) so this can be inlined.
716
717	// h, i, j := 0, 0, len(s)
718	var h uint // var h, i uint
719	var j = uint(len(s))
720LOOP:
721	if i < j {
722		h = i + (j-i)/2
723		if s[h].rtid < rtid {
724			i = h + 1
725		} else {
726			j = h
727		}
728		goto LOOP
729	}
730	if i < uint(len(s)) && s[i].rtid == rtid {
731		fn = s[i].fn
732	}
733	return
734}
735
736func (x *BasicHandle) fn(rt reflect.Type) (fn *codecFn) {
737	return x.fnVia(rt, &x.rtidFns, true)
738}
739
740func (x *BasicHandle) fnNoExt(rt reflect.Type) (fn *codecFn) {
741	return x.fnVia(rt, &x.rtidFnsNoExt, false)
742}
743
744func (x *BasicHandle) fnVia(rt reflect.Type, fs *atomicRtidFnSlice, checkExt bool) (fn *codecFn) {
745	rtid := rt2id(rt)
746	sp := fs.load()
747	if sp != nil {
748		if _, fn = findFn(sp, rtid); fn != nil {
749			return
750		}
751	}
752	fn = x.fnLoad(rt, rtid, checkExt)
753	x.mu.Lock()
754	var sp2 []codecRtidFn
755	sp = fs.load()
756	if sp == nil {
757		sp2 = []codecRtidFn{{rtid, fn}}
758		fs.store(sp2)
759	} else {
760		idx, fn2 := findFn(sp, rtid)
761		if fn2 == nil {
762			sp2 = make([]codecRtidFn, len(sp)+1)
763			copy(sp2, sp[:idx])
764			copy(sp2[idx+1:], sp[idx:])
765			sp2[idx] = codecRtidFn{rtid, fn}
766			fs.store(sp2)
767		}
768	}
769	x.mu.Unlock()
770	return
771}
772
773func (x *BasicHandle) fnLoad(rt reflect.Type, rtid uintptr, checkExt bool) (fn *codecFn) {
774	fn = new(codecFn)
775	fi := &(fn.i)
776	ti := x.getTypeInfo(rtid, rt)
777	fi.ti = ti
778
779	rk := reflect.Kind(ti.kind)
780
781	// anything can be an extension except the built-in ones: time, raw and rawext
782
783	if rtid == timeTypId && !x.TimeNotBuiltin {
784		fn.fe = (*Encoder).kTime
785		fn.fd = (*Decoder).kTime
786	} else if rtid == rawTypId {
787		fn.fe = (*Encoder).raw
788		fn.fd = (*Decoder).raw
789	} else if rtid == rawExtTypId {
790		fn.fe = (*Encoder).rawExt
791		fn.fd = (*Decoder).rawExt
792		fi.addrF = true
793		fi.addrD = true
794		fi.addrE = true
795	} else if xfFn := x.getExt(rtid, checkExt); xfFn != nil {
796		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
797		fn.fe = (*Encoder).ext
798		fn.fd = (*Decoder).ext
799		fi.addrF = true
800		fi.addrD = true
801		if rk == reflect.Struct || rk == reflect.Array {
802			fi.addrE = true
803		}
804	} else if ti.isFlag(tiflagSelfer) || ti.isFlag(tiflagSelferPtr) {
805		fn.fe = (*Encoder).selferMarshal
806		fn.fd = (*Decoder).selferUnmarshal
807		fi.addrF = true
808		fi.addrD = ti.isFlag(tiflagSelferPtr)
809		fi.addrE = ti.isFlag(tiflagSelferPtr)
810	} else if supportMarshalInterfaces && x.isBe() &&
811		(ti.isFlag(tiflagBinaryMarshaler) || ti.isFlag(tiflagBinaryMarshalerPtr)) &&
812		(ti.isFlag(tiflagBinaryUnmarshaler) || ti.isFlag(tiflagBinaryUnmarshalerPtr)) {
813		fn.fe = (*Encoder).binaryMarshal
814		fn.fd = (*Decoder).binaryUnmarshal
815		fi.addrF = true
816		fi.addrD = ti.isFlag(tiflagBinaryUnmarshalerPtr)
817		fi.addrE = ti.isFlag(tiflagBinaryMarshalerPtr)
818	} else if supportMarshalInterfaces && !x.isBe() && x.isJs() &&
819		(ti.isFlag(tiflagJsonMarshaler) || ti.isFlag(tiflagJsonMarshalerPtr)) &&
820		(ti.isFlag(tiflagJsonUnmarshaler) || ti.isFlag(tiflagJsonUnmarshalerPtr)) {
821		//If JSON, we should check JSONMarshal before textMarshal
822		fn.fe = (*Encoder).jsonMarshal
823		fn.fd = (*Decoder).jsonUnmarshal
824		fi.addrF = true
825		fi.addrD = ti.isFlag(tiflagJsonUnmarshalerPtr)
826		fi.addrE = ti.isFlag(tiflagJsonMarshalerPtr)
827	} else if supportMarshalInterfaces && !x.isBe() &&
828		(ti.isFlag(tiflagTextMarshaler) || ti.isFlag(tiflagTextMarshalerPtr)) &&
829		(ti.isFlag(tiflagTextUnmarshaler) || ti.isFlag(tiflagTextUnmarshalerPtr)) {
830		fn.fe = (*Encoder).textMarshal
831		fn.fd = (*Decoder).textUnmarshal
832		fi.addrF = true
833		fi.addrD = ti.isFlag(tiflagTextUnmarshalerPtr)
834		fi.addrE = ti.isFlag(tiflagTextMarshalerPtr)
835	} else {
836		if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice) {
837			if ti.pkgpath == "" { // un-named slice or map
838				if idx := fastpathAV.index(rtid); idx != -1 {
839					fn.fe = fastpathAV[idx].encfn
840					fn.fd = fastpathAV[idx].decfn
841					fi.addrD = true
842					fi.addrF = false
843				}
844			} else {
845				// use mapping for underlying type if there
846				var rtu reflect.Type
847				if rk == reflect.Map {
848					rtu = reflect.MapOf(ti.key, ti.elem)
849				} else {
850					rtu = reflect.SliceOf(ti.elem)
851				}
852				rtuid := rt2id(rtu)
853				if idx := fastpathAV.index(rtuid); idx != -1 {
854					xfnf := fastpathAV[idx].encfn
855					xrt := fastpathAV[idx].rt
856					fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) {
857						xfnf(e, xf, rvConvert(xrv, xrt))
858					}
859					fi.addrD = true
860					fi.addrF = false // meaning it can be an address(ptr) or a value
861					xfnf2 := fastpathAV[idx].decfn
862					xptr2rt := reflect.PtrTo(xrt)
863					fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
864						if xrv.Kind() == reflect.Ptr {
865							xfnf2(d, xf, rvConvert(xrv, xptr2rt))
866						} else {
867							xfnf2(d, xf, rvConvert(xrv, xrt))
868						}
869					}
870				}
871			}
872		}
873		if fn.fe == nil && fn.fd == nil {
874			switch rk {
875			case reflect.Bool:
876				fn.fe = (*Encoder).kBool
877				fn.fd = (*Decoder).kBool
878			case reflect.String:
879				// Do not use different functions based on StringToRaw option,
880				// as that will statically set the function for a string type,
881				// and if the Handle is modified thereafter, behaviour is non-deterministic.
882				// i.e. DO NOT DO:
883				//   if x.StringToRaw {
884				//   	fn.fe = (*Encoder).kStringToRaw
885				//   } else {
886				//   	fn.fe = (*Encoder).kStringEnc
887				//   }
888
889				fn.fe = (*Encoder).kString
890				fn.fd = (*Decoder).kString
891			case reflect.Int:
892				fn.fd = (*Decoder).kInt
893				fn.fe = (*Encoder).kInt
894			case reflect.Int8:
895				fn.fe = (*Encoder).kInt8
896				fn.fd = (*Decoder).kInt8
897			case reflect.Int16:
898				fn.fe = (*Encoder).kInt16
899				fn.fd = (*Decoder).kInt16
900			case reflect.Int32:
901				fn.fe = (*Encoder).kInt32
902				fn.fd = (*Decoder).kInt32
903			case reflect.Int64:
904				fn.fe = (*Encoder).kInt64
905				fn.fd = (*Decoder).kInt64
906			case reflect.Uint:
907				fn.fd = (*Decoder).kUint
908				fn.fe = (*Encoder).kUint
909			case reflect.Uint8:
910				fn.fe = (*Encoder).kUint8
911				fn.fd = (*Decoder).kUint8
912			case reflect.Uint16:
913				fn.fe = (*Encoder).kUint16
914				fn.fd = (*Decoder).kUint16
915			case reflect.Uint32:
916				fn.fe = (*Encoder).kUint32
917				fn.fd = (*Decoder).kUint32
918			case reflect.Uint64:
919				fn.fe = (*Encoder).kUint64
920				fn.fd = (*Decoder).kUint64
921			case reflect.Uintptr:
922				fn.fe = (*Encoder).kUintptr
923				fn.fd = (*Decoder).kUintptr
924			case reflect.Float32:
925				fn.fe = (*Encoder).kFloat32
926				fn.fd = (*Decoder).kFloat32
927			case reflect.Float64:
928				fn.fe = (*Encoder).kFloat64
929				fn.fd = (*Decoder).kFloat64
930			case reflect.Invalid:
931				fn.fe = (*Encoder).kInvalid
932				fn.fd = (*Decoder).kErr
933			case reflect.Chan:
934				fi.seq = seqTypeChan
935				fn.fe = (*Encoder).kChan
936				fn.fd = (*Decoder).kSliceForChan
937			case reflect.Slice:
938				fi.seq = seqTypeSlice
939				fn.fe = (*Encoder).kSlice
940				fn.fd = (*Decoder).kSlice
941			case reflect.Array:
942				fi.seq = seqTypeArray
943				fn.fe = (*Encoder).kArray
944				fi.addrF = false
945				fi.addrD = false
946				rt2 := reflect.SliceOf(ti.elem)
947				fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
948					// call fnVia directly, so fn(...) is not recursive, and can be inlined
949					d.h.fnVia(rt2, &x.rtidFns, true).fd(d, xf, rvGetSlice4Array(xrv, rt2))
950				}
951			case reflect.Struct:
952				if ti.anyOmitEmpty ||
953					ti.isFlag(tiflagMissingFielder) ||
954					ti.isFlag(tiflagMissingFielderPtr) {
955					fn.fe = (*Encoder).kStruct
956				} else {
957					fn.fe = (*Encoder).kStructNoOmitempty
958				}
959				fn.fd = (*Decoder).kStruct
960			case reflect.Map:
961				fn.fe = (*Encoder).kMap
962				fn.fd = (*Decoder).kMap
963			case reflect.Interface:
964				// encode: reflect.Interface are handled already by preEncodeValue
965				fn.fd = (*Decoder).kInterface
966				fn.fe = (*Encoder).kErr
967			default:
968				// reflect.Ptr and reflect.Interface are handled already by preEncodeValue
969				fn.fe = (*Encoder).kErr
970				fn.fd = (*Decoder).kErr
971			}
972		}
973	}
974	return
975}
976
977// Handle defines a specific encoding format. It also stores any runtime state
978// used during an Encoding or Decoding session e.g. stored state about Types, etc.
979//
980// Once a handle is configured, it can be shared across multiple Encoders and Decoders.
981//
982// Note that a Handle is NOT safe for concurrent modification.
983//
984// A Handle also should not be modified after it is configured and has
985// been used at least once. This is because stored state may be out of sync with the
986// new configuration, and a data race can occur when multiple goroutines access it.
987// i.e. multiple Encoders or Decoders in different goroutines.
988//
989// Consequently, the typical usage model is that a Handle is pre-configured
990// before first time use, and not modified while in use.
991// Such a pre-configured Handle is safe for concurrent access.
992type Handle interface {
993	Name() string
994	// return the basic handle. It may not have been inited.
995	// Prefer to use basicHandle() helper function that ensures it has been inited.
996	getBasicHandle() *BasicHandle
997	newEncDriver() encDriver
998	newDecDriver() decDriver
999	isBinary() bool
1000}
1001
1002// Raw represents raw formatted bytes.
1003// We "blindly" store it during encode and retrieve the raw bytes during decode.
1004// Note: it is dangerous during encode, so we may gate the behaviour
1005// behind an Encode flag which must be explicitly set.
1006type Raw []byte
1007
1008// RawExt represents raw unprocessed extension data.
1009// Some codecs will decode extension data as a *RawExt
1010// if there is no registered extension for the tag.
1011//
1012// Only one of Data or Value is nil.
1013// If Data is nil, then the content of the RawExt is in the Value.
1014type RawExt struct {
1015	Tag uint64
1016	// Data is the []byte which represents the raw ext. If nil, ext is exposed in Value.
1017	// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types
1018	Data []byte
1019	// Value represents the extension, if Data is nil.
1020	// Value is used by codecs (e.g. cbor, json) which leverage the format to do
1021	// custom serialization of the types.
1022	Value interface{}
1023}
1024
1025// BytesExt handles custom (de)serialization of types to/from []byte.
1026// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
1027type BytesExt interface {
1028	// WriteExt converts a value to a []byte.
1029	//
1030	// Note: v is a pointer iff the registered extension type is a struct or array kind.
1031	WriteExt(v interface{}) []byte
1032
1033	// ReadExt updates a value from a []byte.
1034	//
1035	// Note: dst is always a pointer kind to the registered extension type.
1036	ReadExt(dst interface{}, src []byte)
1037}
1038
1039// InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
1040// The Encoder or Decoder will then handle the further (de)serialization of that known type.
1041//
1042// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types.
1043type InterfaceExt interface {
1044	// ConvertExt converts a value into a simpler interface for easy encoding
1045	// e.g. convert time.Time to int64.
1046	//
1047	// Note: v is a pointer iff the registered extension type is a struct or array kind.
1048	ConvertExt(v interface{}) interface{}
1049
1050	// UpdateExt updates a value from a simpler interface for easy decoding
1051	// e.g. convert int64 to time.Time.
1052	//
1053	// Note: dst is always a pointer kind to the registered extension type.
1054	UpdateExt(dst interface{}, src interface{})
1055}
1056
1057// Ext handles custom (de)serialization of custom types / extensions.
1058type Ext interface {
1059	BytesExt
1060	InterfaceExt
1061}
1062
1063// addExtWrapper is a wrapper implementation to support former AddExt exported method.
1064type addExtWrapper struct {
1065	encFn func(reflect.Value) ([]byte, error)
1066	decFn func(reflect.Value, []byte) error
1067}
1068
1069func (x addExtWrapper) WriteExt(v interface{}) []byte {
1070	bs, err := x.encFn(rv4i(v))
1071	if err != nil {
1072		panic(err)
1073	}
1074	return bs
1075}
1076
1077func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
1078	if err := x.decFn(rv4i(v), bs); err != nil {
1079		panic(err)
1080	}
1081}
1082
1083func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
1084	return x.WriteExt(v)
1085}
1086
1087func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
1088	x.ReadExt(dest, v.([]byte))
1089}
1090
1091type bytesExtFailer struct{}
1092
1093func (bytesExtFailer) WriteExt(v interface{}) []byte {
1094	panicv.errorstr("BytesExt.WriteExt is not supported")
1095	return nil
1096}
1097func (bytesExtFailer) ReadExt(v interface{}, bs []byte) {
1098	panicv.errorstr("BytesExt.ReadExt is not supported")
1099}
1100
1101type interfaceExtFailer struct{}
1102
1103func (interfaceExtFailer) ConvertExt(v interface{}) interface{} {
1104	panicv.errorstr("InterfaceExt.ConvertExt is not supported")
1105	return nil
1106}
1107func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) {
1108	panicv.errorstr("InterfaceExt.UpdateExt is not supported")
1109}
1110
1111type bytesExtWrapper struct {
1112	interfaceExtFailer
1113	BytesExt
1114}
1115
1116type interfaceExtWrapper struct {
1117	bytesExtFailer
1118	InterfaceExt
1119}
1120
1121type extFailWrapper struct {
1122	bytesExtFailer
1123	interfaceExtFailer
1124}
1125
1126type binaryEncodingType struct{}
1127
1128func (binaryEncodingType) isBinary() bool { return true }
1129
1130type textEncodingType struct{}
1131
1132func (textEncodingType) isBinary() bool { return false }
1133
1134// noBuiltInTypes is embedded into many types which do not support builtins
1135// e.g. msgpack, simple, cbor.
1136
1137type noBuiltInTypes struct{}
1138
1139func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
1140func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
1141
1142// bigenHelper.
1143// Users must already slice the x completely, because we will not reslice.
1144type bigenHelper struct {
1145	x []byte // must be correctly sliced to appropriate len. slicing is a cost.
1146	w *encWr
1147}
1148
1149func (z bigenHelper) writeUint16(v uint16) {
1150	bigen.PutUint16(z.x, v)
1151	z.w.writeb(z.x)
1152}
1153
1154func (z bigenHelper) writeUint32(v uint32) {
1155	bigen.PutUint32(z.x, v)
1156	z.w.writeb(z.x)
1157}
1158
1159func (z bigenHelper) writeUint64(v uint64) {
1160	bigen.PutUint64(z.x, v)
1161	z.w.writeb(z.x)
1162}
1163
1164type extTypeTagFn struct {
1165	rtid    uintptr
1166	rtidptr uintptr
1167	rt      reflect.Type
1168	tag     uint64
1169	ext     Ext
1170	// _       [1]uint64 // padding
1171}
1172
1173type extHandle []extTypeTagFn
1174
1175// AddExt registes an encode and decode function for a reflect.Type.
1176// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
1177//
1178// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
1179func (o *extHandle) AddExt(rt reflect.Type, tag byte,
1180	encfn func(reflect.Value) ([]byte, error),
1181	decfn func(reflect.Value, []byte) error) (err error) {
1182	if encfn == nil || decfn == nil {
1183		return o.SetExt(rt, uint64(tag), nil)
1184	}
1185	return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
1186}
1187
1188// SetExt will set the extension for a tag and reflect.Type.
1189// Note that the type must be a named type, and specifically not a pointer or Interface.
1190// An error is returned if that is not honored.
1191// To Deregister an ext, call SetExt with nil Ext.
1192//
1193// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
1194func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
1195	// o is a pointer, because we may need to initialize it
1196	// We EXPECT *o is a pointer to a non-nil extHandle.
1197
1198	rk := rt.Kind()
1199	for rk == reflect.Ptr {
1200		rt = rt.Elem()
1201		rk = rt.Kind()
1202	}
1203
1204	if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr {
1205		return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt)
1206	}
1207
1208	rtid := rt2id(rt)
1209	switch rtid {
1210	case timeTypId, rawTypId, rawExtTypId:
1211		// all natively supported type, so cannot have an extension.
1212		// However, we do not return an error for these, as we do not document that.
1213		// Instead, we silently treat as a no-op, and return.
1214		return
1215	}
1216	o2 := *o
1217	for i := range o2 {
1218		v := &o2[i]
1219		if v.rtid == rtid {
1220			v.tag, v.ext = tag, ext
1221			return
1222		}
1223	}
1224	rtidptr := rt2id(reflect.PtrTo(rt))
1225	*o = append(o2, extTypeTagFn{rtid, rtidptr, rt, tag, ext}) // , [1]uint64{}})
1226	return
1227}
1228
1229func (o extHandle) getExt(rtid uintptr, check bool) (v *extTypeTagFn) {
1230	if !check {
1231		return
1232	}
1233	for i := range o {
1234		v = &o[i]
1235		if v.rtid == rtid || v.rtidptr == rtid {
1236			return
1237		}
1238	}
1239	return nil
1240}
1241
1242func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) {
1243	for i := range o {
1244		v = &o[i]
1245		if v.tag == tag {
1246			return
1247		}
1248	}
1249	return nil
1250}
1251
1252type intf2impl struct {
1253	rtid uintptr // for intf
1254	impl reflect.Type
1255	// _    [1]uint64 // padding // not-needed, as *intf2impl is never returned.
1256}
1257
1258type intf2impls []intf2impl
1259
1260// Intf2Impl maps an interface to an implementing type.
1261// This allows us support infering the concrete type
1262// and populating it when passed an interface.
1263// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc.
1264//
1265// Passing a nil impl will clear the mapping.
1266func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) {
1267	if impl != nil && !impl.Implements(intf) {
1268		return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf)
1269	}
1270	rtid := rt2id(intf)
1271	o2 := *o
1272	for i := range o2 {
1273		v := &o2[i]
1274		if v.rtid == rtid {
1275			v.impl = impl
1276			return
1277		}
1278	}
1279	*o = append(o2, intf2impl{rtid, impl})
1280	return
1281}
1282
1283func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) {
1284	for i := range o {
1285		v := &o[i]
1286		if v.rtid == rtid {
1287			if v.impl == nil {
1288				return
1289			}
1290			vkind := v.impl.Kind()
1291			if vkind == reflect.Ptr {
1292				return reflect.New(v.impl.Elem())
1293			}
1294			return rvZeroAddrK(v.impl, vkind)
1295		}
1296	}
1297	return
1298}
1299
1300type structFieldInfoFlag uint8
1301
1302const (
1303	_ structFieldInfoFlag = 1 << iota
1304	structFieldInfoFlagReady
1305	structFieldInfoFlagOmitEmpty
1306)
1307
1308func (x *structFieldInfoFlag) flagSet(f structFieldInfoFlag) {
1309	*x = *x | f
1310}
1311
1312func (x *structFieldInfoFlag) flagClr(f structFieldInfoFlag) {
1313	*x = *x &^ f
1314}
1315
1316func (x structFieldInfoFlag) flagGet(f structFieldInfoFlag) bool {
1317	return x&f != 0
1318}
1319
1320func (x structFieldInfoFlag) omitEmpty() bool {
1321	return x.flagGet(structFieldInfoFlagOmitEmpty)
1322}
1323
1324func (x structFieldInfoFlag) ready() bool {
1325	return x.flagGet(structFieldInfoFlagReady)
1326}
1327
1328type structFieldInfo struct {
1329	encName   string // encode name
1330	fieldName string // field name
1331
1332	is  [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct
1333	nis uint8                      // num levels of embedding. if 1, then it's not embedded.
1334
1335	encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers
1336	structFieldInfoFlag
1337	// _ [1]byte // padding
1338}
1339
1340// func (si *structFieldInfo) setToZeroValue(v reflect.Value) {
1341// 	if v, valid := si.field(v, false); valid {
1342// 		v.Set(reflect.Zero(v.Type()))
1343// 	}
1344// }
1345
1346// rv returns the field of the struct.
1347// If anonymous, it returns an Invalid
1348func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) {
1349	// replicate FieldByIndex
1350	for i, x := range si.is {
1351		if uint8(i) == si.nis {
1352			break
1353		}
1354		if v, valid = baseStructRv(v, update); !valid {
1355			return
1356		}
1357		v = v.Field(int(x))
1358	}
1359
1360	return v, true
1361}
1362
1363func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) {
1364	keytype = valueTypeString // default
1365	if stag == "" {
1366		return
1367	}
1368	for i, s := range strings.Split(stag, ",") {
1369		if i == 0 {
1370		} else {
1371			switch s {
1372			case "omitempty":
1373				omitEmpty = true
1374			case "toarray":
1375				toArray = true
1376			case "int":
1377				keytype = valueTypeInt
1378			case "uint":
1379				keytype = valueTypeUint
1380			case "float":
1381				keytype = valueTypeFloat
1382				// case "bool":
1383				// 	keytype = valueTypeBool
1384			case "string":
1385				keytype = valueTypeString
1386			}
1387		}
1388	}
1389	return
1390}
1391
1392func (si *structFieldInfo) parseTag(stag string) {
1393	// if fname == "" {
1394	// 	panic(errNoFieldNameToStructFieldInfo)
1395	// }
1396
1397	if stag == "" {
1398		return
1399	}
1400	for i, s := range strings.Split(stag, ",") {
1401		if i == 0 {
1402			if s != "" {
1403				si.encName = s
1404			}
1405		} else {
1406			switch s {
1407			case "omitempty":
1408				si.flagSet(structFieldInfoFlagOmitEmpty)
1409			}
1410		}
1411	}
1412}
1413
1414type sfiSortedByEncName []*structFieldInfo
1415
1416func (p sfiSortedByEncName) Len() int           { return len(p) }
1417func (p sfiSortedByEncName) Less(i, j int) bool { return p[uint(i)].encName < p[uint(j)].encName }
1418func (p sfiSortedByEncName) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
1419
1420const structFieldNodeNumToCache = 4
1421
1422type structFieldNodeCache struct {
1423	rv  [structFieldNodeNumToCache]reflect.Value
1424	idx [structFieldNodeNumToCache]uint32
1425	num uint8
1426}
1427
1428func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) {
1429	for i, k := range &x.idx {
1430		if uint8(i) == x.num {
1431			return // break
1432		}
1433		if key == k {
1434			return x.rv[i], true
1435		}
1436	}
1437	return
1438}
1439
1440func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) {
1441	if x.num < structFieldNodeNumToCache {
1442		x.rv[x.num] = fv
1443		x.idx[x.num] = key
1444		x.num++
1445		return
1446	}
1447}
1448
1449type structFieldNode struct {
1450	v      reflect.Value
1451	cache2 structFieldNodeCache
1452	cache3 structFieldNodeCache
1453	update bool
1454}
1455
1456func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) {
1457	// return si.fieldval(x.v, x.update)
1458
1459	// Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding
1460	// This mostly saves us time on the repeated calls to v.Elem, v.Field, etc.
1461	var valid bool
1462	switch si.nis {
1463	case 1:
1464		fv = x.v.Field(int(si.is[0]))
1465	case 2:
1466		if fv, valid = x.cache2.get(uint32(si.is[0])); valid {
1467			fv = fv.Field(int(si.is[1]))
1468			return
1469		}
1470		fv = x.v.Field(int(si.is[0]))
1471		if fv, valid = baseStructRv(fv, x.update); !valid {
1472			return
1473		}
1474		x.cache2.tryAdd(fv, uint32(si.is[0]))
1475		fv = fv.Field(int(si.is[1]))
1476	case 3:
1477		var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1])
1478		if fv, valid = x.cache3.get(key); valid {
1479			fv = fv.Field(int(si.is[2]))
1480			return
1481		}
1482		fv = x.v.Field(int(si.is[0]))
1483		if fv, valid = baseStructRv(fv, x.update); !valid {
1484			return
1485		}
1486		fv = fv.Field(int(si.is[1]))
1487		if fv, valid = baseStructRv(fv, x.update); !valid {
1488			return
1489		}
1490		x.cache3.tryAdd(fv, key)
1491		fv = fv.Field(int(si.is[2]))
1492	default:
1493		fv, _ = si.field(x.v, x.update)
1494	}
1495	return
1496}
1497
1498func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) {
1499	for v.Kind() == reflect.Ptr {
1500		if rvIsNil(v) {
1501			if !update {
1502				return
1503			}
1504			rvSetDirect(v, reflect.New(v.Type().Elem()))
1505		}
1506		v = v.Elem()
1507	}
1508	return v, true
1509}
1510
1511type tiflag uint32
1512
1513const (
1514	_ tiflag = 1 << iota
1515
1516	tiflagComparable
1517
1518	tiflagIsZeroer
1519	tiflagIsZeroerPtr
1520
1521	tiflagBinaryMarshaler
1522	tiflagBinaryMarshalerPtr
1523
1524	tiflagBinaryUnmarshaler
1525	tiflagBinaryUnmarshalerPtr
1526
1527	tiflagTextMarshaler
1528	tiflagTextMarshalerPtr
1529
1530	tiflagTextUnmarshaler
1531	tiflagTextUnmarshalerPtr
1532
1533	tiflagJsonMarshaler
1534	tiflagJsonMarshalerPtr
1535
1536	tiflagJsonUnmarshaler
1537	tiflagJsonUnmarshalerPtr
1538
1539	tiflagSelfer
1540	tiflagSelferPtr
1541
1542	tiflagMissingFielder
1543	tiflagMissingFielderPtr
1544)
1545
1546// typeInfo keeps static (non-changing readonly)information
1547// about each (non-ptr) type referenced in the encode/decode sequence.
1548//
1549// During an encode/decode sequence, we work as below:
1550//   - If base is a built in type, en/decode base value
1551//   - If base is registered as an extension, en/decode base value
1552//   - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
1553//   - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
1554//   - Else decode appropriately based on the reflect.Kind
1555type typeInfo struct {
1556	rt      reflect.Type
1557	elem    reflect.Type
1558	pkgpath string
1559
1560	rtid uintptr
1561
1562	numMeth uint16 // number of methods
1563	kind    uint8
1564	chandir uint8
1565
1566	anyOmitEmpty bool      // true if a struct, and any of the fields are tagged "omitempty"
1567	toArray      bool      // whether this (struct) type should be encoded as an array
1568	keyType      valueType // if struct, how is the field name stored in a stream? default is string
1569	mbs          bool      // base type (T or *T) is a MapBySlice
1570
1571	// ---- cpu cache line boundary?
1572	sfiSort []*structFieldInfo // sorted. Used when enc/dec struct to map.
1573	sfiSrc  []*structFieldInfo // unsorted. Used when enc/dec struct to array.
1574
1575	key reflect.Type
1576
1577	// ---- cpu cache line boundary?
1578	// sfis         []structFieldInfo // all sfi, in src order, as created.
1579	sfiNamesSort []byte // all names, with indexes into the sfiSort
1580
1581	// rv0 is the zero value for the type.
1582	// It is mostly beneficial for all non-reference kinds
1583	// i.e. all but map/chan/func/ptr/unsafe.pointer
1584	// so beneficial for intXX, bool, slices, structs, etc
1585	rv0 reflect.Value
1586
1587	elemsize uintptr
1588
1589	// other flags, with individual bits representing if set.
1590	flags tiflag
1591
1592	infoFieldOmitempty bool
1593
1594	elemkind uint8
1595	_        [2]byte // padding
1596	// _ [1]uint64 // padding
1597}
1598
1599func (ti *typeInfo) isFlag(f tiflag) bool {
1600	return ti.flags&f != 0
1601}
1602
1603func (ti *typeInfo) flag(when bool, f tiflag) *typeInfo {
1604	if when {
1605		ti.flags |= f
1606	}
1607	return ti
1608}
1609
1610func (ti *typeInfo) indexForEncName(name []byte) (index int16) {
1611	var sn []byte
1612	if len(name)+2 <= 32 {
1613		var buf [32]byte // should not escape to heap
1614		sn = buf[:len(name)+2]
1615	} else {
1616		sn = make([]byte, len(name)+2)
1617	}
1618	copy(sn[1:], name)
1619	sn[0], sn[len(sn)-1] = tiSep2(name), 0xff
1620	j := bytes.Index(ti.sfiNamesSort, sn)
1621	if j < 0 {
1622		return -1
1623	}
1624	index = int16(uint16(ti.sfiNamesSort[j+len(sn)+1]) | uint16(ti.sfiNamesSort[j+len(sn)])<<8)
1625	return
1626}
1627
1628type rtid2ti struct {
1629	rtid uintptr
1630	ti   *typeInfo
1631}
1632
1633// TypeInfos caches typeInfo for each type on first inspection.
1634//
1635// It is configured with a set of tag keys, which are used to get
1636// configuration for the type.
1637type TypeInfos struct {
1638	// infos: formerly map[uintptr]*typeInfo, now *[]rtid2ti, 2 words expected
1639	infos atomicTypeInfoSlice
1640	mu    sync.Mutex
1641	_     uint64 // padding (cache-aligned)
1642	tags  []string
1643	_     uint64 // padding (cache-aligned)
1644}
1645
1646// NewTypeInfos creates a TypeInfos given a set of struct tags keys.
1647//
1648// This allows users customize the struct tag keys which contain configuration
1649// of their types.
1650func NewTypeInfos(tags []string) *TypeInfos {
1651	return &TypeInfos{tags: tags}
1652}
1653
1654func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
1655	// check for tags: codec, json, in that order.
1656	// this allows seamless support for many configured structs.
1657	for _, x := range x.tags {
1658		s = t.Get(x)
1659		if s != "" {
1660			return s
1661		}
1662	}
1663	return
1664}
1665
1666func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) {
1667	// binary search. adapted from sort/search.go.
1668	// Note: we use goto (instead of for loop) so this can be inlined.
1669
1670	// h, i, j := 0, 0, len(s)
1671	var h uint // var h, i uint
1672	var j = uint(len(s))
1673LOOP:
1674	if i < j {
1675		h = i + (j-i)/2
1676		if s[h].rtid < rtid {
1677			i = h + 1
1678		} else {
1679			j = h
1680		}
1681		goto LOOP
1682	}
1683	if i < uint(len(s)) && s[i].rtid == rtid {
1684		ti = s[i].ti
1685	}
1686	return
1687}
1688
1689func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
1690	sp := x.infos.load()
1691	if sp != nil {
1692		_, pti = findTypeInfo(sp, rtid)
1693		if pti != nil {
1694			return
1695		}
1696	}
1697
1698	rk := rt.Kind()
1699
1700	if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
1701		panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
1702	}
1703
1704	// do not hold lock while computing this.
1705	// it may lead to duplication, but that's ok.
1706	ti := typeInfo{
1707		rt:      rt,
1708		rtid:    rtid,
1709		kind:    uint8(rk),
1710		pkgpath: rt.PkgPath(),
1711		keyType: valueTypeString, // default it - so it's never 0
1712	}
1713	ti.rv0 = reflect.Zero(rt)
1714
1715	ti.numMeth = uint16(rt.NumMethod())
1716
1717	var b1, b2 bool
1718	b1, b2 = implIntf(rt, binaryMarshalerTyp)
1719	ti.flag(b1, tiflagBinaryMarshaler).flag(b2, tiflagBinaryMarshalerPtr)
1720	b1, b2 = implIntf(rt, binaryUnmarshalerTyp)
1721	ti.flag(b1, tiflagBinaryUnmarshaler).flag(b2, tiflagBinaryUnmarshalerPtr)
1722	b1, b2 = implIntf(rt, textMarshalerTyp)
1723	ti.flag(b1, tiflagTextMarshaler).flag(b2, tiflagTextMarshalerPtr)
1724	b1, b2 = implIntf(rt, textUnmarshalerTyp)
1725	ti.flag(b1, tiflagTextUnmarshaler).flag(b2, tiflagTextUnmarshalerPtr)
1726	b1, b2 = implIntf(rt, jsonMarshalerTyp)
1727	ti.flag(b1, tiflagJsonMarshaler).flag(b2, tiflagJsonMarshalerPtr)
1728	b1, b2 = implIntf(rt, jsonUnmarshalerTyp)
1729	ti.flag(b1, tiflagJsonUnmarshaler).flag(b2, tiflagJsonUnmarshalerPtr)
1730	b1, b2 = implIntf(rt, selferTyp)
1731	ti.flag(b1, tiflagSelfer).flag(b2, tiflagSelferPtr)
1732	b1, b2 = implIntf(rt, missingFielderTyp)
1733	ti.flag(b1, tiflagMissingFielder).flag(b2, tiflagMissingFielderPtr)
1734	b1, b2 = implIntf(rt, iszeroTyp)
1735	ti.flag(b1, tiflagIsZeroer).flag(b2, tiflagIsZeroerPtr)
1736	b1 = rt.Comparable()
1737	ti.flag(b1, tiflagComparable)
1738
1739	switch rk {
1740	case reflect.Struct:
1741		var omitEmpty bool
1742		if f, ok := rt.FieldByName(structInfoFieldName); ok {
1743			ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag))
1744			ti.infoFieldOmitempty = omitEmpty
1745		} else {
1746			ti.keyType = valueTypeString
1747		}
1748		pp, pi := &pool4tiload, pool4tiload.Get() // pool.tiLoad()
1749		pv := pi.(*typeInfoLoadArray)
1750		pv.etypes[0] = ti.rtid
1751		// vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
1752		vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]}
1753		x.rget(rt, rtid, omitEmpty, nil, &vv)
1754		ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv)
1755		pp.Put(pi)
1756	case reflect.Map:
1757		ti.elem = rt.Elem()
1758		ti.key = rt.Key()
1759	case reflect.Slice:
1760		ti.mbs, _ = implIntf(rt, mapBySliceTyp)
1761		ti.elem = rt.Elem()
1762		ti.elemsize = ti.elem.Size()
1763		ti.elemkind = uint8(ti.elem.Kind())
1764	case reflect.Chan:
1765		ti.elem = rt.Elem()
1766		ti.chandir = uint8(rt.ChanDir())
1767	case reflect.Array:
1768		ti.elem = rt.Elem()
1769		ti.elemsize = ti.elem.Size()
1770		ti.elemkind = uint8(ti.elem.Kind())
1771	case reflect.Ptr:
1772		ti.elem = rt.Elem()
1773	}
1774
1775	x.mu.Lock()
1776	sp = x.infos.load()
1777	var sp2 []rtid2ti
1778	if sp == nil {
1779		pti = &ti
1780		sp2 = []rtid2ti{{rtid, pti}}
1781		x.infos.store(sp2)
1782	} else {
1783		var idx uint
1784		idx, pti = findTypeInfo(sp, rtid)
1785		if pti == nil {
1786			pti = &ti
1787			sp2 = make([]rtid2ti, len(sp)+1)
1788			copy(sp2, sp[:idx])
1789			copy(sp2[idx+1:], sp[idx:])
1790			sp2[idx] = rtid2ti{rtid, pti}
1791			x.infos.store(sp2)
1792		}
1793	}
1794	x.mu.Unlock()
1795	return
1796}
1797
1798func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool,
1799	indexstack []uint16, pv *typeInfoLoad) {
1800	// Read up fields and store how to access the value.
1801	//
1802	// It uses go's rules for message selectors,
1803	// which say that the field with the shallowest depth is selected.
1804	//
1805	// Note: we consciously use slices, not a map, to simulate a set.
1806	//       Typically, types have < 16 fields,
1807	//       and iteration using equals is faster than maps there
1808	flen := rt.NumField()
1809	if flen > (1<<maxLevelsEmbedding - 1) {
1810		panicv.errorf("codec: types with > %v fields are not supported - has %v fields",
1811			(1<<maxLevelsEmbedding - 1), flen)
1812	}
1813	// pv.sfis = make([]structFieldInfo, flen)
1814LOOP:
1815	for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
1816		f := rt.Field(int(j))
1817		fkind := f.Type.Kind()
1818		// skip if a func type, or is unexported, or structTag value == "-"
1819		switch fkind {
1820		case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer:
1821			continue LOOP
1822		}
1823
1824		isUnexported := f.PkgPath != ""
1825		if isUnexported && !f.Anonymous {
1826			continue
1827		}
1828		stag := x.structTag(f.Tag)
1829		if stag == "-" {
1830			continue
1831		}
1832		var si structFieldInfo
1833		var parsed bool
1834		// if anonymous and no struct tag (or it's blank),
1835		// and a struct (or pointer to struct), inline it.
1836		if f.Anonymous && fkind != reflect.Interface {
1837			// ^^ redundant but ok: per go spec, an embedded pointer type cannot be to an interface
1838			ft := f.Type
1839			isPtr := ft.Kind() == reflect.Ptr
1840			for ft.Kind() == reflect.Ptr {
1841				ft = ft.Elem()
1842			}
1843			isStruct := ft.Kind() == reflect.Struct
1844
1845			// Ignore embedded fields of unexported non-struct types.
1846			// Also, from go1.10, ignore pointers to unexported struct types
1847			// because unmarshal cannot assign a new struct to an unexported field.
1848			// See https://golang.org/issue/21357
1849			if (isUnexported && !isStruct) || (!allowSetUnexportedEmbeddedPtr && isUnexported && isPtr) {
1850				continue
1851			}
1852			doInline := stag == ""
1853			if !doInline {
1854				si.parseTag(stag)
1855				parsed = true
1856				doInline = si.encName == ""
1857				// doInline = si.isZero()
1858			}
1859			if doInline && isStruct {
1860				// if etypes contains this, don't call rget again (as fields are already seen here)
1861				ftid := rt2id(ft)
1862				// We cannot recurse forever, but we need to track other field depths.
1863				// So - we break if we see a type twice (not the first time).
1864				// This should be sufficient to handle an embedded type that refers to its
1865				// owning type, which then refers to its embedded type.
1866				processIt := true
1867				numk := 0
1868				for _, k := range pv.etypes {
1869					if k == ftid {
1870						numk++
1871						if numk == rgetMaxRecursion {
1872							processIt = false
1873							break
1874						}
1875					}
1876				}
1877				if processIt {
1878					pv.etypes = append(pv.etypes, ftid)
1879					indexstack2 := make([]uint16, len(indexstack)+1)
1880					copy(indexstack2, indexstack)
1881					indexstack2[len(indexstack)] = j
1882					// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
1883					x.rget(ft, ftid, omitEmpty, indexstack2, pv)
1884				}
1885				continue
1886			}
1887		}
1888
1889		// after the anonymous dance: if an unexported field, skip
1890		if isUnexported {
1891			continue
1892		}
1893
1894		if f.Name == "" {
1895			panic(errNoFieldNameToStructFieldInfo)
1896		}
1897
1898		// pv.fNames = append(pv.fNames, f.Name)
1899		// if si.encName == "" {
1900
1901		if !parsed {
1902			si.encName = f.Name
1903			si.parseTag(stag)
1904			parsed = true
1905		} else if si.encName == "" {
1906			si.encName = f.Name
1907		}
1908		si.encNameAsciiAlphaNum = true
1909		for i := len(si.encName) - 1; i >= 0; i-- { // bounds-check elimination
1910			b := si.encName[i]
1911			if (b >= '0' && b <= '9') || (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') {
1912				continue
1913			}
1914			si.encNameAsciiAlphaNum = false
1915			break
1916		}
1917		si.fieldName = f.Name
1918		si.flagSet(structFieldInfoFlagReady)
1919
1920		if len(indexstack) > maxLevelsEmbedding-1 {
1921			panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth",
1922				maxLevelsEmbedding-1, len(indexstack))
1923		}
1924		si.nis = uint8(len(indexstack)) + 1
1925		copy(si.is[:], indexstack)
1926		si.is[len(indexstack)] = j
1927
1928		if omitEmpty {
1929			si.flagSet(structFieldInfoFlagOmitEmpty)
1930		}
1931		pv.sfis = append(pv.sfis, si)
1932	}
1933}
1934
1935func tiSep(name string) uint8 {
1936	// (xn[0]%64) // (between 192-255 - outside ascii BMP)
1937	// Tried the following before settling on correct implementation:
1938	//   return 0xfe - (name[0] & 63)
1939	//   return 0xfe - (name[0] & 63) - uint8(len(name))
1940	//   return 0xfe - (name[0] & 63) - uint8(len(name)&63)
1941	//   return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07))
1942
1943	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
1944}
1945
1946func tiSep2(name []byte) uint8 {
1947	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
1948}
1949
1950// resolves the struct field info got from a call to rget.
1951// Returns a trimmed, unsorted and sorted []*structFieldInfo.
1952func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray) (
1953	y, z []*structFieldInfo, ss []byte, anyOmitEmpty bool) {
1954	sa := pv.sfiidx[:0]
1955	sn := pv.b[:]
1956	n := len(x)
1957
1958	var xn string
1959	var ui uint16
1960	var sep byte
1961
1962	for i := range x {
1963		ui = uint16(i)
1964		xn = x[i].encName // fieldName or encName? use encName for now.
1965		if len(xn)+2 > cap(sn) {
1966			sn = make([]byte, len(xn)+2)
1967		} else {
1968			sn = sn[:len(xn)+2]
1969		}
1970		// use a custom sep, so that misses are less frequent,
1971		// since the sep (first char in search) is as unique as first char in field name.
1972		sep = tiSep(xn)
1973		sn[0], sn[len(sn)-1] = sep, 0xff
1974		copy(sn[1:], xn)
1975		j := bytes.Index(sa, sn)
1976		if j == -1 {
1977			sa = append(sa, sep)
1978			sa = append(sa, xn...)
1979			sa = append(sa, 0xff, byte(ui>>8), byte(ui))
1980		} else {
1981			index := uint16(sa[j+len(sn)+1]) | uint16(sa[j+len(sn)])<<8
1982			// one of them must be cleared (reset to nil),
1983			// and the index updated appropriately
1984			i2clear := ui                // index to be cleared
1985			if x[i].nis < x[index].nis { // this one is shallower
1986				// update the index to point to this later one.
1987				sa[j+len(sn)], sa[j+len(sn)+1] = byte(ui>>8), byte(ui)
1988				// clear the earlier one, as this later one is shallower.
1989				i2clear = index
1990			}
1991			if x[i2clear].ready() {
1992				x[i2clear].flagClr(structFieldInfoFlagReady)
1993				n--
1994			}
1995		}
1996	}
1997
1998	var w []structFieldInfo
1999	sharingArray := len(x) <= typeInfoLoadArraySfisLen // sharing array with typeInfoLoadArray
2000	if sharingArray {
2001		w = make([]structFieldInfo, n)
2002	}
2003
2004	// remove all the nils (non-ready)
2005	y = make([]*structFieldInfo, n)
2006	n = 0
2007	var sslen int
2008	for i := range x {
2009		if !x[i].ready() {
2010			continue
2011		}
2012		if !anyOmitEmpty && x[i].omitEmpty() {
2013			anyOmitEmpty = true
2014		}
2015		if sharingArray {
2016			w[n] = x[i]
2017			y[n] = &w[n]
2018		} else {
2019			y[n] = &x[i]
2020		}
2021		sslen = sslen + len(x[i].encName) + 4
2022		n++
2023	}
2024	if n != len(y) {
2025		panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d",
2026			rt, len(y), len(x), n)
2027	}
2028
2029	z = make([]*structFieldInfo, len(y))
2030	copy(z, y)
2031	sort.Sort(sfiSortedByEncName(z))
2032
2033	sharingArray = len(sa) <= typeInfoLoadArraySfiidxLen
2034	if sharingArray {
2035		ss = make([]byte, 0, sslen)
2036	} else {
2037		ss = sa[:0] // reuse the newly made sa array if necessary
2038	}
2039	for i := range z {
2040		xn = z[i].encName
2041		sep = tiSep(xn)
2042		ui = uint16(i)
2043		ss = append(ss, sep)
2044		ss = append(ss, xn...)
2045		ss = append(ss, 0xff, byte(ui>>8), byte(ui))
2046	}
2047	return
2048}
2049
2050func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) {
2051	return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp)
2052}
2053
2054// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty:
2055//    - does it implement IsZero() bool
2056//    - is it comparable, and can i compare directly using ==
2057//    - if checkStruct, then walk through the encodable fields
2058//      and check if they are empty or not.
2059func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
2060	// v is a struct kind - no need to check again.
2061	// We only check isZero on a struct kind, to reduce the amount of times
2062	// that we lookup the rtid and typeInfo for each type as we walk the tree.
2063
2064	vt := v.Type()
2065	rtid := rt2id(vt)
2066	if tinfos == nil {
2067		tinfos = defTypeInfos
2068	}
2069	ti := tinfos.get(rtid, vt)
2070	if ti.rtid == timeTypId {
2071		return rv2i(v).(time.Time).IsZero()
2072	}
2073	if ti.isFlag(tiflagIsZeroerPtr) && v.CanAddr() {
2074		return rv2i(v.Addr()).(isZeroer).IsZero()
2075	}
2076	if ti.isFlag(tiflagIsZeroer) {
2077		return rv2i(v).(isZeroer).IsZero()
2078	}
2079	if ti.isFlag(tiflagComparable) {
2080		return rv2i(v) == rv2i(reflect.Zero(vt))
2081	}
2082	if !checkStruct {
2083		return false
2084	}
2085	// We only care about what we can encode/decode,
2086	// so that is what we use to check omitEmpty.
2087	for _, si := range ti.sfiSrc {
2088		sfv, valid := si.field(v, false)
2089		if valid && !isEmptyValue(sfv, tinfos, deref, checkStruct) {
2090			return false
2091		}
2092	}
2093	return true
2094}
2095
2096// func roundFloat(x float64) float64 {
2097// 	t := math.Trunc(x)
2098// 	if math.Abs(x-t) >= 0.5 {
2099// 		return t + math.Copysign(1, x)
2100// 	}
2101// 	return t
2102// }
2103
2104func panicToErr(h errDecorator, err *error) {
2105	// Note: This method MUST be called directly from defer i.e. defer panicToErr ...
2106	// else it seems the recover is not fully handled
2107	if recoverPanicToErr {
2108		if x := recover(); x != nil {
2109			// fmt.Printf("panic'ing with: %v\n", x)
2110			// debug.PrintStack()
2111			panicValToErr(h, x, err)
2112		}
2113	}
2114}
2115
2116func isSliceBoundsError(s string) bool {
2117	return strings.Contains(s, "index out of range") ||
2118		strings.Contains(s, "slice bounds out of range")
2119}
2120
2121func panicValToErr(h errDecorator, v interface{}, err *error) {
2122	d, dok := h.(*Decoder)
2123	switch xerr := v.(type) {
2124	case nil:
2125	case error:
2126		switch xerr {
2127		case nil:
2128		case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized:
2129			// treat as special (bubble up)
2130			*err = xerr
2131		default:
2132			if dok && d.bytes && isSliceBoundsError(xerr.Error()) {
2133				*err = io.EOF
2134			} else {
2135				h.wrapErr(xerr, err)
2136			}
2137		}
2138	case string:
2139		if xerr != "" {
2140			if dok && d.bytes && isSliceBoundsError(xerr) {
2141				*err = io.EOF
2142			} else {
2143				h.wrapErr(xerr, err)
2144			}
2145		}
2146	case fmt.Stringer:
2147		if xerr != nil {
2148			h.wrapErr(xerr, err)
2149		}
2150	default:
2151		h.wrapErr(v, err)
2152	}
2153}
2154
2155func isImmutableKind(k reflect.Kind) (v bool) {
2156	// return immutableKindsSet[k]
2157	// since we know reflect.Kind is in range 0..31, then use the k%32 == k constraint
2158	return immutableKindsSet[k%reflect.Kind(len(immutableKindsSet))] // bounds-check-elimination
2159}
2160
2161func usableByteSlice(bs []byte, slen int) []byte {
2162	if cap(bs) >= slen {
2163		if bs == nil {
2164			return []byte{}
2165		}
2166		return bs[:slen]
2167	}
2168	return make([]byte, slen)
2169}
2170
2171// ----
2172
2173type codecFnInfo struct {
2174	ti    *typeInfo
2175	xfFn  Ext
2176	xfTag uint64
2177	seq   seqType
2178	addrD bool
2179	addrF bool // if addrD, this says whether decode function can take a value or a ptr
2180	addrE bool
2181}
2182
2183// codecFn encapsulates the captured variables and the encode function.
2184// This way, we only do some calculations one times, and pass to the
2185// code block that should be called (encapsulated in a function)
2186// instead of executing the checks every time.
2187type codecFn struct {
2188	i  codecFnInfo
2189	fe func(*Encoder, *codecFnInfo, reflect.Value)
2190	fd func(*Decoder, *codecFnInfo, reflect.Value)
2191	_  [1]uint64 // padding (cache-aligned)
2192}
2193
2194type codecRtidFn struct {
2195	rtid uintptr
2196	fn   *codecFn
2197}
2198
2199func makeExt(ext interface{}) Ext {
2200	if ext == nil {
2201		return &extFailWrapper{}
2202	}
2203	switch t := ext.(type) {
2204	case nil:
2205		return &extFailWrapper{}
2206	case Ext:
2207		return t
2208	case BytesExt:
2209		return &bytesExtWrapper{BytesExt: t}
2210	case InterfaceExt:
2211		return &interfaceExtWrapper{InterfaceExt: t}
2212	}
2213	return &extFailWrapper{}
2214}
2215
2216func baseRV(v interface{}) (rv reflect.Value) {
2217	for rv = rv4i(v); rv.Kind() == reflect.Ptr; rv = rv.Elem() {
2218	}
2219	return
2220}
2221
2222// ----
2223
2224// these "checkOverflow" functions must be inlinable, and not call anybody.
2225// Overflow means that the value cannot be represented without wrapping/overflow.
2226// Overflow=false does not mean that the value can be represented without losing precision
2227// (especially for floating point).
2228
2229type checkOverflow struct{}
2230
2231// func (checkOverflow) Float16(f float64) (overflow bool) {
2232// 	panicv.errorf("unimplemented")
2233// 	if f < 0 {
2234// 		f = -f
2235// 	}
2236// 	return math.MaxFloat32 < f && f <= math.MaxFloat64
2237// }
2238
2239func (checkOverflow) Float32(v float64) (overflow bool) {
2240	if v < 0 {
2241		v = -v
2242	}
2243	return math.MaxFloat32 < v && v <= math.MaxFloat64
2244}
2245func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
2246	if bitsize == 0 || bitsize >= 64 || v == 0 {
2247		return
2248	}
2249	if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
2250		overflow = true
2251	}
2252	return
2253}
2254func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
2255	if bitsize == 0 || bitsize >= 64 || v == 0 {
2256		return
2257	}
2258	if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
2259		overflow = true
2260	}
2261	return
2262}
2263func (checkOverflow) SignedInt(v uint64) (overflow bool) {
2264	//e.g. -127 to 128 for int8
2265	pos := (v >> 63) == 0
2266	ui2 := v & 0x7fffffffffffffff
2267	if pos {
2268		if ui2 > math.MaxInt64 {
2269			overflow = true
2270		}
2271	} else {
2272		if ui2 > math.MaxInt64-1 {
2273			overflow = true
2274		}
2275	}
2276	return
2277}
2278
2279func (x checkOverflow) Float32V(v float64) float64 {
2280	if x.Float32(v) {
2281		panicv.errorf("float32 overflow: %v", v)
2282	}
2283	return v
2284}
2285func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
2286	if x.Uint(v, bitsize) {
2287		panicv.errorf("uint64 overflow: %v", v)
2288	}
2289	return v
2290}
2291func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
2292	if x.Int(v, bitsize) {
2293		panicv.errorf("int64 overflow: %v", v)
2294	}
2295	return v
2296}
2297func (x checkOverflow) SignedIntV(v uint64) int64 {
2298	if x.SignedInt(v) {
2299		panicv.errorf("uint64 to int64 overflow: %v", v)
2300	}
2301	return int64(v)
2302}
2303
2304// ------------------ FLOATING POINT -----------------
2305
2306func isNaN64(f float64) bool { return f != f }
2307func isNaN32(f float32) bool { return f != f }
2308func abs32(f float32) float32 {
2309	return math.Float32frombits(math.Float32bits(f) &^ (1 << 31))
2310}
2311
2312// Per go spec, floats are represented in memory as
2313// IEEE single or double precision floating point values.
2314//
2315// We also looked at the source for stdlib math/modf.go,
2316// reviewed https://github.com/chewxy/math32
2317// and read wikipedia documents describing the formats.
2318//
2319// It became clear that we could easily look at the bits to determine
2320// whether any fraction exists.
2321//
2322// This is all we need for now.
2323
2324func noFrac64(f float64) (v bool) {
2325	x := math.Float64bits(f)
2326	e := uint64(x>>52)&0x7FF - 1023 // uint(x>>shift)&mask - bias
2327	// clear top 12+e bits, the integer part; if the rest is 0, then no fraction.
2328	if e < 52 {
2329		// return x&((1<<64-1)>>(12+e)) == 0
2330		return x<<(12+e) == 0
2331	}
2332	return
2333}
2334
2335func noFrac32(f float32) (v bool) {
2336	x := math.Float32bits(f)
2337	e := uint32(x>>23)&0xFF - 127 // uint(x>>shift)&mask - bias
2338	// clear top 9+e bits, the integer part; if the rest is 0, then no fraction.
2339	if e < 23 {
2340		// return x&((1<<32-1)>>(9+e)) == 0
2341		return x<<(9+e) == 0
2342	}
2343	return
2344}
2345
2346// func noFrac(f float64) bool {
2347// 	_, frac := math.Modf(float64(f))
2348// 	return frac == 0
2349// }
2350
2351// -----------------------
2352
2353type ioFlusher interface {
2354	Flush() error
2355}
2356
2357type ioPeeker interface {
2358	Peek(int) ([]byte, error)
2359}
2360
2361type ioBuffered interface {
2362	Buffered() int
2363}
2364
2365// -----------------------
2366
2367type sfiRv struct {
2368	v *structFieldInfo
2369	r reflect.Value
2370}
2371
2372// -----------------
2373
2374type set []interface{}
2375
2376func (s *set) add(v interface{}) (exists bool) {
2377	// e.ci is always nil, or len >= 1
2378	x := *s
2379
2380	if x == nil {
2381		x = make([]interface{}, 1, 8)
2382		x[0] = v
2383		*s = x
2384		return
2385	}
2386	// typically, length will be 1. make this perform.
2387	if len(x) == 1 {
2388		if j := x[0]; j == 0 {
2389			x[0] = v
2390		} else if j == v {
2391			exists = true
2392		} else {
2393			x = append(x, v)
2394			*s = x
2395		}
2396		return
2397	}
2398	// check if it exists
2399	for _, j := range x {
2400		if j == v {
2401			exists = true
2402			return
2403		}
2404	}
2405	// try to replace a "deleted" slot
2406	for i, j := range x {
2407		if j == 0 {
2408			x[i] = v
2409			return
2410		}
2411	}
2412	// if unable to replace deleted slot, just append it.
2413	x = append(x, v)
2414	*s = x
2415	return
2416}
2417
2418func (s *set) remove(v interface{}) (exists bool) {
2419	x := *s
2420	if len(x) == 0 {
2421		return
2422	}
2423	if len(x) == 1 {
2424		if x[0] == v {
2425			x[0] = 0
2426		}
2427		return
2428	}
2429	for i, j := range x {
2430		if j == v {
2431			exists = true
2432			x[i] = 0 // set it to 0, as way to delete it.
2433			// copy(x[i:], x[i+1:])
2434			// x = x[:len(x)-1]
2435			return
2436		}
2437	}
2438	return
2439}
2440
2441// ------
2442
2443// bitset types are better than [256]bool, because they permit the whole
2444// bitset array being on a single cache line and use less memory.
2445//
2446// Also, since pos is a byte (0-255), there's no bounds checks on indexing (cheap).
2447//
2448// We previously had bitset128 [16]byte, and bitset32 [4]byte, but those introduces
2449// bounds checking, so we discarded them, and everyone uses bitset256.
2450//
2451// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1).
2452// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7
2453
2454type bitset256 [32]byte
2455
2456func (x *bitset256) check(pos byte) uint8 {
2457	return x[pos>>3] & (1 << (pos & 7))
2458}
2459
2460func (x *bitset256) isset(pos byte) bool {
2461	return x.check(pos) != 0
2462	// return x[pos>>3]&(1<<(pos&7)) != 0
2463}
2464
2465// func (x *bitset256) issetv(pos byte) byte {
2466// 	return x[pos>>3] & (1 << (pos & 7))
2467// }
2468
2469func (x *bitset256) set(pos byte) {
2470	x[pos>>3] |= (1 << (pos & 7))
2471}
2472
2473type bitset32 uint32
2474
2475func (x bitset32) set(pos byte) bitset32 {
2476	return x | (1 << pos)
2477}
2478
2479func (x bitset32) check(pos byte) uint32 {
2480	return uint32(x) & (1 << pos)
2481}
2482func (x bitset32) isset(pos byte) bool {
2483	return x.check(pos) != 0
2484	// return x&(1<<pos) != 0
2485}
2486
2487// func (x *bitset256) unset(pos byte) {
2488// 	x[pos>>3] &^= (1 << (pos & 7))
2489// }
2490
2491// type bit2set256 [64]byte
2492
2493// func (x *bit2set256) set(pos byte, v1, v2 bool) {
2494// 	var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6
2495// 	if v1 {
2496// 		x[pos>>2] |= 1 << (pos2 + 1)
2497// 	}
2498// 	if v2 {
2499// 		x[pos>>2] |= 1 << pos2
2500// 	}
2501// }
2502// func (x *bit2set256) get(pos byte) uint8 {
2503// 	var pos2 uint8 = (pos & 3) << 1     // returning 0, 2, 4 or 6
2504// 	return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011
2505// }
2506
2507// ------------
2508
2509type panicHdl struct{}
2510
2511func (panicHdl) errorv(err error) {
2512	if err != nil {
2513		panic(err)
2514	}
2515}
2516
2517func (panicHdl) errorstr(message string) {
2518	if message != "" {
2519		panic(message)
2520	}
2521}
2522
2523func (panicHdl) errorf(format string, params ...interface{}) {
2524	if len(params) != 0 {
2525		panic(fmt.Sprintf(format, params...))
2526	}
2527	if len(params) == 0 {
2528		panic(format)
2529	}
2530	panic("undefined error")
2531}
2532
2533// ----------------------------------------------------
2534
2535type errDecorator interface {
2536	wrapErr(in interface{}, out *error)
2537}
2538
2539type errDecoratorDef struct{}
2540
2541func (errDecoratorDef) wrapErr(v interface{}, e *error) { *e = fmt.Errorf("%v", v) }
2542
2543// ----------------------------------------------------
2544
2545type must struct{}
2546
2547func (must) String(s string, err error) string {
2548	if err != nil {
2549		panicv.errorv(err)
2550	}
2551	return s
2552}
2553func (must) Int(s int64, err error) int64 {
2554	if err != nil {
2555		panicv.errorv(err)
2556	}
2557	return s
2558}
2559func (must) Uint(s uint64, err error) uint64 {
2560	if err != nil {
2561		panicv.errorv(err)
2562	}
2563	return s
2564}
2565func (must) Float(s float64, err error) float64 {
2566	if err != nil {
2567		panicv.errorv(err)
2568	}
2569	return s
2570}
2571
2572// -------------------
2573
2574func freelistCapacity(length int) (capacity int) {
2575	for capacity = 8; capacity < length; capacity *= 2 {
2576	}
2577	return
2578}
2579
2580type bytesFreelist [][]byte
2581
2582func (x *bytesFreelist) get(length int) (out []byte) {
2583	var j int = -1
2584	for i := 0; i < len(*x); i++ {
2585		if cap((*x)[i]) >= length && (j == -1 || cap((*x)[j]) > cap((*x)[i])) {
2586			j = i
2587		}
2588	}
2589	if j == -1 {
2590		return make([]byte, length, freelistCapacity(length))
2591	}
2592	out = (*x)[j][:length]
2593	(*x)[j] = nil
2594	for i := 0; i < len(out); i++ {
2595		out[i] = 0
2596	}
2597	return
2598}
2599
2600func (x *bytesFreelist) put(v []byte) {
2601	if len(v) == 0 {
2602		return
2603	}
2604	for i := 0; i < len(*x); i++ {
2605		if cap((*x)[i]) == 0 {
2606			(*x)[i] = v
2607			return
2608		}
2609	}
2610	*x = append(*x, v)
2611}
2612
2613func (x *bytesFreelist) check(v []byte, length int) (out []byte) {
2614	if cap(v) < length {
2615		x.put(v)
2616		return x.get(length)
2617	}
2618	return v[:length]
2619}
2620
2621// -------------------------
2622
2623type sfiRvFreelist [][]sfiRv
2624
2625func (x *sfiRvFreelist) get(length int) (out []sfiRv) {
2626	var j int = -1
2627	for i := 0; i < len(*x); i++ {
2628		if cap((*x)[i]) >= length && (j == -1 || cap((*x)[j]) > cap((*x)[i])) {
2629			j = i
2630		}
2631	}
2632	if j == -1 {
2633		return make([]sfiRv, length, freelistCapacity(length))
2634	}
2635	out = (*x)[j][:length]
2636	(*x)[j] = nil
2637	for i := 0; i < len(out); i++ {
2638		out[i] = sfiRv{}
2639	}
2640	return
2641}
2642
2643func (x *sfiRvFreelist) put(v []sfiRv) {
2644	for i := 0; i < len(*x); i++ {
2645		if cap((*x)[i]) == 0 {
2646			(*x)[i] = v
2647			return
2648		}
2649	}
2650	*x = append(*x, v)
2651}
2652
2653// -----------
2654
2655// xdebugf printf. the message in red on the terminal.
2656// Use it in place of fmt.Printf (which it calls internally)
2657func xdebugf(pattern string, args ...interface{}) {
2658	xdebugAnyf("31", pattern, args...)
2659}
2660
2661// xdebug2f printf. the message in blue on the terminal.
2662// Use it in place of fmt.Printf (which it calls internally)
2663func xdebug2f(pattern string, args ...interface{}) {
2664	xdebugAnyf("34", pattern, args...)
2665}
2666
2667func xdebugAnyf(colorcode, pattern string, args ...interface{}) {
2668	if !xdebug {
2669		return
2670	}
2671	var delim string
2672	if len(pattern) > 0 && pattern[len(pattern)-1] != '\n' {
2673		delim = "\n"
2674	}
2675	fmt.Printf("\033[1;"+colorcode+"m"+pattern+delim+"\033[0m", args...)
2676	// os.Stderr.Flush()
2677}
2678
2679// register these here, so that staticcheck stops barfing
2680var _ = xdebug2f
2681var _ = xdebugf
2682var _ = isNaN32
2683