1// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. 2// Use of this source code is governed by a MIT license found in the LICENSE file. 3 4package codec 5 6// Contains code shared by both encode and decode. 7 8// Some shared ideas around encoding/decoding 9// ------------------------------------------ 10// 11// If an interface{} is passed, we first do a type assertion to see if it is 12// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. 13// 14// If we start with a reflect.Value, we are already in reflect.Value land and 15// will try to grab the function for the underlying Type and directly call that function. 16// This is more performant than calling reflect.Value.Interface(). 17// 18// This still helps us bypass many layers of reflection, and give best performance. 19// 20// Containers 21// ------------ 22// Containers in the stream are either associative arrays (key-value pairs) or 23// regular arrays (indexed by incrementing integers). 24// 25// Some streams support indefinite-length containers, and use a breaking 26// byte-sequence to denote that the container has come to an end. 27// 28// Some streams also are text-based, and use explicit separators to denote the 29// end/beginning of different values. 30// 31// During encode, we use a high-level condition to determine how to iterate through 32// the container. That decision is based on whether the container is text-based (with 33// separators) or binary (without separators). If binary, we do not even call the 34// encoding of separators. 35// 36// During decode, we use a different high-level condition to determine how to iterate 37// through the containers. That decision is based on whether the stream contained 38// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that 39// it has to be binary, and we do not even try to read separators. 40// 41// Philosophy 42// ------------ 43// On decode, this codec will update containers appropriately: 44// - If struct, update fields from stream into fields of struct. 45// If field in stream not found in struct, handle appropriately (based on option). 46// If a struct field has no corresponding value in the stream, leave it AS IS. 47// If nil in stream, set value to nil/zero value. 48// - If map, update map from stream. 49// If the stream value is NIL, set the map to nil. 50// - if slice, try to update up to length of array in stream. 51// if container len is less than stream array length, 52// and container cannot be expanded, handled (based on option). 53// This means you can decode 4-element stream array into 1-element array. 54// 55// ------------------------------------ 56// On encode, user can specify omitEmpty. This means that the value will be omitted 57// if the zero value. The problem may occur during decode, where omitted values do not affect 58// the value being decoded into. This means that if decoding into a struct with an 59// int field with current value=5, and the field is omitted in the stream, then after 60// decoding, the value will still be 5 (not 0). 61// omitEmpty only works if you guarantee that you always decode into zero-values. 62// 63// ------------------------------------ 64// We could have truncated a map to remove keys not available in the stream, 65// or set values in the struct which are not in the stream to their zero values. 66// We decided against it because there is no efficient way to do it. 67// We may introduce it as an option later. 68// However, that will require enabling it for both runtime and code generation modes. 69// 70// To support truncate, we need to do 2 passes over the container: 71// map 72// - first collect all keys (e.g. in k1) 73// - for each key in stream, mark k1 that the key should not be removed 74// - after updating map, do second pass and call delete for all keys in k1 which are not marked 75// struct: 76// - for each field, track the *typeInfo s1 77// - iterate through all s1, and for each one not marked, set value to zero 78// - this involves checking the possible anonymous fields which are nil ptrs. 79// too much work. 80// 81// ------------------------------------------ 82// Error Handling is done within the library using panic. 83// 84// This way, the code doesn't have to keep checking if an error has happened, 85// and we don't have to keep sending the error value along with each call 86// or storing it in the En|Decoder and checking it constantly along the way. 87// 88// The disadvantage is that small functions which use panics cannot be inlined. 89// The code accounts for that by only using panics behind an interface; 90// since interface calls cannot be inlined, this is irrelevant. 91// 92// We considered storing the error is En|Decoder. 93// - once it has its err field set, it cannot be used again. 94// - panicing will be optional, controlled by const flag. 95// - code should always check error first and return early. 96// We eventually decided against it as it makes the code clumsier to always 97// check for these error conditions. 98 99import ( 100 "bytes" 101 "encoding" 102 "encoding/binary" 103 "errors" 104 "fmt" 105 "io" 106 "math" 107 "reflect" 108 "sort" 109 "strconv" 110 "strings" 111 "sync" 112 "sync/atomic" 113 "time" 114) 115 116const ( 117 scratchByteArrayLen = 32 118 // initCollectionCap = 16 // 32 is defensive. 16 is preferred. 119 120 // Support encoding.(Binary|Text)(Unm|M)arshaler. 121 // This constant flag will enable or disable it. 122 supportMarshalInterfaces = true 123 124 // for debugging, set this to false, to catch panic traces. 125 // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. 126 recoverPanicToErr = true 127 128 // arrayCacheLen is the length of the cache used in encoder or decoder for 129 // allowing zero-alloc initialization. 130 // arrayCacheLen = 8 131 132 // size of the cacheline: defaulting to value for archs: amd64, arm64, 386 133 // should use "runtime/internal/sys".CacheLineSize, but that is not exposed. 134 cacheLineSize = 64 135 136 wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize 137 wordSize = wordSizeBits / 8 138 139 // so structFieldInfo fits into 8 bytes 140 maxLevelsEmbedding = 14 141 142 // useFinalizers=true configures finalizers to release pool'ed resources 143 // acquired by Encoder/Decoder during their GC. 144 // 145 // Note that calling SetFinalizer is always expensive, 146 // as code must be run on the systemstack even for SetFinalizer(t, nil). 147 // 148 // We document that folks SHOULD call Release() when done, or they can 149 // explicitly call SetFinalizer themselves e.g. 150 // runtime.SetFinalizer(e, (*Encoder).Release) 151 // runtime.SetFinalizer(d, (*Decoder).Release) 152 useFinalizers = false 153) 154 155var oneByteArr [1]byte 156var zeroByteSlice = oneByteArr[:0:0] 157 158var codecgen bool 159 160var refBitset bitset256 161var pool pooler 162var panicv panicHdl 163 164func init() { 165 pool.init() 166 167 refBitset.set(byte(reflect.Map)) 168 refBitset.set(byte(reflect.Ptr)) 169 refBitset.set(byte(reflect.Func)) 170 refBitset.set(byte(reflect.Chan)) 171} 172 173type clsErr struct { 174 closed bool // is it closed? 175 errClosed error // error on closing 176} 177 178// type entryType uint8 179 180// const ( 181// entryTypeBytes entryType = iota // make this 0, so a comparison is cheap 182// entryTypeIo 183// entryTypeBufio 184// entryTypeUnset = 255 185// ) 186 187type charEncoding uint8 188 189const ( 190 _ charEncoding = iota // make 0 unset 191 cUTF8 192 cUTF16LE 193 cUTF16BE 194 cUTF32LE 195 cUTF32BE 196 // Deprecated: not a true char encoding value 197 cRAW charEncoding = 255 198) 199 200// valueType is the stream type 201type valueType uint8 202 203const ( 204 valueTypeUnset valueType = iota 205 valueTypeNil 206 valueTypeInt 207 valueTypeUint 208 valueTypeFloat 209 valueTypeBool 210 valueTypeString 211 valueTypeSymbol 212 valueTypeBytes 213 valueTypeMap 214 valueTypeArray 215 valueTypeTime 216 valueTypeExt 217 218 // valueTypeInvalid = 0xff 219) 220 221var valueTypeStrings = [...]string{ 222 "Unset", 223 "Nil", 224 "Int", 225 "Uint", 226 "Float", 227 "Bool", 228 "String", 229 "Symbol", 230 "Bytes", 231 "Map", 232 "Array", 233 "Timestamp", 234 "Ext", 235} 236 237func (x valueType) String() string { 238 if int(x) < len(valueTypeStrings) { 239 return valueTypeStrings[x] 240 } 241 return strconv.FormatInt(int64(x), 10) 242} 243 244type seqType uint8 245 246const ( 247 _ seqType = iota 248 seqTypeArray 249 seqTypeSlice 250 seqTypeChan 251) 252 253// note that containerMapStart and containerArraySend are not sent. 254// This is because the ReadXXXStart and EncodeXXXStart already does these. 255type containerState uint8 256 257const ( 258 _ containerState = iota 259 260 containerMapStart // slot left open, since Driver method already covers it 261 containerMapKey 262 containerMapValue 263 containerMapEnd 264 containerArrayStart // slot left open, since Driver methods already cover it 265 containerArrayElem 266 containerArrayEnd 267) 268 269// // sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo 270// type sfiIdx struct { 271// name string 272// index int 273// } 274 275// do not recurse if a containing type refers to an embedded type 276// which refers back to its containing type (via a pointer). 277// The second time this back-reference happens, break out, 278// so as not to cause an infinite loop. 279const rgetMaxRecursion = 2 280 281// Anecdotally, we believe most types have <= 12 fields. 282// - even Java's PMD rules set TooManyFields threshold to 15. 283// However, go has embedded fields, which should be regarded as 284// top level, allowing structs to possibly double or triple. 285// In addition, we don't want to keep creating transient arrays, 286// especially for the sfi index tracking, and the evtypes tracking. 287// 288// So - try to keep typeInfoLoadArray within 2K bytes 289const ( 290 typeInfoLoadArraySfisLen = 16 291 typeInfoLoadArraySfiidxLen = 8 * 112 292 typeInfoLoadArrayEtypesLen = 12 293 typeInfoLoadArrayBLen = 8 * 4 294) 295 296type typeInfoLoad struct { 297 // fNames []string 298 // encNames []string 299 etypes []uintptr 300 sfis []structFieldInfo 301} 302 303type typeInfoLoadArray struct { 304 // fNames [typeInfoLoadArrayLen]string 305 // encNames [typeInfoLoadArrayLen]string 306 sfis [typeInfoLoadArraySfisLen]structFieldInfo 307 sfiidx [typeInfoLoadArraySfiidxLen]byte 308 etypes [typeInfoLoadArrayEtypesLen]uintptr 309 b [typeInfoLoadArrayBLen]byte // scratch - used for struct field names 310} 311 312// mirror json.Marshaler and json.Unmarshaler here, 313// so we don't import the encoding/json package 314 315type jsonMarshaler interface { 316 MarshalJSON() ([]byte, error) 317} 318type jsonUnmarshaler interface { 319 UnmarshalJSON([]byte) error 320} 321 322type isZeroer interface { 323 IsZero() bool 324} 325 326type codecError struct { 327 name string 328 err interface{} 329} 330 331func (e codecError) Cause() error { 332 switch xerr := e.err.(type) { 333 case nil: 334 return nil 335 case error: 336 return xerr 337 case string: 338 return errors.New(xerr) 339 case fmt.Stringer: 340 return errors.New(xerr.String()) 341 default: 342 return fmt.Errorf("%v", e.err) 343 } 344} 345 346func (e codecError) Error() string { 347 return fmt.Sprintf("%s error: %v", e.name, e.err) 348} 349 350// type byteAccepter func(byte) bool 351 352var ( 353 bigen = binary.BigEndian 354 structInfoFieldName = "_struct" 355 356 mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) 357 mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) 358 intfSliceTyp = reflect.TypeOf([]interface{}(nil)) 359 intfTyp = intfSliceTyp.Elem() 360 361 reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem() 362 363 stringTyp = reflect.TypeOf("") 364 timeTyp = reflect.TypeOf(time.Time{}) 365 rawExtTyp = reflect.TypeOf(RawExt{}) 366 rawTyp = reflect.TypeOf(Raw{}) 367 uintptrTyp = reflect.TypeOf(uintptr(0)) 368 uint8Typ = reflect.TypeOf(uint8(0)) 369 uint8SliceTyp = reflect.TypeOf([]uint8(nil)) 370 uintTyp = reflect.TypeOf(uint(0)) 371 intTyp = reflect.TypeOf(int(0)) 372 373 mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() 374 375 binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() 376 binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() 377 378 textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() 379 textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() 380 381 jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() 382 jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() 383 384 selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() 385 missingFielderTyp = reflect.TypeOf((*MissingFielder)(nil)).Elem() 386 iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem() 387 388 uint8TypId = rt2id(uint8Typ) 389 uint8SliceTypId = rt2id(uint8SliceTyp) 390 rawExtTypId = rt2id(rawExtTyp) 391 rawTypId = rt2id(rawTyp) 392 intfTypId = rt2id(intfTyp) 393 timeTypId = rt2id(timeTyp) 394 stringTypId = rt2id(stringTyp) 395 396 mapStrIntfTypId = rt2id(mapStrIntfTyp) 397 mapIntfIntfTypId = rt2id(mapIntfIntfTyp) 398 intfSliceTypId = rt2id(intfSliceTyp) 399 // mapBySliceTypId = rt2id(mapBySliceTyp) 400 401 intBitsize = uint8(intTyp.Bits()) 402 uintBitsize = uint8(uintTyp.Bits()) 403 404 // bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} 405 bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} 406 407 chkOvf checkOverflow 408 409 errNoFieldNameToStructFieldInfo = errors.New("no field name passed to parseStructFieldInfo") 410) 411 412var defTypeInfos = NewTypeInfos([]string{"codec", "json"}) 413 414var immutableKindsSet = [32]bool{ 415 // reflect.Invalid: , 416 reflect.Bool: true, 417 reflect.Int: true, 418 reflect.Int8: true, 419 reflect.Int16: true, 420 reflect.Int32: true, 421 reflect.Int64: true, 422 reflect.Uint: true, 423 reflect.Uint8: true, 424 reflect.Uint16: true, 425 reflect.Uint32: true, 426 reflect.Uint64: true, 427 reflect.Uintptr: true, 428 reflect.Float32: true, 429 reflect.Float64: true, 430 reflect.Complex64: true, 431 reflect.Complex128: true, 432 // reflect.Array 433 // reflect.Chan 434 // reflect.Func: true, 435 // reflect.Interface 436 // reflect.Map 437 // reflect.Ptr 438 // reflect.Slice 439 reflect.String: true, 440 // reflect.Struct 441 // reflect.UnsafePointer 442} 443 444// Selfer defines methods by which a value can encode or decode itself. 445// 446// Any type which implements Selfer will be able to encode or decode itself. 447// Consequently, during (en|de)code, this takes precedence over 448// (text|binary)(M|Unm)arshal or extension support. 449// 450// By definition, it is not allowed for a Selfer to directly call Encode or Decode on itself. 451// If that is done, Encode/Decode will rightfully fail with a Stack Overflow style error. 452// For example, the snippet below will cause such an error. 453// type testSelferRecur struct{} 454// func (s *testSelferRecur) CodecEncodeSelf(e *Encoder) { e.MustEncode(s) } 455// func (s *testSelferRecur) CodecDecodeSelf(d *Decoder) { d.MustDecode(s) } 456// 457// Note: *the first set of bytes of any value MUST NOT represent nil in the format*. 458// This is because, during each decode, we first check the the next set of bytes 459// represent nil, and if so, we just set the value to nil. 460type Selfer interface { 461 CodecEncodeSelf(*Encoder) 462 CodecDecodeSelf(*Decoder) 463} 464 465// MissingFielder defines the interface allowing structs to internally decode or encode 466// values which do not map to struct fields. 467// 468// We expect that this interface is bound to a pointer type (so the mutation function works). 469// 470// A use-case is if a version of a type unexports a field, but you want compatibility between 471// both versions during encoding and decoding. 472// 473// Note that the interface is completely ignored during codecgen. 474type MissingFielder interface { 475 // CodecMissingField is called to set a missing field and value pair. 476 // 477 // It returns true if the missing field was set on the struct. 478 CodecMissingField(field []byte, value interface{}) bool 479 480 // CodecMissingFields returns the set of fields which are not struct fields 481 CodecMissingFields() map[string]interface{} 482} 483 484// MapBySlice is a tag interface that denotes wrapped slice should encode as a map in the stream. 485// The slice contains a sequence of key-value pairs. 486// This affords storing a map in a specific sequence in the stream. 487// 488// Example usage: 489// type T1 []string // or []int or []Point or any other "slice" type 490// func (_ T1) MapBySlice{} // T1 now implements MapBySlice, and will be encoded as a map 491// type T2 struct { KeyValues T1 } 492// 493// var kvs = []string{"one", "1", "two", "2", "three", "3"} 494// var v2 = T2{ KeyValues: T1(kvs) } 495// // v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} } 496// 497// The support of MapBySlice affords the following: 498// - A slice type which implements MapBySlice will be encoded as a map 499// - A slice can be decoded from a map in the stream 500// - It MUST be a slice type (not a pointer receiver) that implements MapBySlice 501type MapBySlice interface { 502 MapBySlice() 503} 504 505// BasicHandle encapsulates the common options and extension functions. 506// 507// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. 508type BasicHandle struct { 509 // BasicHandle is always a part of a different type. 510 // It doesn't have to fit into it own cache lines. 511 512 // TypeInfos is used to get the type info for any type. 513 // 514 // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json 515 TypeInfos *TypeInfos 516 517 // Note: BasicHandle is not comparable, due to these slices here (extHandle, intf2impls). 518 // If *[]T is used instead, this becomes comparable, at the cost of extra indirection. 519 // Thses slices are used all the time, so keep as slices (not pointers). 520 521 extHandle 522 523 intf2impls 524 525 inited uint32 526 _ uint32 // padding 527 528 // ---- cache line 529 530 RPCOptions 531 532 // TimeNotBuiltin configures whether time.Time should be treated as a builtin type. 533 // 534 // All Handlers should know how to encode/decode time.Time as part of the core 535 // format specification, or as a standard extension defined by the format. 536 // 537 // However, users can elect to handle time.Time as a custom extension, or via the 538 // standard library's encoding.Binary(M|Unm)arshaler or Text(M|Unm)arshaler interface. 539 // To elect this behavior, users can set TimeNotBuiltin=true. 540 // Note: Setting TimeNotBuiltin=true can be used to enable the legacy behavior 541 // (for Cbor and Msgpack), where time.Time was not a builtin supported type. 542 TimeNotBuiltin bool 543 544 // ExplicitRelease configures whether Release() is implicitly called after an encode or 545 // decode call. 546 // 547 // If you will hold onto an Encoder or Decoder for re-use, by calling Reset(...) 548 // on it or calling (Must)Encode repeatedly into a given []byte or io.Writer, 549 // then you do not want it to be implicitly closed after each Encode/Decode call. 550 // Doing so will unnecessarily return resources to the shared pool, only for you to 551 // grab them right after again to do another Encode/Decode call. 552 // 553 // Instead, you configure ExplicitRelease=true, and you explicitly call Release() when 554 // you are truly done. 555 // 556 // As an alternative, you can explicitly set a finalizer - so its resources 557 // are returned to the shared pool before it is garbage-collected. Do it as below: 558 // runtime.SetFinalizer(e, (*Encoder).Release) 559 // runtime.SetFinalizer(d, (*Decoder).Release) 560 ExplicitRelease bool 561 562 be bool // is handle a binary encoding? 563 js bool // is handle javascript handler? 564 n byte // first letter of handle name 565 _ uint16 // padding 566 567 // ---- cache line 568 569 DecodeOptions 570 571 // ---- cache line 572 573 EncodeOptions 574 575 // noBuiltInTypeChecker 576 577 rtidFns atomicRtidFnSlice 578 mu sync.Mutex 579 // r []uintptr // rtids mapped to s above 580} 581 582// basicHandle returns an initialized BasicHandle from the Handle. 583func basicHandle(hh Handle) (x *BasicHandle) { 584 x = hh.getBasicHandle() 585 // ** We need to simulate once.Do, to ensure no data race within the block. 586 // ** Consequently, below would not work. 587 // if atomic.CompareAndSwapUint32(&x.inited, 0, 1) { 588 // x.be = hh.isBinary() 589 // _, x.js = hh.(*JsonHandle) 590 // x.n = hh.Name()[0] 591 // } 592 593 // simulate once.Do using our own stored flag and mutex as a CompareAndSwap 594 // is not sufficient, since a race condition can occur within init(Handle) function. 595 // init is made noinline, so that this function can be inlined by its caller. 596 if atomic.LoadUint32(&x.inited) == 0 { 597 x.init(hh) 598 } 599 return 600} 601 602//go:noinline 603func (x *BasicHandle) init(hh Handle) { 604 // make it uninlineable, as it is called at most once 605 x.mu.Lock() 606 if x.inited == 0 { 607 x.be = hh.isBinary() 608 _, x.js = hh.(*JsonHandle) 609 x.n = hh.Name()[0] 610 atomic.StoreUint32(&x.inited, 1) 611 } 612 x.mu.Unlock() 613} 614 615func (x *BasicHandle) getBasicHandle() *BasicHandle { 616 return x 617} 618 619func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { 620 if x.TypeInfos == nil { 621 return defTypeInfos.get(rtid, rt) 622 } 623 return x.TypeInfos.get(rtid, rt) 624} 625 626func findFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) { 627 // binary search. adapted from sort/search.go. 628 // Note: we use goto (instead of for loop) so this can be inlined. 629 630 // h, i, j := 0, 0, len(s) 631 var h uint // var h, i uint 632 var j = uint(len(s)) 633LOOP: 634 if i < j { 635 h = i + (j-i)/2 636 if s[h].rtid < rtid { 637 i = h + 1 638 } else { 639 j = h 640 } 641 goto LOOP 642 } 643 if i < uint(len(s)) && s[i].rtid == rtid { 644 fn = s[i].fn 645 } 646 return 647} 648 649func (x *BasicHandle) fn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) { 650 rtid := rt2id(rt) 651 sp := x.rtidFns.load() 652 if sp != nil { 653 if _, fn = findFn(sp, rtid); fn != nil { 654 // xdebugf("<<<< %c: found fn for %v in rtidfns of size: %v", c.n, rt, len(sp)) 655 return 656 } 657 } 658 c := x 659 // xdebugf("#### for %c: load fn for %v in rtidfns of size: %v", c.n, rt, len(sp)) 660 fn = new(codecFn) 661 fi := &(fn.i) 662 ti := c.getTypeInfo(rtid, rt) 663 fi.ti = ti 664 665 rk := reflect.Kind(ti.kind) 666 667 if checkCodecSelfer && (ti.cs || ti.csp) { 668 fn.fe = (*Encoder).selferMarshal 669 fn.fd = (*Decoder).selferUnmarshal 670 fi.addrF = true 671 fi.addrD = ti.csp 672 fi.addrE = ti.csp 673 } else if rtid == timeTypId && !c.TimeNotBuiltin { 674 fn.fe = (*Encoder).kTime 675 fn.fd = (*Decoder).kTime 676 } else if rtid == rawTypId { 677 fn.fe = (*Encoder).raw 678 fn.fd = (*Decoder).raw 679 } else if rtid == rawExtTypId { 680 fn.fe = (*Encoder).rawExt 681 fn.fd = (*Decoder).rawExt 682 fi.addrF = true 683 fi.addrD = true 684 fi.addrE = true 685 } else if xfFn := c.getExt(rtid); xfFn != nil { 686 fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext 687 fn.fe = (*Encoder).ext 688 fn.fd = (*Decoder).ext 689 fi.addrF = true 690 fi.addrD = true 691 if rk == reflect.Struct || rk == reflect.Array { 692 fi.addrE = true 693 } 694 } else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) { 695 fn.fe = (*Encoder).binaryMarshal 696 fn.fd = (*Decoder).binaryUnmarshal 697 fi.addrF = true 698 fi.addrD = ti.bup 699 fi.addrE = ti.bmp 700 } else if supportMarshalInterfaces && !c.be && c.js && (ti.jm || ti.jmp) && (ti.ju || ti.jup) { 701 //If JSON, we should check JSONMarshal before textMarshal 702 fn.fe = (*Encoder).jsonMarshal 703 fn.fd = (*Decoder).jsonUnmarshal 704 fi.addrF = true 705 fi.addrD = ti.jup 706 fi.addrE = ti.jmp 707 } else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) { 708 fn.fe = (*Encoder).textMarshal 709 fn.fd = (*Decoder).textUnmarshal 710 fi.addrF = true 711 fi.addrD = ti.tup 712 fi.addrE = ti.tmp 713 } else { 714 if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { 715 if ti.pkgpath == "" { // un-named slice or map 716 if idx := fastpathAV.index(rtid); idx != -1 { 717 fn.fe = fastpathAV[idx].encfn 718 fn.fd = fastpathAV[idx].decfn 719 fi.addrD = true 720 fi.addrF = false 721 } 722 } else { 723 // use mapping for underlying type if there 724 var rtu reflect.Type 725 if rk == reflect.Map { 726 rtu = reflect.MapOf(ti.key, ti.elem) 727 } else { 728 rtu = reflect.SliceOf(ti.elem) 729 } 730 rtuid := rt2id(rtu) 731 if idx := fastpathAV.index(rtuid); idx != -1 { 732 xfnf := fastpathAV[idx].encfn 733 xrt := fastpathAV[idx].rt 734 fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) { 735 xfnf(e, xf, xrv.Convert(xrt)) 736 } 737 fi.addrD = true 738 fi.addrF = false // meaning it can be an address(ptr) or a value 739 xfnf2 := fastpathAV[idx].decfn 740 fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { 741 if xrv.Kind() == reflect.Ptr { 742 xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt))) 743 } else { 744 xfnf2(d, xf, xrv.Convert(xrt)) 745 } 746 } 747 } 748 } 749 } 750 if fn.fe == nil && fn.fd == nil { 751 switch rk { 752 case reflect.Bool: 753 fn.fe = (*Encoder).kBool 754 fn.fd = (*Decoder).kBool 755 case reflect.String: 756 fn.fe = (*Encoder).kString 757 fn.fd = (*Decoder).kString 758 case reflect.Int: 759 fn.fd = (*Decoder).kInt 760 fn.fe = (*Encoder).kInt 761 case reflect.Int8: 762 fn.fe = (*Encoder).kInt8 763 fn.fd = (*Decoder).kInt8 764 case reflect.Int16: 765 fn.fe = (*Encoder).kInt16 766 fn.fd = (*Decoder).kInt16 767 case reflect.Int32: 768 fn.fe = (*Encoder).kInt32 769 fn.fd = (*Decoder).kInt32 770 case reflect.Int64: 771 fn.fe = (*Encoder).kInt64 772 fn.fd = (*Decoder).kInt64 773 case reflect.Uint: 774 fn.fd = (*Decoder).kUint 775 fn.fe = (*Encoder).kUint 776 case reflect.Uint8: 777 fn.fe = (*Encoder).kUint8 778 fn.fd = (*Decoder).kUint8 779 case reflect.Uint16: 780 fn.fe = (*Encoder).kUint16 781 fn.fd = (*Decoder).kUint16 782 case reflect.Uint32: 783 fn.fe = (*Encoder).kUint32 784 fn.fd = (*Decoder).kUint32 785 case reflect.Uint64: 786 fn.fe = (*Encoder).kUint64 787 fn.fd = (*Decoder).kUint64 788 case reflect.Uintptr: 789 fn.fe = (*Encoder).kUintptr 790 fn.fd = (*Decoder).kUintptr 791 case reflect.Float32: 792 fn.fe = (*Encoder).kFloat32 793 fn.fd = (*Decoder).kFloat32 794 case reflect.Float64: 795 fn.fe = (*Encoder).kFloat64 796 fn.fd = (*Decoder).kFloat64 797 case reflect.Invalid: 798 fn.fe = (*Encoder).kInvalid 799 fn.fd = (*Decoder).kErr 800 case reflect.Chan: 801 fi.seq = seqTypeChan 802 fn.fe = (*Encoder).kSlice 803 fn.fd = (*Decoder).kSlice 804 case reflect.Slice: 805 fi.seq = seqTypeSlice 806 fn.fe = (*Encoder).kSlice 807 fn.fd = (*Decoder).kSlice 808 case reflect.Array: 809 fi.seq = seqTypeArray 810 fn.fe = (*Encoder).kSlice 811 fi.addrF = false 812 fi.addrD = false 813 rt2 := reflect.SliceOf(ti.elem) 814 fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { 815 d.h.fn(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len())) 816 } 817 // fn.fd = (*Decoder).kArray 818 case reflect.Struct: 819 if ti.anyOmitEmpty || ti.mf || ti.mfp { 820 fn.fe = (*Encoder).kStruct 821 } else { 822 fn.fe = (*Encoder).kStructNoOmitempty 823 } 824 fn.fd = (*Decoder).kStruct 825 case reflect.Map: 826 fn.fe = (*Encoder).kMap 827 fn.fd = (*Decoder).kMap 828 case reflect.Interface: 829 // encode: reflect.Interface are handled already by preEncodeValue 830 fn.fd = (*Decoder).kInterface 831 fn.fe = (*Encoder).kErr 832 default: 833 // reflect.Ptr and reflect.Interface are handled already by preEncodeValue 834 fn.fe = (*Encoder).kErr 835 fn.fd = (*Decoder).kErr 836 } 837 } 838 } 839 840 c.mu.Lock() 841 var sp2 []codecRtidFn 842 sp = c.rtidFns.load() 843 if sp == nil { 844 sp2 = []codecRtidFn{{rtid, fn}} 845 c.rtidFns.store(sp2) 846 // xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2)) 847 // xdebugf(">>>> loading stored rtidfns of size: %v", len(c.rtidFns.load())) 848 } else { 849 idx, fn2 := findFn(sp, rtid) 850 if fn2 == nil { 851 sp2 = make([]codecRtidFn, len(sp)+1) 852 copy(sp2, sp[:idx]) 853 copy(sp2[idx+1:], sp[idx:]) 854 sp2[idx] = codecRtidFn{rtid, fn} 855 c.rtidFns.store(sp2) 856 // xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2)) 857 858 } 859 } 860 c.mu.Unlock() 861 return 862} 863 864// Handle defines a specific encoding format. It also stores any runtime state 865// used during an Encoding or Decoding session e.g. stored state about Types, etc. 866// 867// Once a handle is configured, it can be shared across multiple Encoders and Decoders. 868// 869// Note that a Handle is NOT safe for concurrent modification. 870// Consequently, do not modify it after it is configured if shared among 871// multiple Encoders and Decoders in different goroutines. 872// 873// Consequently, the typical usage model is that a Handle is pre-configured 874// before first time use, and not modified while in use. 875// Such a pre-configured Handle is safe for concurrent access. 876type Handle interface { 877 Name() string 878 // return the basic handle. It may not have been inited. 879 // Prefer to use basicHandle() helper function that ensures it has been inited. 880 getBasicHandle() *BasicHandle 881 recreateEncDriver(encDriver) bool 882 newEncDriver(w *Encoder) encDriver 883 newDecDriver(r *Decoder) decDriver 884 isBinary() bool 885 hasElemSeparators() bool 886 // IsBuiltinType(rtid uintptr) bool 887} 888 889// Raw represents raw formatted bytes. 890// We "blindly" store it during encode and retrieve the raw bytes during decode. 891// Note: it is dangerous during encode, so we may gate the behaviour 892// behind an Encode flag which must be explicitly set. 893type Raw []byte 894 895// RawExt represents raw unprocessed extension data. 896// Some codecs will decode extension data as a *RawExt 897// if there is no registered extension for the tag. 898// 899// Only one of Data or Value is nil. 900// If Data is nil, then the content of the RawExt is in the Value. 901type RawExt struct { 902 Tag uint64 903 // Data is the []byte which represents the raw ext. If nil, ext is exposed in Value. 904 // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types 905 Data []byte 906 // Value represents the extension, if Data is nil. 907 // Value is used by codecs (e.g. cbor, json) which leverage the format to do 908 // custom serialization of the types. 909 Value interface{} 910} 911 912// BytesExt handles custom (de)serialization of types to/from []byte. 913// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. 914type BytesExt interface { 915 // WriteExt converts a value to a []byte. 916 // 917 // Note: v is a pointer iff the registered extension type is a struct or array kind. 918 WriteExt(v interface{}) []byte 919 920 // ReadExt updates a value from a []byte. 921 // 922 // Note: dst is always a pointer kind to the registered extension type. 923 ReadExt(dst interface{}, src []byte) 924} 925 926// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. 927// The Encoder or Decoder will then handle the further (de)serialization of that known type. 928// 929// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types. 930type InterfaceExt interface { 931 // ConvertExt converts a value into a simpler interface for easy encoding 932 // e.g. convert time.Time to int64. 933 // 934 // Note: v is a pointer iff the registered extension type is a struct or array kind. 935 ConvertExt(v interface{}) interface{} 936 937 // UpdateExt updates a value from a simpler interface for easy decoding 938 // e.g. convert int64 to time.Time. 939 // 940 // Note: dst is always a pointer kind to the registered extension type. 941 UpdateExt(dst interface{}, src interface{}) 942} 943 944// Ext handles custom (de)serialization of custom types / extensions. 945type Ext interface { 946 BytesExt 947 InterfaceExt 948} 949 950// addExtWrapper is a wrapper implementation to support former AddExt exported method. 951type addExtWrapper struct { 952 encFn func(reflect.Value) ([]byte, error) 953 decFn func(reflect.Value, []byte) error 954} 955 956func (x addExtWrapper) WriteExt(v interface{}) []byte { 957 bs, err := x.encFn(reflect.ValueOf(v)) 958 if err != nil { 959 panic(err) 960 } 961 return bs 962} 963 964func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { 965 if err := x.decFn(reflect.ValueOf(v), bs); err != nil { 966 panic(err) 967 } 968} 969 970func (x addExtWrapper) ConvertExt(v interface{}) interface{} { 971 return x.WriteExt(v) 972} 973 974func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { 975 x.ReadExt(dest, v.([]byte)) 976} 977 978type extWrapper struct { 979 BytesExt 980 InterfaceExt 981} 982 983type bytesExtFailer struct{} 984 985func (bytesExtFailer) WriteExt(v interface{}) []byte { 986 panicv.errorstr("BytesExt.WriteExt is not supported") 987 return nil 988} 989func (bytesExtFailer) ReadExt(v interface{}, bs []byte) { 990 panicv.errorstr("BytesExt.ReadExt is not supported") 991} 992 993type interfaceExtFailer struct{} 994 995func (interfaceExtFailer) ConvertExt(v interface{}) interface{} { 996 panicv.errorstr("InterfaceExt.ConvertExt is not supported") 997 return nil 998} 999func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) { 1000 panicv.errorstr("InterfaceExt.UpdateExt is not supported") 1001} 1002 1003type binaryEncodingType struct{} 1004 1005func (binaryEncodingType) isBinary() bool { return true } 1006 1007type textEncodingType struct{} 1008 1009func (textEncodingType) isBinary() bool { return false } 1010 1011// noBuiltInTypes is embedded into many types which do not support builtins 1012// e.g. msgpack, simple, cbor. 1013 1014// type noBuiltInTypeChecker struct{} 1015// func (noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false } 1016// type noBuiltInTypes struct{ noBuiltInTypeChecker } 1017 1018type noBuiltInTypes struct{} 1019 1020func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} 1021func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} 1022 1023// type noStreamingCodec struct{} 1024// func (noStreamingCodec) CheckBreak() bool { return false } 1025// func (noStreamingCodec) hasElemSeparators() bool { return false } 1026 1027type noElemSeparators struct{} 1028 1029func (noElemSeparators) hasElemSeparators() (v bool) { return } 1030func (noElemSeparators) recreateEncDriver(e encDriver) (v bool) { return } 1031 1032// bigenHelper. 1033// Users must already slice the x completely, because we will not reslice. 1034type bigenHelper struct { 1035 x []byte // must be correctly sliced to appropriate len. slicing is a cost. 1036 w *encWriterSwitch 1037} 1038 1039func (z bigenHelper) writeUint16(v uint16) { 1040 bigen.PutUint16(z.x, v) 1041 z.w.writeb(z.x) 1042} 1043 1044func (z bigenHelper) writeUint32(v uint32) { 1045 bigen.PutUint32(z.x, v) 1046 z.w.writeb(z.x) 1047} 1048 1049func (z bigenHelper) writeUint64(v uint64) { 1050 bigen.PutUint64(z.x, v) 1051 z.w.writeb(z.x) 1052} 1053 1054type extTypeTagFn struct { 1055 rtid uintptr 1056 rtidptr uintptr 1057 rt reflect.Type 1058 tag uint64 1059 ext Ext 1060 _ [1]uint64 // padding 1061} 1062 1063type extHandle []extTypeTagFn 1064 1065// AddExt registes an encode and decode function for a reflect.Type. 1066// To deregister an Ext, call AddExt with nil encfn and/or nil decfn. 1067// 1068// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead. 1069func (o *extHandle) AddExt(rt reflect.Type, tag byte, 1070 encfn func(reflect.Value) ([]byte, error), 1071 decfn func(reflect.Value, []byte) error) (err error) { 1072 if encfn == nil || decfn == nil { 1073 return o.SetExt(rt, uint64(tag), nil) 1074 } 1075 return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) 1076} 1077 1078// SetExt will set the extension for a tag and reflect.Type. 1079// Note that the type must be a named type, and specifically not a pointer or Interface. 1080// An error is returned if that is not honored. 1081// To Deregister an ext, call SetExt with nil Ext. 1082// 1083// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead. 1084func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { 1085 // o is a pointer, because we may need to initialize it 1086 rk := rt.Kind() 1087 for rk == reflect.Ptr { 1088 rt = rt.Elem() 1089 rk = rt.Kind() 1090 } 1091 1092 if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr { 1093 return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt) 1094 } 1095 1096 rtid := rt2id(rt) 1097 switch rtid { 1098 case timeTypId, rawTypId, rawExtTypId: 1099 // all natively supported type, so cannot have an extension 1100 return // TODO: should we silently ignore, or return an error??? 1101 } 1102 // if o == nil { 1103 // return errors.New("codec.Handle.SetExt: extHandle not initialized") 1104 // } 1105 o2 := *o 1106 // if o2 == nil { 1107 // return errors.New("codec.Handle.SetExt: extHandle not initialized") 1108 // } 1109 for i := range o2 { 1110 v := &o2[i] 1111 if v.rtid == rtid { 1112 v.tag, v.ext = tag, ext 1113 return 1114 } 1115 } 1116 rtidptr := rt2id(reflect.PtrTo(rt)) 1117 *o = append(o2, extTypeTagFn{rtid, rtidptr, rt, tag, ext, [1]uint64{}}) 1118 return 1119} 1120 1121func (o extHandle) getExt(rtid uintptr) (v *extTypeTagFn) { 1122 for i := range o { 1123 v = &o[i] 1124 if v.rtid == rtid || v.rtidptr == rtid { 1125 return 1126 } 1127 } 1128 return nil 1129} 1130 1131func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) { 1132 for i := range o { 1133 v = &o[i] 1134 if v.tag == tag { 1135 return 1136 } 1137 } 1138 return nil 1139} 1140 1141type intf2impl struct { 1142 rtid uintptr // for intf 1143 impl reflect.Type 1144 // _ [1]uint64 // padding // not-needed, as *intf2impl is never returned. 1145} 1146 1147type intf2impls []intf2impl 1148 1149// Intf2Impl maps an interface to an implementing type. 1150// This allows us support infering the concrete type 1151// and populating it when passed an interface. 1152// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc. 1153// 1154// Passing a nil impl will clear the mapping. 1155func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) { 1156 if impl != nil && !impl.Implements(intf) { 1157 return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf) 1158 } 1159 rtid := rt2id(intf) 1160 o2 := *o 1161 for i := range o2 { 1162 v := &o2[i] 1163 if v.rtid == rtid { 1164 v.impl = impl 1165 return 1166 } 1167 } 1168 *o = append(o2, intf2impl{rtid, impl}) 1169 return 1170} 1171 1172func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) { 1173 for i := range o { 1174 v := &o[i] 1175 if v.rtid == rtid { 1176 if v.impl == nil { 1177 return 1178 } 1179 if v.impl.Kind() == reflect.Ptr { 1180 return reflect.New(v.impl.Elem()) 1181 } 1182 return reflect.New(v.impl).Elem() 1183 } 1184 } 1185 return 1186} 1187 1188type structFieldInfoFlag uint8 1189 1190const ( 1191 _ structFieldInfoFlag = 1 << iota 1192 structFieldInfoFlagReady 1193 structFieldInfoFlagOmitEmpty 1194) 1195 1196func (x *structFieldInfoFlag) flagSet(f structFieldInfoFlag) { 1197 *x = *x | f 1198} 1199 1200func (x *structFieldInfoFlag) flagClr(f structFieldInfoFlag) { 1201 *x = *x &^ f 1202} 1203 1204func (x structFieldInfoFlag) flagGet(f structFieldInfoFlag) bool { 1205 return x&f != 0 1206} 1207 1208func (x structFieldInfoFlag) omitEmpty() bool { 1209 return x.flagGet(structFieldInfoFlagOmitEmpty) 1210} 1211 1212func (x structFieldInfoFlag) ready() bool { 1213 return x.flagGet(structFieldInfoFlagReady) 1214} 1215 1216type structFieldInfo struct { 1217 encName string // encode name 1218 fieldName string // field name 1219 1220 is [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct 1221 nis uint8 // num levels of embedding. if 1, then it's not embedded. 1222 1223 encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers 1224 structFieldInfoFlag 1225 _ [1]byte // padding 1226} 1227 1228func (si *structFieldInfo) setToZeroValue(v reflect.Value) { 1229 if v, valid := si.field(v, false); valid { 1230 v.Set(reflect.Zero(v.Type())) 1231 } 1232} 1233 1234// rv returns the field of the struct. 1235// If anonymous, it returns an Invalid 1236func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) { 1237 // replicate FieldByIndex 1238 for i, x := range si.is { 1239 if uint8(i) == si.nis { 1240 break 1241 } 1242 if v, valid = baseStructRv(v, update); !valid { 1243 return 1244 } 1245 v = v.Field(int(x)) 1246 } 1247 1248 return v, true 1249} 1250 1251// func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value { 1252// v, _ = si.field(v, update) 1253// return v 1254// } 1255 1256func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) { 1257 keytype = valueTypeString // default 1258 if stag == "" { 1259 return 1260 } 1261 for i, s := range strings.Split(stag, ",") { 1262 if i == 0 { 1263 } else { 1264 switch s { 1265 case "omitempty": 1266 omitEmpty = true 1267 case "toarray": 1268 toArray = true 1269 case "int": 1270 keytype = valueTypeInt 1271 case "uint": 1272 keytype = valueTypeUint 1273 case "float": 1274 keytype = valueTypeFloat 1275 // case "bool": 1276 // keytype = valueTypeBool 1277 case "string": 1278 keytype = valueTypeString 1279 } 1280 } 1281 } 1282 return 1283} 1284 1285func (si *structFieldInfo) parseTag(stag string) { 1286 // if fname == "" { 1287 // panic(errNoFieldNameToStructFieldInfo) 1288 // } 1289 1290 if stag == "" { 1291 return 1292 } 1293 for i, s := range strings.Split(stag, ",") { 1294 if i == 0 { 1295 if s != "" { 1296 si.encName = s 1297 } 1298 } else { 1299 switch s { 1300 case "omitempty": 1301 si.flagSet(structFieldInfoFlagOmitEmpty) 1302 // si.omitEmpty = true 1303 // case "toarray": 1304 // si.toArray = true 1305 } 1306 } 1307 } 1308} 1309 1310type sfiSortedByEncName []*structFieldInfo 1311 1312func (p sfiSortedByEncName) Len() int { return len(p) } 1313func (p sfiSortedByEncName) Less(i, j int) bool { return p[uint(i)].encName < p[uint(j)].encName } 1314func (p sfiSortedByEncName) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 1315 1316const structFieldNodeNumToCache = 4 1317 1318type structFieldNodeCache struct { 1319 rv [structFieldNodeNumToCache]reflect.Value 1320 idx [structFieldNodeNumToCache]uint32 1321 num uint8 1322} 1323 1324func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) { 1325 for i, k := range &x.idx { 1326 if uint8(i) == x.num { 1327 return // break 1328 } 1329 if key == k { 1330 return x.rv[i], true 1331 } 1332 } 1333 return 1334} 1335 1336func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) { 1337 if x.num < structFieldNodeNumToCache { 1338 x.rv[x.num] = fv 1339 x.idx[x.num] = key 1340 x.num++ 1341 return 1342 } 1343} 1344 1345type structFieldNode struct { 1346 v reflect.Value 1347 cache2 structFieldNodeCache 1348 cache3 structFieldNodeCache 1349 update bool 1350} 1351 1352func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) { 1353 // return si.fieldval(x.v, x.update) 1354 // Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding 1355 // This mostly saves us time on the repeated calls to v.Elem, v.Field, etc. 1356 var valid bool 1357 switch si.nis { 1358 case 1: 1359 fv = x.v.Field(int(si.is[0])) 1360 case 2: 1361 if fv, valid = x.cache2.get(uint32(si.is[0])); valid { 1362 fv = fv.Field(int(si.is[1])) 1363 return 1364 } 1365 fv = x.v.Field(int(si.is[0])) 1366 if fv, valid = baseStructRv(fv, x.update); !valid { 1367 return 1368 } 1369 x.cache2.tryAdd(fv, uint32(si.is[0])) 1370 fv = fv.Field(int(si.is[1])) 1371 case 3: 1372 var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1]) 1373 if fv, valid = x.cache3.get(key); valid { 1374 fv = fv.Field(int(si.is[2])) 1375 return 1376 } 1377 fv = x.v.Field(int(si.is[0])) 1378 if fv, valid = baseStructRv(fv, x.update); !valid { 1379 return 1380 } 1381 fv = fv.Field(int(si.is[1])) 1382 if fv, valid = baseStructRv(fv, x.update); !valid { 1383 return 1384 } 1385 x.cache3.tryAdd(fv, key) 1386 fv = fv.Field(int(si.is[2])) 1387 default: 1388 fv, _ = si.field(x.v, x.update) 1389 } 1390 return 1391} 1392 1393func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) { 1394 for v.Kind() == reflect.Ptr { 1395 if v.IsNil() { 1396 if !update { 1397 return 1398 } 1399 v.Set(reflect.New(v.Type().Elem())) 1400 } 1401 v = v.Elem() 1402 } 1403 return v, true 1404} 1405 1406type typeInfoFlag uint8 1407 1408const ( 1409 typeInfoFlagComparable = 1 << iota 1410 typeInfoFlagIsZeroer 1411 typeInfoFlagIsZeroerPtr 1412) 1413 1414// typeInfo keeps information about each (non-ptr) type referenced in the encode/decode sequence. 1415// 1416// During an encode/decode sequence, we work as below: 1417// - If base is a built in type, en/decode base value 1418// - If base is registered as an extension, en/decode base value 1419// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method 1420// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method 1421// - Else decode appropriately based on the reflect.Kind 1422type typeInfo struct { 1423 rt reflect.Type 1424 elem reflect.Type 1425 pkgpath string 1426 1427 rtid uintptr 1428 // rv0 reflect.Value // saved zero value, used if immutableKind 1429 1430 numMeth uint16 // number of methods 1431 kind uint8 1432 chandir uint8 1433 1434 anyOmitEmpty bool // true if a struct, and any of the fields are tagged "omitempty" 1435 toArray bool // whether this (struct) type should be encoded as an array 1436 keyType valueType // if struct, how is the field name stored in a stream? default is string 1437 mbs bool // base type (T or *T) is a MapBySlice 1438 1439 // ---- cpu cache line boundary? 1440 sfiSort []*structFieldInfo // sorted. Used when enc/dec struct to map. 1441 sfiSrc []*structFieldInfo // unsorted. Used when enc/dec struct to array. 1442 1443 key reflect.Type 1444 1445 // ---- cpu cache line boundary? 1446 // sfis []structFieldInfo // all sfi, in src order, as created. 1447 sfiNamesSort []byte // all names, with indexes into the sfiSort 1448 1449 // format of marshal type fields below: [btj][mu]p? OR csp? 1450 1451 bm bool // T is a binaryMarshaler 1452 bmp bool // *T is a binaryMarshaler 1453 bu bool // T is a binaryUnmarshaler 1454 bup bool // *T is a binaryUnmarshaler 1455 tm bool // T is a textMarshaler 1456 tmp bool // *T is a textMarshaler 1457 tu bool // T is a textUnmarshaler 1458 tup bool // *T is a textUnmarshaler 1459 1460 jm bool // T is a jsonMarshaler 1461 jmp bool // *T is a jsonMarshaler 1462 ju bool // T is a jsonUnmarshaler 1463 jup bool // *T is a jsonUnmarshaler 1464 cs bool // T is a Selfer 1465 csp bool // *T is a Selfer 1466 mf bool // T is a MissingFielder 1467 mfp bool // *T is a MissingFielder 1468 1469 // other flags, with individual bits representing if set. 1470 flags typeInfoFlag 1471 infoFieldOmitempty bool 1472 1473 _ [6]byte // padding 1474 _ [2]uint64 // padding 1475} 1476 1477func (ti *typeInfo) isFlag(f typeInfoFlag) bool { 1478 return ti.flags&f != 0 1479} 1480 1481func (ti *typeInfo) indexForEncName(name []byte) (index int16) { 1482 var sn []byte 1483 if len(name)+2 <= 32 { 1484 var buf [32]byte // should not escape to heap 1485 sn = buf[:len(name)+2] 1486 } else { 1487 sn = make([]byte, len(name)+2) 1488 } 1489 copy(sn[1:], name) 1490 sn[0], sn[len(sn)-1] = tiSep2(name), 0xff 1491 j := bytes.Index(ti.sfiNamesSort, sn) 1492 if j < 0 { 1493 return -1 1494 } 1495 index = int16(uint16(ti.sfiNamesSort[j+len(sn)+1]) | uint16(ti.sfiNamesSort[j+len(sn)])<<8) 1496 return 1497} 1498 1499type rtid2ti struct { 1500 rtid uintptr 1501 ti *typeInfo 1502} 1503 1504// TypeInfos caches typeInfo for each type on first inspection. 1505// 1506// It is configured with a set of tag keys, which are used to get 1507// configuration for the type. 1508type TypeInfos struct { 1509 // infos: formerly map[uintptr]*typeInfo, now *[]rtid2ti, 2 words expected 1510 infos atomicTypeInfoSlice 1511 mu sync.Mutex 1512 tags []string 1513 _ [2]uint64 // padding 1514} 1515 1516// NewTypeInfos creates a TypeInfos given a set of struct tags keys. 1517// 1518// This allows users customize the struct tag keys which contain configuration 1519// of their types. 1520func NewTypeInfos(tags []string) *TypeInfos { 1521 return &TypeInfos{tags: tags} 1522} 1523 1524func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { 1525 // check for tags: codec, json, in that order. 1526 // this allows seamless support for many configured structs. 1527 for _, x := range x.tags { 1528 s = t.Get(x) 1529 if s != "" { 1530 return s 1531 } 1532 } 1533 return 1534} 1535 1536func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) { 1537 // binary search. adapted from sort/search.go. 1538 // Note: we use goto (instead of for loop) so this can be inlined. 1539 1540 // if sp == nil { 1541 // return -1, nil 1542 // } 1543 // s := *sp 1544 1545 // h, i, j := 0, 0, len(s) 1546 var h uint // var h, i uint 1547 var j = uint(len(s)) 1548LOOP: 1549 if i < j { 1550 h = i + (j-i)/2 1551 if s[h].rtid < rtid { 1552 i = h + 1 1553 } else { 1554 j = h 1555 } 1556 goto LOOP 1557 } 1558 if i < uint(len(s)) && s[i].rtid == rtid { 1559 ti = s[i].ti 1560 } 1561 return 1562} 1563 1564func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { 1565 sp := x.infos.load() 1566 if sp != nil { 1567 _, pti = findTypeInfo(sp, rtid) 1568 if pti != nil { 1569 return 1570 } 1571 } 1572 1573 rk := rt.Kind() 1574 1575 if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) { 1576 panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt) 1577 } 1578 1579 // do not hold lock while computing this. 1580 // it may lead to duplication, but that's ok. 1581 ti := typeInfo{ 1582 rt: rt, 1583 rtid: rtid, 1584 kind: uint8(rk), 1585 pkgpath: rt.PkgPath(), 1586 keyType: valueTypeString, // default it - so it's never 0 1587 } 1588 // ti.rv0 = reflect.Zero(rt) 1589 1590 // ti.comparable = rt.Comparable() 1591 ti.numMeth = uint16(rt.NumMethod()) 1592 1593 ti.bm, ti.bmp = implIntf(rt, binaryMarshalerTyp) 1594 ti.bu, ti.bup = implIntf(rt, binaryUnmarshalerTyp) 1595 ti.tm, ti.tmp = implIntf(rt, textMarshalerTyp) 1596 ti.tu, ti.tup = implIntf(rt, textUnmarshalerTyp) 1597 ti.jm, ti.jmp = implIntf(rt, jsonMarshalerTyp) 1598 ti.ju, ti.jup = implIntf(rt, jsonUnmarshalerTyp) 1599 ti.cs, ti.csp = implIntf(rt, selferTyp) 1600 ti.mf, ti.mfp = implIntf(rt, missingFielderTyp) 1601 1602 b1, b2 := implIntf(rt, iszeroTyp) 1603 if b1 { 1604 ti.flags |= typeInfoFlagIsZeroer 1605 } 1606 if b2 { 1607 ti.flags |= typeInfoFlagIsZeroerPtr 1608 } 1609 if rt.Comparable() { 1610 ti.flags |= typeInfoFlagComparable 1611 } 1612 1613 switch rk { 1614 case reflect.Struct: 1615 var omitEmpty bool 1616 if f, ok := rt.FieldByName(structInfoFieldName); ok { 1617 ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag)) 1618 ti.infoFieldOmitempty = omitEmpty 1619 } else { 1620 ti.keyType = valueTypeString 1621 } 1622 pp, pi := &pool.tiload, pool.tiload.Get() // pool.tiLoad() 1623 pv := pi.(*typeInfoLoadArray) 1624 pv.etypes[0] = ti.rtid 1625 // vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} 1626 vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]} 1627 x.rget(rt, rtid, omitEmpty, nil, &vv) 1628 // ti.sfis = vv.sfis 1629 ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv) 1630 pp.Put(pi) 1631 case reflect.Map: 1632 ti.elem = rt.Elem() 1633 ti.key = rt.Key() 1634 case reflect.Slice: 1635 ti.mbs, _ = implIntf(rt, mapBySliceTyp) 1636 ti.elem = rt.Elem() 1637 case reflect.Chan: 1638 ti.elem = rt.Elem() 1639 ti.chandir = uint8(rt.ChanDir()) 1640 case reflect.Array, reflect.Ptr: 1641 ti.elem = rt.Elem() 1642 } 1643 // sfi = sfiSrc 1644 1645 x.mu.Lock() 1646 sp = x.infos.load() 1647 var sp2 []rtid2ti 1648 if sp == nil { 1649 pti = &ti 1650 sp2 = []rtid2ti{{rtid, pti}} 1651 x.infos.store(sp2) 1652 } else { 1653 var idx uint 1654 idx, pti = findTypeInfo(sp, rtid) 1655 if pti == nil { 1656 pti = &ti 1657 sp2 = make([]rtid2ti, len(sp)+1) 1658 copy(sp2, sp[:idx]) 1659 copy(sp2[idx+1:], sp[idx:]) 1660 sp2[idx] = rtid2ti{rtid, pti} 1661 x.infos.store(sp2) 1662 } 1663 } 1664 x.mu.Unlock() 1665 return 1666} 1667 1668func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, 1669 indexstack []uint16, pv *typeInfoLoad) { 1670 // Read up fields and store how to access the value. 1671 // 1672 // It uses go's rules for message selectors, 1673 // which say that the field with the shallowest depth is selected. 1674 // 1675 // Note: we consciously use slices, not a map, to simulate a set. 1676 // Typically, types have < 16 fields, 1677 // and iteration using equals is faster than maps there 1678 flen := rt.NumField() 1679 if flen > (1<<maxLevelsEmbedding - 1) { 1680 panicv.errorf("codec: types with > %v fields are not supported - has %v fields", 1681 (1<<maxLevelsEmbedding - 1), flen) 1682 } 1683 // pv.sfis = make([]structFieldInfo, flen) 1684LOOP: 1685 for j, jlen := uint16(0), uint16(flen); j < jlen; j++ { 1686 f := rt.Field(int(j)) 1687 fkind := f.Type.Kind() 1688 // skip if a func type, or is unexported, or structTag value == "-" 1689 switch fkind { 1690 case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer: 1691 continue LOOP 1692 } 1693 1694 isUnexported := f.PkgPath != "" 1695 if isUnexported && !f.Anonymous { 1696 continue 1697 } 1698 stag := x.structTag(f.Tag) 1699 if stag == "-" { 1700 continue 1701 } 1702 var si structFieldInfo 1703 var parsed bool 1704 // if anonymous and no struct tag (or it's blank), 1705 // and a struct (or pointer to struct), inline it. 1706 if f.Anonymous && fkind != reflect.Interface { 1707 // ^^ redundant but ok: per go spec, an embedded pointer type cannot be to an interface 1708 ft := f.Type 1709 isPtr := ft.Kind() == reflect.Ptr 1710 for ft.Kind() == reflect.Ptr { 1711 ft = ft.Elem() 1712 } 1713 isStruct := ft.Kind() == reflect.Struct 1714 1715 // Ignore embedded fields of unexported non-struct types. 1716 // Also, from go1.10, ignore pointers to unexported struct types 1717 // because unmarshal cannot assign a new struct to an unexported field. 1718 // See https://golang.org/issue/21357 1719 if (isUnexported && !isStruct) || (!allowSetUnexportedEmbeddedPtr && isUnexported && isPtr) { 1720 continue 1721 } 1722 doInline := stag == "" 1723 if !doInline { 1724 si.parseTag(stag) 1725 parsed = true 1726 doInline = si.encName == "" 1727 // doInline = si.isZero() 1728 } 1729 if doInline && isStruct { 1730 // if etypes contains this, don't call rget again (as fields are already seen here) 1731 ftid := rt2id(ft) 1732 // We cannot recurse forever, but we need to track other field depths. 1733 // So - we break if we see a type twice (not the first time). 1734 // This should be sufficient to handle an embedded type that refers to its 1735 // owning type, which then refers to its embedded type. 1736 processIt := true 1737 numk := 0 1738 for _, k := range pv.etypes { 1739 if k == ftid { 1740 numk++ 1741 if numk == rgetMaxRecursion { 1742 processIt = false 1743 break 1744 } 1745 } 1746 } 1747 if processIt { 1748 pv.etypes = append(pv.etypes, ftid) 1749 indexstack2 := make([]uint16, len(indexstack)+1) 1750 copy(indexstack2, indexstack) 1751 indexstack2[len(indexstack)] = j 1752 // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) 1753 x.rget(ft, ftid, omitEmpty, indexstack2, pv) 1754 } 1755 continue 1756 } 1757 } 1758 1759 // after the anonymous dance: if an unexported field, skip 1760 if isUnexported { 1761 continue 1762 } 1763 1764 if f.Name == "" { 1765 panic(errNoFieldNameToStructFieldInfo) 1766 } 1767 1768 // pv.fNames = append(pv.fNames, f.Name) 1769 // if si.encName == "" { 1770 1771 if !parsed { 1772 si.encName = f.Name 1773 si.parseTag(stag) 1774 parsed = true 1775 } else if si.encName == "" { 1776 si.encName = f.Name 1777 } 1778 si.encNameAsciiAlphaNum = true 1779 for i := len(si.encName) - 1; i >= 0; i-- { // bounds-check elimination 1780 b := si.encName[i] 1781 if (b >= '0' && b <= '9') || (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') { 1782 continue 1783 } 1784 si.encNameAsciiAlphaNum = false 1785 break 1786 } 1787 si.fieldName = f.Name 1788 si.flagSet(structFieldInfoFlagReady) 1789 1790 // pv.encNames = append(pv.encNames, si.encName) 1791 1792 // si.ikind = int(f.Type.Kind()) 1793 if len(indexstack) > maxLevelsEmbedding-1 { 1794 panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth", 1795 maxLevelsEmbedding-1, len(indexstack)) 1796 } 1797 si.nis = uint8(len(indexstack)) + 1 1798 copy(si.is[:], indexstack) 1799 si.is[len(indexstack)] = j 1800 1801 if omitEmpty { 1802 si.flagSet(structFieldInfoFlagOmitEmpty) 1803 } 1804 pv.sfis = append(pv.sfis, si) 1805 } 1806} 1807 1808func tiSep(name string) uint8 { 1809 // (xn[0]%64) // (between 192-255 - outside ascii BMP) 1810 // return 0xfe - (name[0] & 63) 1811 // return 0xfe - (name[0] & 63) - uint8(len(name)) 1812 // return 0xfe - (name[0] & 63) - uint8(len(name)&63) 1813 // return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07)) 1814 return 0xfe - (name[0] & 63) - uint8(len(name)&63) 1815} 1816 1817func tiSep2(name []byte) uint8 { 1818 return 0xfe - (name[0] & 63) - uint8(len(name)&63) 1819} 1820 1821// resolves the struct field info got from a call to rget. 1822// Returns a trimmed, unsorted and sorted []*structFieldInfo. 1823func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray) ( 1824 y, z []*structFieldInfo, ss []byte, anyOmitEmpty bool) { 1825 sa := pv.sfiidx[:0] 1826 sn := pv.b[:] 1827 n := len(x) 1828 1829 var xn string 1830 var ui uint16 1831 var sep byte 1832 1833 for i := range x { 1834 ui = uint16(i) 1835 xn = x[i].encName // fieldName or encName? use encName for now. 1836 if len(xn)+2 > cap(pv.b) { 1837 sn = make([]byte, len(xn)+2) 1838 } else { 1839 sn = sn[:len(xn)+2] 1840 } 1841 // use a custom sep, so that misses are less frequent, 1842 // since the sep (first char in search) is as unique as first char in field name. 1843 sep = tiSep(xn) 1844 sn[0], sn[len(sn)-1] = sep, 0xff 1845 copy(sn[1:], xn) 1846 j := bytes.Index(sa, sn) 1847 if j == -1 { 1848 sa = append(sa, sep) 1849 sa = append(sa, xn...) 1850 sa = append(sa, 0xff, byte(ui>>8), byte(ui)) 1851 } else { 1852 index := uint16(sa[j+len(sn)+1]) | uint16(sa[j+len(sn)])<<8 1853 // one of them must be reset to nil, 1854 // and the index updated appropriately to the other one 1855 if x[i].nis == x[index].nis { 1856 } else if x[i].nis < x[index].nis { 1857 sa[j+len(sn)], sa[j+len(sn)+1] = byte(ui>>8), byte(ui) 1858 if x[index].ready() { 1859 x[index].flagClr(structFieldInfoFlagReady) 1860 n-- 1861 } 1862 } else { 1863 if x[i].ready() { 1864 x[i].flagClr(structFieldInfoFlagReady) 1865 n-- 1866 } 1867 } 1868 } 1869 1870 } 1871 var w []structFieldInfo 1872 sharingArray := len(x) <= typeInfoLoadArraySfisLen // sharing array with typeInfoLoadArray 1873 if sharingArray { 1874 w = make([]structFieldInfo, n) 1875 } 1876 1877 // remove all the nils (non-ready) 1878 y = make([]*structFieldInfo, n) 1879 n = 0 1880 var sslen int 1881 for i := range x { 1882 if !x[i].ready() { 1883 continue 1884 } 1885 if !anyOmitEmpty && x[i].omitEmpty() { 1886 anyOmitEmpty = true 1887 } 1888 if sharingArray { 1889 w[n] = x[i] 1890 y[n] = &w[n] 1891 } else { 1892 y[n] = &x[i] 1893 } 1894 sslen = sslen + len(x[i].encName) + 4 1895 n++ 1896 } 1897 if n != len(y) { 1898 panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", 1899 rt, len(y), len(x), n) 1900 } 1901 1902 z = make([]*structFieldInfo, len(y)) 1903 copy(z, y) 1904 sort.Sort(sfiSortedByEncName(z)) 1905 1906 sharingArray = len(sa) <= typeInfoLoadArraySfiidxLen 1907 if sharingArray { 1908 ss = make([]byte, 0, sslen) 1909 } else { 1910 ss = sa[:0] // reuse the newly made sa array if necessary 1911 } 1912 for i := range z { 1913 xn = z[i].encName 1914 sep = tiSep(xn) 1915 ui = uint16(i) 1916 ss = append(ss, sep) 1917 ss = append(ss, xn...) 1918 ss = append(ss, 0xff, byte(ui>>8), byte(ui)) 1919 } 1920 return 1921} 1922 1923func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) { 1924 return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp) 1925} 1926 1927// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty: 1928// - does it implement IsZero() bool 1929// - is it comparable, and can i compare directly using == 1930// - if checkStruct, then walk through the encodable fields 1931// and check if they are empty or not. 1932func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { 1933 // v is a struct kind - no need to check again. 1934 // We only check isZero on a struct kind, to reduce the amount of times 1935 // that we lookup the rtid and typeInfo for each type as we walk the tree. 1936 1937 vt := v.Type() 1938 rtid := rt2id(vt) 1939 if tinfos == nil { 1940 tinfos = defTypeInfos 1941 } 1942 ti := tinfos.get(rtid, vt) 1943 if ti.rtid == timeTypId { 1944 return rv2i(v).(time.Time).IsZero() 1945 } 1946 if ti.isFlag(typeInfoFlagIsZeroerPtr) && v.CanAddr() { 1947 return rv2i(v.Addr()).(isZeroer).IsZero() 1948 } 1949 if ti.isFlag(typeInfoFlagIsZeroer) { 1950 return rv2i(v).(isZeroer).IsZero() 1951 } 1952 if ti.isFlag(typeInfoFlagComparable) { 1953 return rv2i(v) == rv2i(reflect.Zero(vt)) 1954 } 1955 if !checkStruct { 1956 return false 1957 } 1958 // We only care about what we can encode/decode, 1959 // so that is what we use to check omitEmpty. 1960 for _, si := range ti.sfiSrc { 1961 sfv, valid := si.field(v, false) 1962 if valid && !isEmptyValue(sfv, tinfos, deref, checkStruct) { 1963 return false 1964 } 1965 } 1966 return true 1967} 1968 1969// func roundFloat(x float64) float64 { 1970// t := math.Trunc(x) 1971// if math.Abs(x-t) >= 0.5 { 1972// return t + math.Copysign(1, x) 1973// } 1974// return t 1975// } 1976 1977func panicToErr(h errDecorator, err *error) { 1978 // Note: This method MUST be called directly from defer i.e. defer panicToErr ... 1979 // else it seems the recover is not fully handled 1980 if recoverPanicToErr { 1981 if x := recover(); x != nil { 1982 // fmt.Printf("panic'ing with: %v\n", x) 1983 // debug.PrintStack() 1984 panicValToErr(h, x, err) 1985 } 1986 } 1987} 1988 1989func panicValToErr(h errDecorator, v interface{}, err *error) { 1990 switch xerr := v.(type) { 1991 case nil: 1992 case error: 1993 switch xerr { 1994 case nil: 1995 case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized: 1996 // treat as special (bubble up) 1997 *err = xerr 1998 default: 1999 h.wrapErr(xerr, err) 2000 } 2001 case string: 2002 if xerr != "" { 2003 h.wrapErr(xerr, err) 2004 } 2005 case fmt.Stringer: 2006 if xerr != nil { 2007 h.wrapErr(xerr, err) 2008 } 2009 default: 2010 h.wrapErr(v, err) 2011 } 2012} 2013 2014func isImmutableKind(k reflect.Kind) (v bool) { 2015 // return immutableKindsSet[k] 2016 // since we know reflect.Kind is in range 0..31, then use the k%32 == k constraint 2017 return immutableKindsSet[k%reflect.Kind(len(immutableKindsSet))] // bounds-check-elimination 2018} 2019 2020// ---- 2021 2022type codecFnInfo struct { 2023 ti *typeInfo 2024 xfFn Ext 2025 xfTag uint64 2026 seq seqType 2027 addrD bool 2028 addrF bool // if addrD, this says whether decode function can take a value or a ptr 2029 addrE bool 2030} 2031 2032// codecFn encapsulates the captured variables and the encode function. 2033// This way, we only do some calculations one times, and pass to the 2034// code block that should be called (encapsulated in a function) 2035// instead of executing the checks every time. 2036type codecFn struct { 2037 i codecFnInfo 2038 fe func(*Encoder, *codecFnInfo, reflect.Value) 2039 fd func(*Decoder, *codecFnInfo, reflect.Value) 2040 _ [1]uint64 // padding 2041} 2042 2043type codecRtidFn struct { 2044 rtid uintptr 2045 fn *codecFn 2046} 2047 2048// ---- 2049 2050// these "checkOverflow" functions must be inlinable, and not call anybody. 2051// Overflow means that the value cannot be represented without wrapping/overflow. 2052// Overflow=false does not mean that the value can be represented without losing precision 2053// (especially for floating point). 2054 2055type checkOverflow struct{} 2056 2057// func (checkOverflow) Float16(f float64) (overflow bool) { 2058// panicv.errorf("unimplemented") 2059// if f < 0 { 2060// f = -f 2061// } 2062// return math.MaxFloat32 < f && f <= math.MaxFloat64 2063// } 2064 2065func (checkOverflow) Float32(v float64) (overflow bool) { 2066 if v < 0 { 2067 v = -v 2068 } 2069 return math.MaxFloat32 < v && v <= math.MaxFloat64 2070} 2071func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { 2072 if bitsize == 0 || bitsize >= 64 || v == 0 { 2073 return 2074 } 2075 if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { 2076 overflow = true 2077 } 2078 return 2079} 2080func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { 2081 if bitsize == 0 || bitsize >= 64 || v == 0 { 2082 return 2083 } 2084 if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { 2085 overflow = true 2086 } 2087 return 2088} 2089func (checkOverflow) SignedInt(v uint64) (overflow bool) { 2090 //e.g. -127 to 128 for int8 2091 pos := (v >> 63) == 0 2092 ui2 := v & 0x7fffffffffffffff 2093 if pos { 2094 if ui2 > math.MaxInt64 { 2095 overflow = true 2096 } 2097 } else { 2098 if ui2 > math.MaxInt64-1 { 2099 overflow = true 2100 } 2101 } 2102 return 2103} 2104 2105func (x checkOverflow) Float32V(v float64) float64 { 2106 if x.Float32(v) { 2107 panicv.errorf("float32 overflow: %v", v) 2108 } 2109 return v 2110} 2111func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 { 2112 if x.Uint(v, bitsize) { 2113 panicv.errorf("uint64 overflow: %v", v) 2114 } 2115 return v 2116} 2117func (x checkOverflow) IntV(v int64, bitsize uint8) int64 { 2118 if x.Int(v, bitsize) { 2119 panicv.errorf("int64 overflow: %v", v) 2120 } 2121 return v 2122} 2123func (x checkOverflow) SignedIntV(v uint64) int64 { 2124 if x.SignedInt(v) { 2125 panicv.errorf("uint64 to int64 overflow: %v", v) 2126 } 2127 return int64(v) 2128} 2129 2130// ------------------ SORT ----------------- 2131 2132func isNaN(f float64) bool { return f != f } 2133 2134// ----------------------- 2135 2136type ioFlusher interface { 2137 Flush() error 2138} 2139 2140type ioPeeker interface { 2141 Peek(int) ([]byte, error) 2142} 2143 2144type ioBuffered interface { 2145 Buffered() int 2146} 2147 2148// ----------------------- 2149 2150type intSlice []int64 2151type uintSlice []uint64 2152 2153// type uintptrSlice []uintptr 2154type floatSlice []float64 2155type boolSlice []bool 2156type stringSlice []string 2157 2158// type bytesSlice [][]byte 2159 2160func (p intSlice) Len() int { return len(p) } 2161func (p intSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } 2162func (p intSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2163 2164func (p uintSlice) Len() int { return len(p) } 2165func (p uintSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } 2166func (p uintSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2167 2168// func (p uintptrSlice) Len() int { return len(p) } 2169// func (p uintptrSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } 2170// func (p uintptrSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2171 2172func (p floatSlice) Len() int { return len(p) } 2173func (p floatSlice) Less(i, j int) bool { 2174 return p[uint(i)] < p[uint(j)] || isNaN(p[uint(i)]) && !isNaN(p[uint(j)]) 2175} 2176func (p floatSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2177 2178func (p stringSlice) Len() int { return len(p) } 2179func (p stringSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } 2180func (p stringSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2181 2182// func (p bytesSlice) Len() int { return len(p) } 2183// func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)], p[uint(j)]) == -1 } 2184// func (p bytesSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2185 2186func (p boolSlice) Len() int { return len(p) } 2187func (p boolSlice) Less(i, j int) bool { return !p[uint(i)] && p[uint(j)] } 2188func (p boolSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2189 2190// --------------------- 2191 2192type sfiRv struct { 2193 v *structFieldInfo 2194 r reflect.Value 2195} 2196 2197type intRv struct { 2198 v int64 2199 r reflect.Value 2200} 2201type intRvSlice []intRv 2202type uintRv struct { 2203 v uint64 2204 r reflect.Value 2205} 2206type uintRvSlice []uintRv 2207type floatRv struct { 2208 v float64 2209 r reflect.Value 2210} 2211type floatRvSlice []floatRv 2212type boolRv struct { 2213 v bool 2214 r reflect.Value 2215} 2216type boolRvSlice []boolRv 2217type stringRv struct { 2218 v string 2219 r reflect.Value 2220} 2221type stringRvSlice []stringRv 2222type bytesRv struct { 2223 v []byte 2224 r reflect.Value 2225} 2226type bytesRvSlice []bytesRv 2227type timeRv struct { 2228 v time.Time 2229 r reflect.Value 2230} 2231type timeRvSlice []timeRv 2232 2233func (p intRvSlice) Len() int { return len(p) } 2234func (p intRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } 2235func (p intRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2236 2237func (p uintRvSlice) Len() int { return len(p) } 2238func (p uintRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } 2239func (p uintRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2240 2241func (p floatRvSlice) Len() int { return len(p) } 2242func (p floatRvSlice) Less(i, j int) bool { 2243 return p[uint(i)].v < p[uint(j)].v || isNaN(p[uint(i)].v) && !isNaN(p[uint(j)].v) 2244} 2245func (p floatRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2246 2247func (p stringRvSlice) Len() int { return len(p) } 2248func (p stringRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } 2249func (p stringRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2250 2251func (p bytesRvSlice) Len() int { return len(p) } 2252func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 } 2253func (p bytesRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2254 2255func (p boolRvSlice) Len() int { return len(p) } 2256func (p boolRvSlice) Less(i, j int) bool { return !p[uint(i)].v && p[uint(j)].v } 2257func (p boolRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2258 2259func (p timeRvSlice) Len() int { return len(p) } 2260func (p timeRvSlice) Less(i, j int) bool { return p[uint(i)].v.Before(p[uint(j)].v) } 2261func (p timeRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2262 2263// ----------------- 2264 2265type bytesI struct { 2266 v []byte 2267 i interface{} 2268} 2269 2270type bytesISlice []bytesI 2271 2272func (p bytesISlice) Len() int { return len(p) } 2273func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 } 2274func (p bytesISlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } 2275 2276// ----------------- 2277 2278type set []uintptr 2279 2280func (s *set) add(v uintptr) (exists bool) { 2281 // e.ci is always nil, or len >= 1 2282 x := *s 2283 if x == nil { 2284 x = make([]uintptr, 1, 8) 2285 x[0] = v 2286 *s = x 2287 return 2288 } 2289 // typically, length will be 1. make this perform. 2290 if len(x) == 1 { 2291 if j := x[0]; j == 0 { 2292 x[0] = v 2293 } else if j == v { 2294 exists = true 2295 } else { 2296 x = append(x, v) 2297 *s = x 2298 } 2299 return 2300 } 2301 // check if it exists 2302 for _, j := range x { 2303 if j == v { 2304 exists = true 2305 return 2306 } 2307 } 2308 // try to replace a "deleted" slot 2309 for i, j := range x { 2310 if j == 0 { 2311 x[i] = v 2312 return 2313 } 2314 } 2315 // if unable to replace deleted slot, just append it. 2316 x = append(x, v) 2317 *s = x 2318 return 2319} 2320 2321func (s *set) remove(v uintptr) (exists bool) { 2322 x := *s 2323 if len(x) == 0 { 2324 return 2325 } 2326 if len(x) == 1 { 2327 if x[0] == v { 2328 x[0] = 0 2329 } 2330 return 2331 } 2332 for i, j := range x { 2333 if j == v { 2334 exists = true 2335 x[i] = 0 // set it to 0, as way to delete it. 2336 // copy(x[i:], x[i+1:]) 2337 // x = x[:len(x)-1] 2338 return 2339 } 2340 } 2341 return 2342} 2343 2344// ------ 2345 2346// bitset types are better than [256]bool, because they permit the whole 2347// bitset array being on a single cache line and use less memory. 2348// 2349// Also, since pos is a byte (0-255), there's no bounds checks on indexing (cheap). 2350// 2351// We previously had bitset128 [16]byte, and bitset32 [4]byte, but those introduces 2352// bounds checking, so we discarded them, and everyone uses bitset256. 2353// 2354// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1). 2355// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7 2356 2357type bitset256 [32]byte 2358 2359func (x *bitset256) isset(pos byte) bool { 2360 return x[pos>>3]&(1<<(pos&7)) != 0 2361} 2362 2363// func (x *bitset256) issetv(pos byte) byte { 2364// return x[pos>>3] & (1 << (pos & 7)) 2365// } 2366 2367func (x *bitset256) set(pos byte) { 2368 x[pos>>3] |= (1 << (pos & 7)) 2369} 2370 2371// func (x *bitset256) unset(pos byte) { 2372// x[pos>>3] &^= (1 << (pos & 7)) 2373// } 2374 2375// type bit2set256 [64]byte 2376 2377// func (x *bit2set256) set(pos byte, v1, v2 bool) { 2378// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 2379// if v1 { 2380// x[pos>>2] |= 1 << (pos2 + 1) 2381// } 2382// if v2 { 2383// x[pos>>2] |= 1 << pos2 2384// } 2385// } 2386// func (x *bit2set256) get(pos byte) uint8 { 2387// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 2388// return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011 2389// } 2390 2391// ------------ 2392 2393type pooler struct { 2394 // function-scoped pooled resources 2395 tiload sync.Pool // for type info loading 2396 sfiRv8, sfiRv16, sfiRv32, sfiRv64, sfiRv128 sync.Pool // for struct encoding 2397 2398 // lifetime-scoped pooled resources 2399 // dn sync.Pool // for decNaked 2400 buf1k, buf2k, buf4k, buf8k, buf16k, buf32k, buf64k sync.Pool // for [N]byte 2401} 2402 2403func (p *pooler) init() { 2404 p.tiload.New = func() interface{} { return new(typeInfoLoadArray) } 2405 2406 p.sfiRv8.New = func() interface{} { return new([8]sfiRv) } 2407 p.sfiRv16.New = func() interface{} { return new([16]sfiRv) } 2408 p.sfiRv32.New = func() interface{} { return new([32]sfiRv) } 2409 p.sfiRv64.New = func() interface{} { return new([64]sfiRv) } 2410 p.sfiRv128.New = func() interface{} { return new([128]sfiRv) } 2411 2412 // p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x } 2413 2414 p.buf1k.New = func() interface{} { return new([1 * 1024]byte) } 2415 p.buf2k.New = func() interface{} { return new([2 * 1024]byte) } 2416 p.buf4k.New = func() interface{} { return new([4 * 1024]byte) } 2417 p.buf8k.New = func() interface{} { return new([8 * 1024]byte) } 2418 p.buf16k.New = func() interface{} { return new([16 * 1024]byte) } 2419 p.buf32k.New = func() interface{} { return new([32 * 1024]byte) } 2420 p.buf64k.New = func() interface{} { return new([64 * 1024]byte) } 2421 2422} 2423 2424// func (p *pooler) sfiRv8() (sp *sync.Pool, v interface{}) { 2425// return &p.strRv8, p.strRv8.Get() 2426// } 2427// func (p *pooler) sfiRv16() (sp *sync.Pool, v interface{}) { 2428// return &p.strRv16, p.strRv16.Get() 2429// } 2430// func (p *pooler) sfiRv32() (sp *sync.Pool, v interface{}) { 2431// return &p.strRv32, p.strRv32.Get() 2432// } 2433// func (p *pooler) sfiRv64() (sp *sync.Pool, v interface{}) { 2434// return &p.strRv64, p.strRv64.Get() 2435// } 2436// func (p *pooler) sfiRv128() (sp *sync.Pool, v interface{}) { 2437// return &p.strRv128, p.strRv128.Get() 2438// } 2439 2440// func (p *pooler) bytes1k() (sp *sync.Pool, v interface{}) { 2441// return &p.buf1k, p.buf1k.Get() 2442// } 2443// func (p *pooler) bytes2k() (sp *sync.Pool, v interface{}) { 2444// return &p.buf2k, p.buf2k.Get() 2445// } 2446// func (p *pooler) bytes4k() (sp *sync.Pool, v interface{}) { 2447// return &p.buf4k, p.buf4k.Get() 2448// } 2449// func (p *pooler) bytes8k() (sp *sync.Pool, v interface{}) { 2450// return &p.buf8k, p.buf8k.Get() 2451// } 2452// func (p *pooler) bytes16k() (sp *sync.Pool, v interface{}) { 2453// return &p.buf16k, p.buf16k.Get() 2454// } 2455// func (p *pooler) bytes32k() (sp *sync.Pool, v interface{}) { 2456// return &p.buf32k, p.buf32k.Get() 2457// } 2458// func (p *pooler) bytes64k() (sp *sync.Pool, v interface{}) { 2459// return &p.buf64k, p.buf64k.Get() 2460// } 2461 2462// func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) { 2463// return &p.tiload, p.tiload.Get() 2464// } 2465 2466// func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) { 2467// return &p.dn, p.dn.Get() 2468// } 2469 2470// func (p *pooler) decNaked() (v *decNaked, f func(*decNaked) ) { 2471// sp := &(p.dn) 2472// vv := sp.Get() 2473// return vv.(*decNaked), func(x *decNaked) { sp.Put(vv) } 2474// } 2475// func (p *pooler) decNakedGet() (v interface{}) { 2476// return p.dn.Get() 2477// } 2478// func (p *pooler) tiLoadGet() (v interface{}) { 2479// return p.tiload.Get() 2480// } 2481// func (p *pooler) decNakedPut(v interface{}) { 2482// p.dn.Put(v) 2483// } 2484// func (p *pooler) tiLoadPut(v interface{}) { 2485// p.tiload.Put(v) 2486// } 2487 2488// ---------------------------------------------------- 2489 2490type panicHdl struct{} 2491 2492func (panicHdl) errorv(err error) { 2493 if err != nil { 2494 panic(err) 2495 } 2496} 2497 2498func (panicHdl) errorstr(message string) { 2499 if message != "" { 2500 panic(message) 2501 } 2502} 2503 2504func (panicHdl) errorf(format string, params ...interface{}) { 2505 if format == "" { 2506 } else if len(params) == 0 { 2507 panic(format) 2508 } else { 2509 panic(fmt.Sprintf(format, params...)) 2510 } 2511} 2512 2513// ---------------------------------------------------- 2514 2515type errDecorator interface { 2516 wrapErr(in interface{}, out *error) 2517} 2518 2519type errDecoratorDef struct{} 2520 2521func (errDecoratorDef) wrapErr(v interface{}, e *error) { *e = fmt.Errorf("%v", v) } 2522 2523// ---------------------------------------------------- 2524 2525type must struct{} 2526 2527func (must) String(s string, err error) string { 2528 if err != nil { 2529 panicv.errorv(err) 2530 } 2531 return s 2532} 2533func (must) Int(s int64, err error) int64 { 2534 if err != nil { 2535 panicv.errorv(err) 2536 } 2537 return s 2538} 2539func (must) Uint(s uint64, err error) uint64 { 2540 if err != nil { 2541 panicv.errorv(err) 2542 } 2543 return s 2544} 2545func (must) Float(s float64, err error) float64 { 2546 if err != nil { 2547 panicv.errorv(err) 2548 } 2549 return s 2550} 2551 2552// ------------------- 2553 2554type bytesBufPooler struct { 2555 pool *sync.Pool 2556 poolbuf interface{} 2557} 2558 2559func (z *bytesBufPooler) end() { 2560 if z.pool != nil { 2561 z.pool.Put(z.poolbuf) 2562 z.pool, z.poolbuf = nil, nil 2563 } 2564} 2565 2566func (z *bytesBufPooler) get(bufsize int) (buf []byte) { 2567 // ensure an end is called first (if necessary) 2568 if z.pool != nil { 2569 z.pool.Put(z.poolbuf) 2570 z.pool, z.poolbuf = nil, nil 2571 } 2572 2573 // // Try to use binary search. 2574 // // This is not optimal, as most folks select 1k or 2k buffers 2575 // // so a linear search is better (sequence of if/else blocks) 2576 // if bufsize < 1 { 2577 // bufsize = 0 2578 // } else { 2579 // bufsize-- 2580 // bufsize /= 1024 2581 // } 2582 // switch bufsize { 2583 // case 0: 2584 // z.pool, z.poolbuf = pool.bytes1k() 2585 // buf = z.poolbuf.(*[1 * 1024]byte)[:] 2586 // case 1: 2587 // z.pool, z.poolbuf = pool.bytes2k() 2588 // buf = z.poolbuf.(*[2 * 1024]byte)[:] 2589 // case 2, 3: 2590 // z.pool, z.poolbuf = pool.bytes4k() 2591 // buf = z.poolbuf.(*[4 * 1024]byte)[:] 2592 // case 4, 5, 6, 7: 2593 // z.pool, z.poolbuf = pool.bytes8k() 2594 // buf = z.poolbuf.(*[8 * 1024]byte)[:] 2595 // case 8, 9, 10, 11, 12, 13, 14, 15: 2596 // z.pool, z.poolbuf = pool.bytes16k() 2597 // buf = z.poolbuf.(*[16 * 1024]byte)[:] 2598 // case 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31: 2599 // z.pool, z.poolbuf = pool.bytes32k() 2600 // buf = z.poolbuf.(*[32 * 1024]byte)[:] 2601 // default: 2602 // z.pool, z.poolbuf = pool.bytes64k() 2603 // buf = z.poolbuf.(*[64 * 1024]byte)[:] 2604 // } 2605 // return 2606 2607 if bufsize <= 1*1024 { 2608 z.pool, z.poolbuf = &pool.buf1k, pool.buf1k.Get() // pool.bytes1k() 2609 buf = z.poolbuf.(*[1 * 1024]byte)[:] 2610 } else if bufsize <= 2*1024 { 2611 z.pool, z.poolbuf = &pool.buf2k, pool.buf2k.Get() // pool.bytes2k() 2612 buf = z.poolbuf.(*[2 * 1024]byte)[:] 2613 } else if bufsize <= 4*1024 { 2614 z.pool, z.poolbuf = &pool.buf4k, pool.buf4k.Get() // pool.bytes4k() 2615 buf = z.poolbuf.(*[4 * 1024]byte)[:] 2616 } else if bufsize <= 8*1024 { 2617 z.pool, z.poolbuf = &pool.buf8k, pool.buf8k.Get() // pool.bytes8k() 2618 buf = z.poolbuf.(*[8 * 1024]byte)[:] 2619 } else if bufsize <= 16*1024 { 2620 z.pool, z.poolbuf = &pool.buf16k, pool.buf16k.Get() // pool.bytes16k() 2621 buf = z.poolbuf.(*[16 * 1024]byte)[:] 2622 } else if bufsize <= 32*1024 { 2623 z.pool, z.poolbuf = &pool.buf32k, pool.buf32k.Get() // pool.bytes32k() 2624 buf = z.poolbuf.(*[32 * 1024]byte)[:] 2625 } else { 2626 z.pool, z.poolbuf = &pool.buf64k, pool.buf64k.Get() // pool.bytes64k() 2627 buf = z.poolbuf.(*[64 * 1024]byte)[:] 2628 } 2629 return 2630} 2631 2632// ---------------- 2633 2634type sfiRvPooler struct { 2635 pool *sync.Pool 2636 poolv interface{} 2637} 2638 2639func (z *sfiRvPooler) end() { 2640 if z.pool != nil { 2641 z.pool.Put(z.poolv) 2642 z.pool, z.poolv = nil, nil 2643 } 2644} 2645 2646func (z *sfiRvPooler) get(newlen int) (fkvs []sfiRv) { 2647 if newlen < 0 { // bounds-check-elimination 2648 // cannot happen // here for bounds-check-elimination 2649 } else if newlen <= 8 { 2650 z.pool, z.poolv = &pool.sfiRv8, pool.sfiRv8.Get() // pool.sfiRv8() 2651 fkvs = z.poolv.(*[8]sfiRv)[:newlen] 2652 } else if newlen <= 16 { 2653 z.pool, z.poolv = &pool.sfiRv16, pool.sfiRv16.Get() // pool.sfiRv16() 2654 fkvs = z.poolv.(*[16]sfiRv)[:newlen] 2655 } else if newlen <= 32 { 2656 z.pool, z.poolv = &pool.sfiRv32, pool.sfiRv32.Get() // pool.sfiRv32() 2657 fkvs = z.poolv.(*[32]sfiRv)[:newlen] 2658 } else if newlen <= 64 { 2659 z.pool, z.poolv = &pool.sfiRv64, pool.sfiRv64.Get() // pool.sfiRv64() 2660 fkvs = z.poolv.(*[64]sfiRv)[:newlen] 2661 } else if newlen <= 128 { 2662 z.pool, z.poolv = &pool.sfiRv128, pool.sfiRv128.Get() // pool.sfiRv128() 2663 fkvs = z.poolv.(*[128]sfiRv)[:newlen] 2664 } else { 2665 fkvs = make([]sfiRv, newlen) 2666 } 2667 return 2668} 2669 2670// xdebugf printf. the message in red on the terminal. 2671// Use it in place of fmt.Printf (which it calls internally) 2672func xdebugf(pattern string, args ...interface{}) { 2673 var delim string 2674 if len(pattern) > 0 && pattern[len(pattern)-1] != '\n' { 2675 delim = "\n" 2676 } 2677 fmt.Printf("\033[1;31m"+pattern+delim+"\033[0m", args...) 2678} 2679 2680// func isImmutableKind(k reflect.Kind) (v bool) { 2681// return false || 2682// k == reflect.Int || 2683// k == reflect.Int8 || 2684// k == reflect.Int16 || 2685// k == reflect.Int32 || 2686// k == reflect.Int64 || 2687// k == reflect.Uint || 2688// k == reflect.Uint8 || 2689// k == reflect.Uint16 || 2690// k == reflect.Uint32 || 2691// k == reflect.Uint64 || 2692// k == reflect.Uintptr || 2693// k == reflect.Float32 || 2694// k == reflect.Float64 || 2695// k == reflect.Bool || 2696// k == reflect.String 2697// } 2698 2699// func timeLocUTCName(tzint int16) string { 2700// if tzint == 0 { 2701// return "UTC" 2702// } 2703// var tzname = []byte("UTC+00:00") 2704// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf.. inline below. 2705// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first 2706// var tzhr, tzmin int16 2707// if tzint < 0 { 2708// tzname[3] = '-' // (TODO: verify. this works here) 2709// tzhr, tzmin = -tzint/60, (-tzint)%60 2710// } else { 2711// tzhr, tzmin = tzint/60, tzint%60 2712// } 2713// tzname[4] = timeDigits[tzhr/10] 2714// tzname[5] = timeDigits[tzhr%10] 2715// tzname[7] = timeDigits[tzmin/10] 2716// tzname[8] = timeDigits[tzmin%10] 2717// return string(tzname) 2718// //return time.FixedZone(string(tzname), int(tzint)*60) 2719// } 2720