// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, use go:linkname to export compiler-called functions.
//
//go:linkname mapaccess1_faststr
//go:linkname mapaccess2_faststr
//go:linkname mapassign_faststr
//go:linkname mapdelete_faststr

func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
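
// Note: the compiler lowers the one-result form of a string-keyed map read
// to a call of mapaccess1_faststr. A minimal sketch of that lowering,
// using illustrative placeholder names (typeof, mapof, V are not real
// identifiers):
//
//	v := m["key"]
//	// becomes, roughly:
//	//   vp := mapaccess1_faststr(typeof(m), mapof(m), "key")
//	//   v = *(*V)(vp) // vp points at the element, or at zeroVal if absent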

func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
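
// Note: the comma-ok form of a string-keyed map read is lowered to
// mapaccess2_faststr. A minimal sketch, again with placeholder names
// (typeof, mapof, V are illustrative, not real identifiers):
//
//	v, ok := m["key"]
//	// becomes, roughly:
//	//   vp, ok := mapaccess2_faststr(typeof(m), mapof(m), "key")
//	//   v = *(*V)(vp) // vp points at zeroVal when ok is false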

func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*sys.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
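
// Note: mapassign_faststr does not store the element itself; it returns a
// pointer to the element slot and the compiler emits the store. A minimal
// sketch of the lowering, with illustrative placeholder names (typeof,
// mapof, V are not real identifiers):
//
//	m["key"] = v
//	// becomes, roughly:
//	//   p := mapassign_faststr(typeof(m), mapof(m), "key")
//	//   *(*V)(p) = v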

func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
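
// Note: a delete of a string key is lowered to a mapdelete_faststr call.
// Sketch with the same illustrative placeholder names (typeof, mapof):
//
//	delete(m, "key")
//	// becomes, roughly:
//	//   mapdelete_faststr(typeof(m), mapof(m), "key")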

func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*2*sys.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*2*sys.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*2*sys.PtrSize)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*sys.PtrSize), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*2*sys.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*sys.PtrSize)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
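
// Note on growth: growWork_faststr evacuates at most two old buckets per
// call (the one the pending write needs, plus one more), so the cost of a
// grow is amortized across the assignments and deletions that trigger it.
// When the table doubles, each old bucket splits between an x and a y
// destination: an entry moves to y exactly when the newly significant hash
// bit is set (hash&newbit != 0). For example, growing from h.B == 2 to
// h.B == 3 makes newbit 4, so the entries of old bucket 1 split between
// new buckets 1 and 5.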