// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, use go:linkname to export compiler-called functions.
//
//go:linkname mapaccess1_fast64
//go:linkname mapaccess2_fast64
//go:linkname mapassign_fast64
//go:linkname mapassign_fast64ptr
//go:linkname mapdelete_fast64

func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.ptrdata != 0 {
				if sys.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze one or more 32 bit pointers into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.ptrdata != 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
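// None of the functions above are called directly from Go source. As the
// go:linkname directives at the top of this file indicate, the compiler emits
// calls to them when it lowers operations on maps with 64-bit keys, typically
// something like map[uint64]T. A rough illustration in ordinary user code
// (the snippet below is illustrative only, not part of the runtime):
//
//	m := make(map[uint64]int)
//	m[42] = 1      // assignment lowers to a mapassign_fast64 call
//	v := m[42]     // one-result lookup: mapaccess1_fast64
//	v, ok := m[42] // two-result lookup: mapaccess2_fast64
//	delete(m, 42)  // mapdelete_fast64
//	_, _ = v, ok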