// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, use go:linkname to export compiler-called functions.
//
//go:linkname mapaccess1_fast32
//go:linkname mapaccess2_fast32
//go:linkname mapassign_fast32
//go:linkname mapassign_fast32ptr
//go:linkname mapdelete_fast32

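// mapaccess1_fast32 is the uint32-key fast path for map lookups (v := m[k]).
// It returns a pointer to the element for key, or a pointer to the shared
// zero object if the key is not present; it never returns nil.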
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

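// mapaccess2_fast32 is like mapaccess1_fast32 but also reports whether the
// key was present, backing the two-result form v, ok := m[k].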
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

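// mapassign_fast32 is the uint32-key fast path for map assignment (m[k] = v).
// It returns a pointer to the element slot for key, creating the entry,
// growing the table, or chaining a new overflow bucket as needed; the caller
// stores the element value through the returned pointer.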
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

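// mapassign_fast32ptr is the variant of mapassign_fast32 used when the map
// key is a 32-bit-wide pointer type; the key is stored with a pointer write
// rather than as a raw uint32.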
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

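// mapdelete_fast32 is the uint32-key fast path for delete(m, k). It clears
// the matching entry (if any), converts trailing emptyOne slots to emptyRest,
// and resets the hash seed once the map becomes empty.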
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			// This can only happen if pointers are 32 bit
			// wide as 64 bit pointers do not fit into a 32 bit key.
			if sys.PtrSize == 4 && t.key.ptrdata != 0 {
				// The key must be a pointer as we checked pointers are
				// 32 bits wide and the key is 32 bits wide also.
				*(*unsafe.Pointer)(k) = nil
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

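// growWork_fast32 performs a unit of incremental growth: it evacuates the old
// bucket corresponding to the bucket about to be used, plus one more old
// bucket if growing is still in progress.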
func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}

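// evacuate_fast32 moves the contents of one old bucket (and its overflow
// chain) into the new bucket array, splitting entries between the x (low)
// and y (high) destinations when the table is doubling in size.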
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*4)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*4)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if sys.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
					// Write with a write barrier.
					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}