runtime: use MapMaxKeyBytes, MapMaxElemBytes, MapBucketCount of internal/abi

For #59670

Change-Id: I9265e033bf3a84c3dc7b4a5d52c0df9672435f0d
GitHub-Last-Rev: 8e4099095c
GitHub-Pull-Request: golang/go#64774
Reviewed-on: https://go-review.googlesource.com/c/go/+/550117
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Than McIntosh <thanm@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
qiulaidongfeng 2024-01-31 03:51:34 +00:00 committed by Keith Randall
parent 24070cf747
commit fdb200834f
7 changed files with 137 additions and 140 deletions


@ -6,6 +6,7 @@ package walk
import (
"fmt"
"internal/abi"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@ -183,8 +184,7 @@ var mapassign = mkmapnames("mapassign", "ptr")
var mapdelete = mkmapnames("mapdelete", "")
func mapfast(t *types.Type) int {
// Check runtime/map.go:maxElemSize before changing.
if t.Elem().Size() > 128 {
if t.Elem().Size() > abi.MapMaxElemBytes {
return mapslow
}
switch reflectdata.AlgType(t.Key()) {
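
For reference, a minimal sketch of the cutoff this hunk switches to the shared constant; the constant and helper names below are illustrative, not the compiler's actual API:

	// mirrors abi.MapMaxElemBytes, assuming the 128-byte cutoff above
	const mapMaxElemBytes = 128

	// useFastMapRoutines reports whether the compiler may emit the fast
	// mapaccess/mapassign variants for a map with the given elem size.
	func useFastMapRoutines(elemSize int64) bool {
		return elemSize <= mapMaxElemBytes
	}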


@ -7,10 +7,15 @@ package abi
// Map constants common to several packages
// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
const (
// Maximum number of key/elem pairs a bucket can hold.
MapBucketCountBits = 3 // log2 of number of elements in a bucket.
MapBucketCount = 1 << MapBucketCountBits
MapMaxKeyBytes = 128 // Must fit in a uint8.
MapMaxElemBytes = 128 // Must fit in a uint8.
// Maximum key or elem size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
// Note: fast map functions cannot handle big elems (bigger than MapMaxElemBytes).
MapMaxKeyBytes = 128
MapMaxElemBytes = 128 // Must fit in a uint8.
)
// ZeroValSize is the size in bytes of runtime.zeroVal.
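
A sketch of the inline-versus-indirect rule these constants encode (hypothetical helper; the compiler and reflect_makemap make this decision for real):

	// mirrors abi.MapMaxKeyBytes / abi.MapMaxElemBytes
	const mapMaxInlineBytes = 128

	// storedIndirect reports whether a key or elem of this size lives outside
	// the bucket: the bucket slot holds a pointer and the object is allocated
	// separately ("mallocing per element").
	func storedIndirect(size uintptr) bool {
		return size > mapMaxInlineBytes
	}
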


@ -751,7 +751,7 @@ func MapTombstoneCheck(m map[int]int) {
b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
n := 0
for b := b0; b != nil; b = b.overflow(t) {
for i := 0; i < bucketCnt; i++ {
for i := 0; i < abi.MapBucketCount; i++ {
if b.tophash[i] != emptyRest {
n++
}
@ -759,7 +759,7 @@ func MapTombstoneCheck(m map[int]int) {
}
k := 0
for b := b0; b != nil; b = b.overflow(t) {
for i := 0; i < bucketCnt; i++ {
for i := 0; i < abi.MapBucketCount; i++ {
if k < n && b.tophash[i] == emptyRest {
panic("early emptyRest")
}
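
The property these two loops count and then assert can be stated on a single tophash array; a sketch, assuming the runtime's emptyRest value of 0:

	const emptyRest = 0 // empty here and in every later slot of the chain

	// noEarlyEmptyRest reports whether all emptyRest slots come after all
	// occupied slots, which is the invariant MapTombstoneCheck enforces.
	func noEarlyEmptyRest(tophash []uint8) bool {
		seenRest := false
		for _, th := range tophash {
			if th == emptyRest {
				seenRest = true
			} else if seenRest {
				return false // occupied slot after emptyRest: invariant broken
			}
		}
		return true
	}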


@ -64,20 +64,12 @@ import (
const (
// Maximum number of key/elem pairs a bucket can hold.
bucketCntBits = abi.MapBucketCountBits
bucketCnt = abi.MapBucketCount
// Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
// Because of minimum alignment rules, bucketCnt is known to be at least 8.
// Represent as loadFactorNum/loadFactorDen, to allow integer math.
loadFactorDen = 2
loadFactorNum = loadFactorDen * bucketCnt * 13 / 16
// Maximum key or elem size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
// Fast versions cannot handle big elems - the cutoff size for
// fast versions in cmd/compile/internal/gc/walk.go must be at most this elem.
maxKeySize = abi.MapMaxKeyBytes
maxElemSize = abi.MapMaxElemBytes
loadFactorNum = loadFactorDen * abi.MapBucketCount * 13 / 16
// data offset should be the size of the bmap struct, but needs to be
// aligned correctly. For amd64p32 this means 64-bit alignment
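
Worked through with abi.MapBucketCount == 8: loadFactorNum = 2*8*13/16 = 13, so with loadFactorDen = 2 growth triggers above an average of 13/2 = 6.5 entries per bucket, i.e. 13/16 of a full 8-slot bucket. A sketch of the arithmetic, assuming those constants:

	const (
		bucketCount   = 8 // abi.MapBucketCount
		loadFactorDen = 2
		loadFactorNum = loadFactorDen * bucketCount * 13 / 16 // = 13
	)

	// growthThreshold is the entry count above which a map with 1<<B buckets
	// grows: 13 * (1<<B)/2 entries, an average load of 6.5 per bucket.
	func growthThreshold(B uint8) uintptr {
		return loadFactorNum * ((uintptr(1) << B) / loadFactorDen)
	}
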
@ -152,7 +144,7 @@ type bmap struct {
// tophash generally contains the top byte of the hash value
// for each key in this bucket. If tophash[0] < minTopHash,
// tophash[0] is a bucket evacuation state instead.
tophash [bucketCnt]uint8
tophash [abi.MapBucketCount]uint8
// Followed by bucketCnt keys and then bucketCnt elems.
// NOTE: packing all the keys together and then all the elems together makes the
// code a bit more complicated than alternating key/elem/key/elem/... but it allows
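
The packing that comment describes, sketched as a generic struct (illustrative only; the real runtime computes this layout dynamically from the maptype):

	// 8 tophash bytes, then all 8 keys, then all 8 elems, then the overflow
	// pointer — rather than alternating key/elem pairs — to avoid padding
	// between differently sized keys and elems.
	type bucketSketch[K comparable, V any] struct {
		tophash  [8]uint8
		keys     [8]K
		elems    [8]V
		overflow *bucketSketch[K, V]
	}
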
@ -431,7 +423,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
top := tophash(hash)
bucketloop:
for ; b != nil; b = b.overflow(t) {
for i := uintptr(0); i < bucketCnt; i++ {
for i := uintptr(0); i < abi.MapBucketCount; i++ {
if b.tophash[i] != top {
if b.tophash[i] == emptyRest {
break bucketloop
@ -443,7 +435,7 @@ bucketloop:
k = *((*unsafe.Pointer)(k))
}
if t.Key.Equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
e = *((*unsafe.Pointer)(e))
}
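
The offset expression recurring in these hunks computes the i-th element's address from that packed layout; as plain arithmetic (hypothetical helper):

	// elemOffset returns the byte offset of element i within a bucket: past
	// the tophash header (dataOffset), past all 8 keys, then i elem slots in.
	func elemOffset(dataOffset, keySize, elemSize, i uintptr) uintptr {
		const bucketCount = 8 // abi.MapBucketCount
		return dataOffset + bucketCount*keySize + i*elemSize
	}
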
@ -492,7 +484,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
top := tophash(hash)
bucketloop:
for ; b != nil; b = b.overflow(t) {
for i := uintptr(0); i < bucketCnt; i++ {
for i := uintptr(0); i < abi.MapBucketCount; i++ {
if b.tophash[i] != top {
if b.tophash[i] == emptyRest {
break bucketloop
@ -504,7 +496,7 @@ bucketloop:
k = *((*unsafe.Pointer)(k))
}
if t.Key.Equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
e = *((*unsafe.Pointer)(e))
}
@ -536,7 +528,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
top := tophash(hash)
bucketloop:
for ; b != nil; b = b.overflow(t) {
for i := uintptr(0); i < bucketCnt; i++ {
for i := uintptr(0); i < abi.MapBucketCount; i++ {
if b.tophash[i] != top {
if b.tophash[i] == emptyRest {
break bucketloop
@ -548,7 +540,7 @@ bucketloop:
k = *((*unsafe.Pointer)(k))
}
if t.Key.Equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
e = *((*unsafe.Pointer)(e))
}
@ -618,12 +610,12 @@ again:
var elem unsafe.Pointer
bucketloop:
for {
for i := uintptr(0); i < bucketCnt; i++ {
for i := uintptr(0); i < abi.MapBucketCount; i++ {
if b.tophash[i] != top {
if isEmpty(b.tophash[i]) && inserti == nil {
inserti = &b.tophash[i]
insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
}
if b.tophash[i] == emptyRest {
break bucketloop
@ -641,7 +633,7 @@ bucketloop:
if t.NeedKeyUpdate() {
typedmemmove(t.Key, k, key)
}
elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
goto done
}
ovf := b.overflow(t)
@ -665,7 +657,7 @@ bucketloop:
newb := h.newoverflow(t, b)
inserti = &newb.tophash[0]
insertk = add(unsafe.Pointer(newb), dataOffset)
elem = add(insertk, bucketCnt*uintptr(t.KeySize))
elem = add(insertk, abi.MapBucketCount*uintptr(t.KeySize))
}
// store new key/elem at insert position
@ -731,7 +723,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
top := tophash(hash)
search:
for ; b != nil; b = b.overflow(t) {
for i := uintptr(0); i < bucketCnt; i++ {
for i := uintptr(0); i < abi.MapBucketCount; i++ {
if b.tophash[i] != top {
if b.tophash[i] == emptyRest {
break search
@ -752,7 +744,7 @@ search:
} else if t.Key.PtrBytes != 0 {
memclrHasPointers(k, t.Key.Size_)
}
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
*(*unsafe.Pointer)(e) = nil
} else if t.Elem.PtrBytes != 0 {
@ -765,7 +757,7 @@ search:
// change those to emptyRest states.
// It would be nice to make this a separate function, but
// for loops are not currently inlineable.
if i == bucketCnt-1 {
if i == abi.MapBucketCount-1 {
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
goto notLast
}
@ -784,7 +776,7 @@ search:
c := b
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
}
i = bucketCnt - 1
i = abi.MapBucketCount - 1
} else {
i--
}
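
A reduced sketch of the emptyOne-to-emptyRest sweep described above, on a single bucket in isolation (the real loop also steps across overflow buckets; state values assumed from the runtime):

	const (
		emptyRestMark = 0 // the runtime's emptyRest
		emptyOneMark  = 1 // the runtime's emptyOne
	)

	// sweepEmptyRest walks backwards from slot i, upgrading a trailing run of
	// emptyOne slots to emptyRest; it assumes every slot after i is already
	// emptyRest, which the surrounding code checks before starting.
	func sweepEmptyRest(tophash []uint8, i int) {
		for i >= 0 && tophash[i] == emptyOneMark {
			tophash[i] = emptyRestMark
			i--
		}
	}
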
@ -845,7 +837,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
// decide where to start
r := uintptr(rand())
it.startBucket = r & bucketMask(h.B)
it.offset = uint8(r >> h.B & (bucketCnt - 1))
it.offset = uint8(r >> h.B & (abi.MapBucketCount - 1))
// iterator state
it.bucket = it.startBucket
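
Those two lines split one random word into a start bucket and an intra-bucket slot offset; a standalone sketch, assuming B bucket bits and 8-slot buckets:

	import "math/rand/v2"

	func iterStart(B uint8) (startBucket uintptr, offset uint8) {
		r := uintptr(rand.Uint64())
		startBucket = r & (uintptr(1)<<B - 1) // low B bits: which bucket first
		offset = uint8(r >> B & (8 - 1))      // next 3 bits: which slot first
		return
	}
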
@ -906,8 +898,8 @@ next:
}
i = 0
}
for ; i < bucketCnt; i++ {
offi := (i + it.offset) & (bucketCnt - 1)
for ; i < abi.MapBucketCount; i++ {
offi := (i + it.offset) & (abi.MapBucketCount - 1)
if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
// TODO: emptyRest is hard to use here, as we start iterating
// in the middle of a bucket. It's feasible, just tricky.
@ -917,7 +909,7 @@ next:
if t.IndirectKey() {
k = *((*unsafe.Pointer)(k))
}
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
if checkBucket != noCheck && !h.sameSizeGrow() {
// Special case: iterator was started during a grow to a larger size
// and the grow is not done yet. We're working on a bucket whose
@ -1008,7 +1000,7 @@ func mapclear(t *maptype, h *hmap) {
for i := uintptr(0); i <= mask; i++ {
b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
for ; b != nil; b = b.overflow(t) {
for i := uintptr(0); i < bucketCnt; i++ {
for i := uintptr(0); i < abi.MapBucketCount; i++ {
b.tophash[i] = emptyRest
}
}
@ -1095,7 +1087,7 @@ func hashGrow(t *maptype, h *hmap) {
// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
return count > abi.MapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
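
For concrete numbers: with B = 5 (32 buckets), overLoadFactor reports true once count exceeds 13*(32/2) = 208 entries. A sketch with the constants spelled out:

	// needsGrow mirrors overLoadFactor under the assumed constants; maps with
	// at most 8 entries never grow, regardless of B.
	func needsGrow(count int, B uint8) bool {
		const bucketCount, loadFactorNum, loadFactorDen = 8, 13, 2
		return count > bucketCount &&
			uintptr(count) > loadFactorNum*((uintptr(1)<<B)/loadFactorDen)
	}
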
@ -1173,7 +1165,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
x.e = add(x.k, bucketCnt*uintptr(t.KeySize))
x.e = add(x.k, abi.MapBucketCount*uintptr(t.KeySize))
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
@ -1181,13 +1173,13 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
y.e = add(y.k, bucketCnt*uintptr(t.KeySize))
y.e = add(y.k, abi.MapBucketCount*uintptr(t.KeySize))
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
e := add(k, bucketCnt*uintptr(t.KeySize))
for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
e := add(k, abi.MapBucketCount*uintptr(t.KeySize))
for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
@ -1233,13 +1225,13 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
dst := &xy[useY] // evacuation destination
if dst.i == bucketCnt {
if dst.i == abi.MapBucketCount {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
dst.e = add(dst.k, bucketCnt*uintptr(t.KeySize))
dst.e = add(dst.k, abi.MapBucketCount*uintptr(t.KeySize))
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
if t.IndirectKey() {
*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
} else {
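
Two ideas from this hunk, sketched (the special case for keys that rehash differently on each lookup, e.g. NaN, is omitted):

	// chooseY reports whether an entry evacuates to the Y half, bucket
	// oldbucket+newbit, where newbit = 1 << oldB is the hash bit that
	// becomes significant after the table doubles.
	func chooseY(hash, newbit uintptr) bool {
		return hash&newbit != 0
	}

	// The dst.i&(abi.MapBucketCount-1) mask does not change the value — dst.i
	// is always < 8 there — it only lets the compiler prove the tophash index
	// is in range and drop the bounds check.
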
@ -1307,18 +1299,18 @@ func reflect_makemap(t *maptype, cap int) *hmap {
if t.Key.Equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
}
if t.Key.Size_ > maxKeySize && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
t.Key.Size_ <= maxKeySize && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
if t.Key.Size_ > abi.MapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
t.Key.Size_ <= abi.MapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
throw("key size wrong")
}
if t.Elem.Size_ > maxElemSize && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
t.Elem.Size_ <= maxElemSize && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
if t.Elem.Size_ > abi.MapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
t.Elem.Size_ <= abi.MapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
throw("elem size wrong")
}
if t.Key.Align_ > bucketCnt {
if t.Key.Align_ > abi.MapBucketCount {
throw("key align too big")
}
if t.Elem.Align_ > bucketCnt {
if t.Elem.Align_ > abi.MapBucketCount {
throw("elem align too big")
}
if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
@ -1327,7 +1319,7 @@ func reflect_makemap(t *maptype, cap int) *hmap {
if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
throw("elem size not a multiple of elem align")
}
if bucketCnt < 8 {
if abi.MapBucketCount < 8 {
throw("bucketsize too small for proper alignment")
}
if dataOffset%uintptr(t.Key.Align_) != 0 {
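
The size checks above encode the indirection rule: a bucket slot holds the value inline up to the Max*Bytes cutoff, and a pointer to it beyond that. A sketch (hypothetical helper, sizes in bytes):

	// expectedSlotSize is what reflect_makemap asserts KeySize/ValueSize to be.
	func expectedSlotSize(size, ptrSize uintptr) uintptr {
		const maxInline = 128 // abi.MapMaxKeyBytes / abi.MapMaxElemBytes
		if size > maxInline {
			return ptrSize // stored indirectly: the slot holds a pointer
		}
		return size // stored inline
	}
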
@ -1452,26 +1444,26 @@ func mapclone(m any) any {
// moveToBmap moves a bucket from src to dst. It returns the destination bucket (or a new destination bucket if dst overflows)
// and the position at which the next key/elem will be written; pos == bucketCnt means the next write needs an overflow bucket.
func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
for i := 0; i < bucketCnt; i++ {
for i := 0; i < abi.MapBucketCount; i++ {
if isEmpty(src.tophash[i]) {
continue
}
for ; pos < bucketCnt; pos++ {
for ; pos < abi.MapBucketCount; pos++ {
if isEmpty(dst.tophash[pos]) {
break
}
}
if pos == bucketCnt {
if pos == abi.MapBucketCount {
dst = h.newoverflow(t, dst)
pos = 0
}
srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
srcEle := add(unsafe.Pointer(src), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
dstEle := add(unsafe.Pointer(dst), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
dst.tophash[pos] = src.tophash[i]
if t.IndirectKey() {
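
moveToBmap's slot search, reduced to a sketch over a plain tophash slice (values <= 1 mean empty, matching the runtime's isEmpty):

	// nextFreeSlot advances pos past occupied dst slots; a result equal to
	// the bucket size (8) tells the caller to chain a fresh overflow bucket.
	func nextFreeSlot(dstTophash []uint8, pos int) int {
		for pos < len(dstTophash) && dstTophash[pos] > 1 {
			pos++
		}
		return pos
	}
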
@ -1575,7 +1567,7 @@ func mapclone2(t *maptype, src *hmap) *hmap {
// Process entries one at a time.
for srcBmap != nil {
// move from old bucket to new bucket
for i := uintptr(0); i < bucketCnt; i++ {
for i := uintptr(0); i < abi.MapBucketCount; i++ {
if isEmpty(srcBmap.tophash[i]) {
continue
}
@ -1589,7 +1581,7 @@ func mapclone2(t *maptype, src *hmap) *hmap {
srcK = *((*unsafe.Pointer)(srcK))
}
srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
if t.IndirectElem() {
srcEle = *((*unsafe.Pointer)(srcEle))
}
@ -1615,7 +1607,7 @@ func keys(m any, p unsafe.Pointer) {
}
s := (*slice)(p)
r := int(rand())
offset := uint8(r >> h.B & (bucketCnt - 1))
offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
if h.B == 0 {
copyKeys(t, h, (*bmap)(h.buckets), s, offset)
return
@ -1644,8 +1636,8 @@ func keys(m any, p unsafe.Pointer) {
func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
for b != nil {
for i := uintptr(0); i < bucketCnt; i++ {
offi := (i + uintptr(offset)) & (bucketCnt - 1)
for i := uintptr(0); i < abi.MapBucketCount; i++ {
offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
if isEmpty(b.tophash[offi]) {
continue
}
@ -1678,7 +1670,7 @@ func values(m any, p unsafe.Pointer) {
}
s := (*slice)(p)
r := int(rand())
offset := uint8(r >> h.B & (bucketCnt - 1))
offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
if h.B == 0 {
copyValues(t, h, (*bmap)(h.buckets), s, offset)
return
@ -1707,8 +1699,8 @@ func values(m any, p unsafe.Pointer) {
func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
for b != nil {
for i := uintptr(0); i < bucketCnt; i++ {
offi := (i + uintptr(offset)) & (bucketCnt - 1)
for i := uintptr(0); i < abi.MapBucketCount; i++ {
offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
if isEmpty(b.tophash[offi]) {
continue
}
@ -1717,7 +1709,7 @@ func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
fatal("concurrent map read and map write")
}
ele := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
ele := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
if t.IndirectElem() {
ele = *((*unsafe.Pointer)(ele))
}


@ -41,9 +41,9 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
}
}
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize))
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
}
}
}
@ -81,9 +81,9 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
}
}
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize)), true
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize)), true
}
}
}
@ -123,7 +123,7 @@ again:
bucketloop:
for {
for i := uintptr(0); i < bucketCnt; i++ {
for i := uintptr(0); i < abi.MapBucketCount; i++ {
if isEmpty(b.tophash[i]) {
if insertb == nil {
inserti = i
@ -163,7 +163,7 @@ bucketloop:
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
// store new key at insert position
@ -172,7 +172,7 @@ bucketloop:
h.count++
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize))
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
@ -213,7 +213,7 @@ again:
bucketloop:
for {
for i := uintptr(0); i < bucketCnt; i++ {
for i := uintptr(0); i < abi.MapBucketCount; i++ {
if isEmpty(b.tophash[i]) {
if insertb == nil {
inserti = i
@ -253,7 +253,7 @@ bucketloop:
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
// store new key at insert position
@ -262,7 +262,7 @@ bucketloop:
h.count++
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize))
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
@ -295,7 +295,7 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
bOrig := b
search:
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
continue
}
@ -307,7 +307,7 @@ search:
// 32 bits wide and the key is 32 bits wide also.
*(*unsafe.Pointer)(k) = nil
}
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize))
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
if t.Elem.PtrBytes != 0 {
memclrHasPointers(e, t.Elem.Size_)
} else {
@ -316,7 +316,7 @@ search:
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
if i == bucketCnt-1 {
if i == abi.MapBucketCount-1 {
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
goto notLast
}
@ -335,7 +335,7 @@ search:
c := b
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
}
i = bucketCnt - 1
i = abi.MapBucketCount - 1
} else {
i--
}
@ -383,7 +383,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
x.e = add(x.k, bucketCnt*4)
x.e = add(x.k, abi.MapBucketCount*4)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
@ -391,13 +391,13 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
y.e = add(y.k, bucketCnt*4)
y.e = add(y.k, abi.MapBucketCount*4)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
e := add(k, bucketCnt*4)
for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
e := add(k, abi.MapBucketCount*4)
for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
@ -419,13 +419,13 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
dst := &xy[useY] // evacuation destination
if dst.i == bucketCnt {
if dst.i == abi.MapBucketCount {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
dst.e = add(dst.k, bucketCnt*4)
dst.e = add(dst.k, abi.MapBucketCount*4)
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 && writeBarrier.enabled {


@ -41,9 +41,9 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
}
}
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize))
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
}
}
}
@ -81,9 +81,9 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
}
}
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize)), true
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize)), true
}
}
}
@ -123,7 +123,7 @@ again:
bucketloop:
for {
for i := uintptr(0); i < bucketCnt; i++ {
for i := uintptr(0); i < abi.MapBucketCount; i++ {
if isEmpty(b.tophash[i]) {
if insertb == nil {
insertb = b
@ -163,7 +163,7 @@ bucketloop:
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
// store new key at insert position
@ -172,7 +172,7 @@ bucketloop:
h.count++
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize))
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
@ -213,7 +213,7 @@ again:
bucketloop:
for {
for i := uintptr(0); i < bucketCnt; i++ {
for i := uintptr(0); i < abi.MapBucketCount; i++ {
if isEmpty(b.tophash[i]) {
if insertb == nil {
insertb = b
@ -253,7 +253,7 @@ bucketloop:
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
// store new key at insert position
@ -262,7 +262,7 @@ bucketloop:
h.count++
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize))
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
@ -295,7 +295,7 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
bOrig := b
search:
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
continue
}
@ -309,7 +309,7 @@ search:
memclrHasPointers(k, 8)
}
}
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize))
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
if t.Elem.PtrBytes != 0 {
memclrHasPointers(e, t.Elem.Size_)
} else {
@ -318,7 +318,7 @@ search:
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
if i == bucketCnt-1 {
if i == abi.MapBucketCount-1 {
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
goto notLast
}
@ -337,7 +337,7 @@ search:
c := b
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
}
i = bucketCnt - 1
i = abi.MapBucketCount - 1
} else {
i--
}
@ -385,7 +385,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
x.e = add(x.k, bucketCnt*8)
x.e = add(x.k, abi.MapBucketCount*8)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
@ -393,13 +393,13 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
y.e = add(y.k, bucketCnt*8)
y.e = add(y.k, abi.MapBucketCount*8)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
e := add(k, bucketCnt*8)
for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
e := add(k, abi.MapBucketCount*8)
for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
@ -421,13 +421,13 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
dst := &xy[useY] // evacuation destination
if dst.i == bucketCnt {
if dst.i == abi.MapBucketCount {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
dst.e = add(dst.k, bucketCnt*8)
dst.e = add(dst.k, abi.MapBucketCount*8)
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
if t.Key.PtrBytes != 0 && writeBarrier.enabled {


@ -27,7 +27,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
@ -36,14 +36,14 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
}
}
return unsafe.Pointer(&zeroVal[0])
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
keymaybe := uintptr(abi.MapBucketCount)
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
@ -52,7 +52,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
continue
}
if k.str == key.str {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
@ -62,16 +62,16 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
continue
}
if keymaybe != bucketCnt {
if keymaybe != abi.MapBucketCount {
// Two keys are potential matches. Use hash to distinguish them.
goto dohash
}
keymaybe = i
}
if keymaybe != bucketCnt {
if keymaybe != abi.MapBucketCount {
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
}
}
return unsafe.Pointer(&zeroVal[0])
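
The long-key path above avoids full comparisons: it filters candidates by length, pointer equality, and the first and last four bytes, remembering at most one survivor (keymaybe) for a single full memequal. A standalone sketch of the byte filter, assuming keys of at least 4 bytes:

	// cheapMatch reports whether b survives the inexpensive filters for key a;
	// a surviving candidate still needs a full comparison to be confirmed.
	func cheapMatch(a, b string) bool {
		if len(a) != len(b) {
			return false
		}
		if a[:4] != b[:4] {
			return false // first 4 bytes differ
		}
		return a[len(a)-4:] == b[len(b)-4:] // last 4 bytes must match too
	}
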
@ -92,13 +92,13 @@ dohash:
}
top := tophash(hash)
for ; b != nil; b = b.overflow(t) {
for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
}
}
}
@ -122,7 +122,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
@ -131,14 +131,14 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
keymaybe := uintptr(abi.MapBucketCount)
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
@ -147,7 +147,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
continue
}
if k.str == key.str {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
@ -157,16 +157,16 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
continue
}
if keymaybe != bucketCnt {
if keymaybe != abi.MapBucketCount {
// Two keys are potential matches. Use hash to distinguish them.
goto dohash
}
keymaybe = i
}
if keymaybe != bucketCnt {
if keymaybe != abi.MapBucketCount {
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
@ -187,13 +187,13 @@ dohash:
}
top := tophash(hash)
for ; b != nil; b = b.overflow(t) {
for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
}
}
}
@ -235,7 +235,7 @@ again:
bucketloop:
for {
for i := uintptr(0); i < bucketCnt; i++ {
for i := uintptr(0); i < abi.MapBucketCount; i++ {
if b.tophash[i] != top {
if isEmpty(b.tophash[i]) && insertb == nil {
insertb = b
@ -282,7 +282,7 @@ bucketloop:
insertb = h.newoverflow(t, b)
inserti = 0 // not necessary, but avoids needlessly spilling inserti
}
insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks
insertb.tophash[inserti&(abi.MapBucketCount-1)] = top // mask inserti to avoid bounds checks
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
// store new key at insert position
@ -290,7 +290,7 @@ bucketloop:
h.count++
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
@ -325,7 +325,7 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
top := tophash(hash)
search:
for ; b != nil; b = b.overflow(t) {
for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
@ -335,7 +335,7 @@ search:
}
// Clear key's pointer.
k.str = nil
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
if t.Elem.PtrBytes != 0 {
memclrHasPointers(e, t.Elem.Size_)
} else {
@ -344,7 +344,7 @@ search:
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
if i == bucketCnt-1 {
if i == abi.MapBucketCount-1 {
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
goto notLast
}
@ -363,7 +363,7 @@ search:
c := b
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
}
i = bucketCnt - 1
i = abi.MapBucketCount - 1
} else {
i--
}
@ -411,7 +411,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
x.e = add(x.k, bucketCnt*2*goarch.PtrSize)
x.e = add(x.k, abi.MapBucketCount*2*goarch.PtrSize)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
@ -419,13 +419,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
y.e = add(y.k, abi.MapBucketCount*2*goarch.PtrSize)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
e := add(k, bucketCnt*2*goarch.PtrSize)
for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
e := add(k, abi.MapBucketCount*2*goarch.PtrSize)
for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
@ -447,13 +447,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
dst := &xy[useY] // evacuation destination
if dst.i == bucketCnt {
if dst.i == abi.MapBucketCount {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
dst.e = add(dst.k, abi.MapBucketCount*2*goarch.PtrSize)
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
*(*string)(dst.k) = *(*string)(k)