cmd/compile,runtime: disable swissmap fast variants
Temporary measure to reduce the required MVP code.

For #54766.

Cq-Include-Trybots: luci.golang.try:gotip-linux-amd64-longtest-swissmap
Change-Id: I44dc8acd0dc8280c6beb40451998e84bc85c238a
Reviewed-on: https://go-review.googlesource.com/c/go/+/580915
Reviewed-by: Keith Randall <khr@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@google.com>
This commit is contained in: 1985c0ccf9 (parent: 4f7dc282c4)
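For orientation, a hedged sketch (not part of this CL) of what the disabled "fast variants" are: the compiler normally lowers map operations keyed by uint32, uint64, or string to specialized runtime entry points rather than the generic ones. The routing follows the mapfastSwiss switch in the first hunk below; swissmap itself is toggled with the GOEXPERIMENT=swissmap environment variable.

package main

// With fast variants enabled, these lookups compile to specialized
// runtime calls (mapaccess1_fast32/_fast64/_faststr). After this CL,
// under goexperiment.swissmap, they all route through the generic
// runtime.mapaccess1 instead.
func lookups(m32 map[uint32]int, m64 map[uint64]int, ms map[string]int) (int, int, int) {
	a := m32[7]  // was mapaccess1_fast32
	b := m64[7]  // was mapaccess1_fast64
	c := ms["k"] // was mapaccess1_faststr
	return a, b, c
}

func main() {
	a, b, c := lookups(map[uint32]int{7: 1}, map[uint64]int{7: 2}, map[string]int{"k": 3})
	println(a, b, c) // 1 2 3
}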
@@ -192,30 +192,8 @@ func mapfast(t *types.Type) int {
	}

func mapfastSwiss(t *types.Type) int {
	if t.Elem().Size() > abi.SwissMapMaxElemBytes {
		return mapslow
	}
	switch reflectdata.AlgType(t.Key()) {
	case types.AMEM32:
		if !t.Key().HasPointers() {
			return mapfast32
		}
		if types.PtrSize == 4 {
			return mapfast32ptr
		}
		base.Fatalf("small pointer %v", t.Key())
	case types.AMEM64:
		if !t.Key().HasPointers() {
			return mapfast64
		}
		if types.PtrSize == 8 {
			return mapfast64ptr
		}
		// Two-word object, at least one of which is a pointer.
		// Use the slow path.
	case types.ASTRING:
		return mapfaststr
	}
	// TODO(#54766): Temporarily avoid specialized variants to minimize
	// required code.
	return mapslow
}
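A hedged sketch, not from this CL: the mapfast* constants returned by mapfast/mapfastSwiss index tables of runtime entry points. The suffix table below is hypothetical shorthand for illustration; the real compiler keeps per-operation name tables (mapaccess1, mapassign, ...) built from these suffixes.

package walksketch

const (
	mapslow = iota
	mapfast32
	mapfast32ptr
	mapfast64
	mapfast64ptr
	mapfaststr
	nmapfast
)

// Hypothetical suffix table keyed by the constants above.
var mapfastSuffix = [nmapfast]string{
	mapslow:      "", // generic mapaccess/mapassign
	mapfast32:    "_fast32",
	mapfast32ptr: "_fast32ptr", // assign only; access shares _fast32
	mapfast64:    "_fast64",
	mapfast64ptr: "_fast64ptr", // assign only; access shares _fast64
	mapfaststr:   "_faststr",
}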
@@ -180,7 +180,8 @@ func (v Value) MapIndex(key Value) Value {
	// of unexported fields.

	var e unsafe.Pointer
	if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
	// TODO(#54766): temporarily disable specialized variants.
	if false && (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
		k := *(*string)(key.ptr)
		e = mapaccess_faststr(v.typ(), v.pointer(), k)
	} else {
@@ -423,7 +424,8 @@ func (v Value) SetMapIndex(key, elem Value) {
	key.mustBeExported()
	tt := (*mapType)(unsafe.Pointer(v.typ()))

	if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
	// TODO(#54766): temporarily disable specialized variants.
	if false && (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
		k := *(*string)(key.ptr)
		if elem.typ() == nil {
			mapdelete_faststr(v.typ(), v.pointer(), k)
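A minimal sketch of the gating idiom used in the reflect hunks above: prefixing a condition with `false &&` keeps the branch type-checked (so the fast-path code keeps compiling) while guaranteeing it is dead and eliminated.

package main

import "fmt"

func lookup(m map[string]int, k string) int {
	if false && len(k) < 32 { // disabled specialized path, still type-checked
		fmt.Println("fast path would run here")
	}
	return m[k] // generic path always taken
}

func main() {
	fmt.Println(lookup(map[string]int{"a": 1}, "a")) // 1
}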
@@ -7,458 +7,29 @@
package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*4+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
	throw("mapaccess1_fast32 unimplemented")
	panic("unreachable")
}

func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*4+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
	throw("mapaccess2_fast32 unimplemented")
	panic("unreachable")
}

func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.SwissMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.SwissMapBucketCount*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
	throw("mapassign_fast32 unimplemented")
	panic("unreachable")
}

func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.SwissMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.SwissMapBucketCount*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
	throw("mapassign_fast32ptr unimplemented")
	panic("unreachable")
}

func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			// This can only happen if pointers are 32 bit
			// wide as 64 bit pointers do not fit into a 32 bit key.
			if goarch.PtrSize == 4 && t.Key.Pointers() {
				// The key must be a pointer as we checked pointers are
				// 32 bits wide and the key is 32 bits wide also.
				*(*unsafe.Pointer)(k) = nil
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*4+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.SwissMapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.SwissMapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}

func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.SwissMapBucketCount*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.SwissMapBucketCount*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.SwissMapBucketCount*4)
			for i := 0; i < abi.SwissMapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.SwissMapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.SwissMapBucketCount*4)
				}
				dst.b.tophash[dst.i&(abi.SwissMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
					// Write with a write barrier.
					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
	throw("mapdelete_fast32 unimplemented")
}
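The added throw/panic lines above give each entry point a stub body. A self-contained sketch of the pattern follows; throwStub is a hypothetical stand-in for the runtime's internal throw, which the compiler does not treat as a terminating statement, hence the trailing panic("unreachable") in value-returning stubs.

package main

// throwStub stands in for runtime.throw, which never returns but is not
// known to the compiler as terminating.
func throwStub(msg string) { panic(msg) }

// stub mirrors the shape of the disabled fast variants: unconditionally
// throw, with panic("unreachable") satisfying the requirement that a
// value-returning function end in a terminating statement.
func stub() int {
	throwStub("mapaccess1_fast32 unimplemented")
	panic("unreachable")
}

func main() {
	defer func() { println(recover().(string)) }() // mapaccess1_fast32 unimplemented
	_ = stub()
}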
@@ -7,466 +7,29 @@
package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*8+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
	throw("mapaccess1_fast64 unimplemented")
	panic("unreachable")
}

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*8+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
	throw("mapaccess2_fast64 unimplemented")
	panic("unreachable")
}

func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.SwissMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.SwissMapBucketCount*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
	throw("mapassign_fast64 unimplemented")
	panic("unreachable")
}

func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.SwissMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.SwissMapBucketCount*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
	throw("mapassign_fast64ptr unimplemented")
	panic("unreachable")
}

func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.Key.Pointers() {
				if goarch.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze one or more 32 bit pointers into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*8+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.SwissMapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.SwissMapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.SwissMapBucketCount*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.SwissMapBucketCount*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.SwissMapBucketCount*8)
			for i := 0; i < abi.SwissMapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.SwissMapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.SwissMapBucketCount*8)
				}
				dst.b.tophash[dst.i&(abi.SwissMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.Key.Pointers() && writeBarrier.enabled {
					if goarch.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.Key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
	throw("mapdelete_fast64 unimplemented")
}
@@ -7,481 +7,24 @@
package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(abi.SwissMapBucketCount)
		for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != abi.SwissMapBucketCount {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != abi.SwissMapBucketCount {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
	throw("mapaccess1_faststr unimplemented")
	panic("unreachable")
}

func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(abi.SwissMapBucketCount)
		for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != abi.SwissMapBucketCount {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != abi.SwissMapBucketCount {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
	throw("mapaccess2_faststr unimplemented")
	panic("unreachable")
}

func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			// Overwrite existing key, so it can be garbage collected.
			// The size is already guaranteed to be set correctly.
			k.str = key.str
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.SwissMapBucketCount-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
	throw("mapassign_faststr unimplemented")
	panic("unreachable")
}

func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.SwissMapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.SwissMapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.SwissMapBucketCount*2*goarch.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.SwissMapBucketCount*2*goarch.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.SwissMapBucketCount*2*goarch.PtrSize)
			for i := 0; i < abi.SwissMapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.SwissMapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.SwissMapBucketCount*2*goarch.PtrSize)
				}
				dst.b.tophash[dst.i&(abi.SwissMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*goarch.PtrSize)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
	throw("mapdelete_faststr unimplemented")
}
@@ -4,6 +4,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// TODO(#54766): Temporarily disable for swissmap, which has fast variants
// disabled. This test expects fast variants.
//
//go:build !goexperiment.swissmap

package codegen

// This file contains code generation tests related to the handling of
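For context, a hedged illustration of the test/codegen convention this file uses; the check below is hypothetical and not from the CL. Comments carry regexps matched against the compiled output for the following line, so a lookup can assert which runtime helper gets called. With fast variants disabled under swissmap such assertions would fail, hence the build constraint above.

package codegen

// Hypothetical codegen check, illustrative only:
func LookupStr(m map[string]int) int {
	// amd64:`.*mapaccess1_faststr`
	return m["key"]
}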
test/live.go (20 lines changed)
@@ -277,26 +277,6 @@ func f17a(p *byte) { // ERROR "live at entry to f17a: p$"
	m2[x2] = p // ERROR "live at call to mapassign: p$"
}

func f17b(p *byte) { // ERROR "live at entry to f17b: p$"
	// key temporary
	if b {
		m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
	}
	m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
	m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
}

func f17c() {
	// key and value temporaries
	if b {
		m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
	}
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
}

func f17d() *byte

func g18() [2]string

func f18() {
test/live_noswiss.go (new file, 42 lines)
@@ -0,0 +1,42 @@
// errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off

//go:build !goexperiment.swissmap && !goexperiment.regabiargs

// For register ABI, liveness info changes slightly. See live_regabi.go.

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// non-swissmap-specific tests for live.go
// TODO(#54766): temporary while fast variants are disabled.

package main

// str is used to ensure that a temp is required for runtime calls below.
func str() string

var b bool
var m2 map[[2]string]*byte
var m2s map[string]*byte
var x2 [2]string

func f17b(p *byte) { // ERROR "live at entry to f17b: p$"
	// key temporary
	if b {
		m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
	}
	m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
	m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
}

func f17c() {
	// key and value temporaries
	if b {
		m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
	}
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
}

func f17d() *byte
@@ -261,7 +261,6 @@ func f16() {
	delete(mi, iface())
}

var m2s map[string]*byte
var m2 map[[2]string]*byte
var x2 [2]string
var bp *byte
@@ -274,26 +273,6 @@ func f17a(p *byte) { // ERROR "live at entry to f17a: p$"
	m2[x2] = p // ERROR "live at call to mapassign: p$"
}

func f17b(p *byte) { // ERROR "live at entry to f17b: p$"
	// key temporary
	if b {
		m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
	}
	m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
	m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
}

func f17c() {
	// key and value temporaries
	if b {
		m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
	}
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
}

func f17d() *byte

func g18() [2]string

func f18() {
test/live_regabi_noswiss.go (new file, 38 lines)
@@ -0,0 +1,38 @@
// errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off

//go:build !goexperiment.swissmap && ((amd64 && goexperiment.regabiargs) || (arm64 && goexperiment.regabiargs))

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// non-swissmap-specific tests for live_regabi.go
// TODO(#54766): temporary while fast variants are disabled.

package main

func str() string

var b bool
var m2s map[string]*byte

func f17b(p *byte) { // ERROR "live at entry to f17b: p$"
	// key temporary
	if b {
		m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"

	}
	m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
	m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
}

func f17c() {
	// key and value temporaries
	if b {
		m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
	}
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
}

func f17d() *byte
test/live_regabi_swiss.go (new file, 40 lines)
@@ -0,0 +1,40 @@
// errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off

//go:build goexperiment.swissmap && ((amd64 && goexperiment.regabiargs) || (arm64 && goexperiment.regabiargs))

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// swissmap-specific tests for live_regabi.go
// TODO(#54766): temporary while fast variants are disabled.

package main

func str() string

var b bool
var m2s map[string]*byte

func f17b(p *byte) { // ERROR "live at entry to f17b: p$"
	// key temporary
	if b {
		// TODO(go.dev/issue/54766): There is an extra autotmp here vs old maps.
		m2s[str()] = p // ERROR "live at call to mapassign: p$" "live at call to str: p$" "stack object .autotmp_1 string$" "stack object .autotmp_2 string$"

	}
	m2s[str()] = p // ERROR "live at call to mapassign: p$" "live at call to str: p$"
	m2s[str()] = p // ERROR "live at call to mapassign: p$" "live at call to str: p$"
}

func f17c() {
	// key and value temporaries
	if b {
		// TODO(go.dev/issue/54766): There is an extra autotmp here vs old maps.
		m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign: .autotmp_[0-9]+$" "stack object .autotmp_0 string$" "stack object .autotmp_1 string$"
	}
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign: .autotmp_[0-9]+$"
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign: .autotmp_[0-9]+$"
}

func f17d() *byte
test/live_swiss.go (new file, 44 lines)
@@ -0,0 +1,44 @@
// errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off

//go:build goexperiment.swissmap && !goexperiment.regabiargs

// For register ABI, liveness info changes slightly. See live_regabi.go.

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// swissmap-specific tests for live.go
// TODO(#54766): temporary while fast variants are disabled.

package main

// str is used to ensure that a temp is required for runtime calls below.
func str() string

var b bool
var m2 map[[2]string]*byte
var m2s map[string]*byte
var x2 [2]string

func f17b(p *byte) { // ERROR "live at entry to f17b: p$"
	// key temporary
	if b {
		// TODO(go.dev/issue/54766): There is an extra autotmp here vs old maps.
		m2s[str()] = p // ERROR "live at call to mapassign: p$" "live at call to str: p$" "stack object .autotmp_[0-9]+ string$"
	}
	m2s[str()] = p // ERROR "live at call to mapassign: p$" "live at call to str: p$"
	m2s[str()] = p // ERROR "live at call to mapassign: p$" "live at call to str: p$"
}

func f17c() {
	// key and value temporaries
	if b {
		// TODO(go.dev/issue/54766): There is an extra autotmp here vs old maps.
		m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ string$"
	}
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign: .autotmp_[0-9]+$"
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign: .autotmp_[0-9]+$"
}

func f17d() *byte