[dev.typeparams] all: merge dev.regabi (cb05a0a) into dev.typeparams

Merge List:

+ 2021-01-05 cb05a0aa6a [dev.regabi] cmd/compile: remove toolstash scaffolding
+ 2021-01-05 9821838832 [dev.regabi] cmd/compile: remove CaptureVars
+ 2021-01-05 fd43831f44 [dev.regabi] cmd/compile: reimplement capture analysis
+ 2021-01-05 fb69c67cad [dev.regabi] test: enable finalizer tests on !amd64
+ 2021-01-05 81f4f0e912 [dev.regabi] cmd/compile: remove race-y check in Name.Canonical
+ 2021-01-05 4a9d9adea4 [dev.regabi] cmd/compile: remove initname function

Change-Id: I519f349ff62b6c9bc5db2a0d34feef4b5d42cbae
Author: Matthew Dempsky
Date: 2021-01-07 11:37:41 -08:00
Commit: 5b9152de57
10 changed files with 153 additions and 171 deletions

src/cmd/compile/internal/escape/escape.go

@@ -88,12 +88,20 @@ import (
// A batch holds escape analysis state that's shared across an entire
// batch of functions being analyzed at once.
type batch struct {
-	allLocs []*location
+	allLocs  []*location
+	closures []closure
heapLoc location
blankLoc location
}
+// A closure holds a closure expression and its spill hole (i.e.,
+// the hole representing the store into its closure record).
+type closure struct {
+	k   hole
+	clo *ir.ClosureExpr
+}
// An escape holds state specific to a single function being analyzed
// within a batch.
type escape struct {
@@ -145,6 +153,10 @@ type location struct {
// paramEsc records the represented parameter's leak set.
paramEsc leaks
+	captured   bool // has a closure captured this variable?
+	reassigned bool // has this variable been reassigned?
+	addrtaken  bool // has this variable's address been taken?
}
// An edge represents an assignment edge between two Go variables.
@@ -209,6 +221,21 @@ func Batch(fns []*ir.Func, recursive bool) {
}
}
+	// We've walked the function bodies, so we've seen everywhere a
+	// variable might be reassigned or have its address taken. Now we
+	// can decide whether closures should capture their free variables
+	// by value or reference.
+	for _, closure := range b.closures {
+		b.flowClosure(closure.k, closure.clo)
+	}
+	b.closures = nil
+
+	for _, loc := range b.allLocs {
+		if why := HeapAllocReason(loc.n); why != "" {
+			b.flow(b.heapHole().addr(loc.n, why), loc)
+		}
+	}
b.walkAll()
b.finish(fns)
}
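The two-stage structure matters because the capture decision needs whole-function information: a reassignment or address-of anywhere after a closure is created is observable through a by-reference capture. A small illustration in ordinary user code (not compiler internals) of why the two modes are distinguishable:

```go
package main

import "fmt"

func main() {
	x := 1
	f := func() int { return x }
	x = 2 // reassigned after capture: x must be captured by reference

	y := 1
	g := func() int { return y } // y never changes again: a by-value copy is unobservable

	fmt.Println(f(), g()) // prints "2 1"
}
```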
@@ -270,6 +297,37 @@ func (b *batch) walkFunc(fn *ir.Func) {
}
}
+func (b *batch) flowClosure(k hole, clo *ir.ClosureExpr) {
+	for _, cv := range clo.Func.ClosureVars {
+		n := cv.Canonical()
+		loc := b.oldLoc(cv)
+		if !loc.captured {
+			base.FatalfAt(cv.Pos(), "closure variable never captured: %v", cv)
+		}
+
+		// Capture by value for variables <= 128 bytes that are never reassigned.
+		n.SetByval(!loc.addrtaken && !loc.reassigned && n.Type().Size() <= 128)
+		if !n.Byval() {
+			n.SetAddrtaken(true)
+		}
+
+		if base.Flag.LowerM > 1 {
+			how := "ref"
+			if n.Byval() {
+				how = "value"
+			}
+			base.WarnfAt(n.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", n.Curfn, how, n, loc.addrtaken, loc.reassigned, n.Type().Size())
+		}
+
+		// Flow captured variables to closure.
+		k := k
+		if !cv.Byval() {
+			k = k.addr(cv, "reference")
+		}
+		b.flow(k.note(cv, "captured by a closure"), loc)
+	}
+}
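The predicate flowClosure applies is compact enough to state on its own. A standalone sketch, where the varInfo struct is a hypothetical stand-in for the analysis' per-variable location state:

```go
// varInfo is a hypothetical stand-in for location's capture-relevant fields.
type varInfo struct {
	addrtaken  bool  // was the variable's address taken anywhere?
	reassigned bool  // was it assigned again after its declaration?
	size       int64 // size of its type in bytes
}

// captureByValue mirrors the rule above: copy only when the copy can
// never be observed (no address taken, no reassignment) and is cheap.
func captureByValue(v varInfo) bool {
	return !v.addrtaken && !v.reassigned && v.size <= 128
}
```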
// Below we implement the methods for walking the AST and recording
// data flow edges. Note that because a sub-expression might have
// side-effects, it's important to always visit the entire AST.
@@ -308,7 +366,7 @@ func (e *escape) stmt(n ir.Node) {
}()
if base.Flag.LowerM > 2 {
-		fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, funcSym(e.curfn), n)
+		fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n)
}
e.stmts(n.Init())
@@ -380,6 +438,7 @@ func (e *escape) stmt(n ir.Node) {
} else {
e.flow(ks[1].deref(n, "range-deref"), tmp)
}
+		e.reassigned(ks, n)
e.block(n.Body)
e.loopDepth--
@@ -447,7 +506,9 @@ func (e *escape) stmt(n ir.Node) {
case ir.OAS2FUNC:
n := n.(*ir.AssignListStmt)
e.stmts(n.Rhs[0].Init())
-		e.call(e.addrs(n.Lhs), n.Rhs[0], nil)
+		ks := e.addrs(n.Lhs)
+		e.call(ks, n.Rhs[0], nil)
+		e.reassigned(ks, n)
case ir.ORETURN:
n := n.(*ir.ReturnStmt)
results := e.curfn.Type().Results().FieldSlice()
@@ -507,7 +568,7 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
// nop
} else if k.derefs >= 0 && !n.Type().HasPointers() {
-		k = e.discardHole()
+		k.dst = &e.blankLoc
}
switch n.Op() {
@@ -691,20 +752,23 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
+	k = e.spill(k, n)
+	e.closures = append(e.closures, closure{k, n})
+
	if fn := n.Func; fn.IsHiddenClosure() {
-		e.walkFunc(fn)
-	}
-
-	// Link addresses of captured variables to closure.
-	k = e.spill(k, n)
-	for _, v := range n.Func.ClosureVars {
-		k := k
-		if !v.Byval() {
-			k = k.addr(v, "reference")
-		}
-
-		e.expr(k.note(n, "captured by a closure"), v.Defn)
+		for _, cv := range fn.ClosureVars {
+			if loc := e.oldLoc(cv); !loc.captured {
+				loc.captured = true
+
+				// Ignore reassignments to the variable in straightline code
+				// preceding the first capture by a closure.
+				if loc.loopDepth == e.loopDepth {
+					loc.reassigned = false
+				}
+			}
+		}
+
+		e.walkFunc(fn)
}
case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
@@ -728,6 +792,9 @@ func (e *escape) unsafeValue(k hole, n ir.Node) {
if n.Type().Kind() != types.TUINTPTR {
base.Fatalf("unexpected type %v for %v", n.Type(), n)
}
+	if k.addrtaken {
+		base.Fatalf("unexpected addrtaken")
+	}
e.stmts(n.Init())
@@ -828,33 +895,59 @@ func (e *escape) addrs(l ir.Nodes) []hole {
return ks
}
+// reassigned marks the locations associated with the given holes as
+// reassigned, unless the location represents a variable declared and
+// assigned exactly once by where.
+func (e *escape) reassigned(ks []hole, where ir.Node) {
+	if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil {
+		if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil {
+			// Zero-value assignment for variable declared without an
+			// explicit initial value. Assume this is its initialization
+			// statement.
+			return
+		}
+	}
+
+	for _, k := range ks {
+		loc := k.dst
+		// Variables declared by range statements are assigned on every iteration.
+		if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE {
+			continue
+		}
+		loc.reassigned = true
+	}
+}
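In user-level terms, reassigned draws roughly these distinctions (a sketch, not compiler internals):

```go
func sketch() {
	a := 1    // a's defining assignment (its Defn): skipped
	var b int // zero-value declaration: treated as b's initialization, skipped
	a = 2     // ordinary store: marks a's location as reassigned
	b = 3     // likewise marks b's location
	_, _ = a, b
}
```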
// assignList evaluates the assignment dsts... = srcs....
func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) {
-	for i, dst := range dsts {
-		var src ir.Node
-		if i < len(srcs) {
-			src = srcs[i]
-		}
-		e.assign(dst, src, why, where)
-	}
-}
-
-// assign evaluates the assignment dst = src.
-func (e *escape) assign(dst, src ir.Node, why string, where ir.Node) {
-	// Filter out some no-op assignments for escape analysis.
-	ignore := dst != nil && src != nil && isSelfAssign(dst, src)
-	if ignore && base.Flag.LowerM != 0 {
-		base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", funcSym(e.curfn), where)
-	}
-
-	k := e.addr(dst)
-	if dst != nil && dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
-		e.unsafeValue(e.heapHole().note(where, why), src)
-	} else {
-		if ignore {
-			k = e.discardHole()
-		}
-		e.expr(k.note(where, why), src)
-	}
-}
+	ks := e.addrs(dsts)
+	for i, k := range ks {
+		var src ir.Node
+		if i < len(srcs) {
+			src = srcs[i]
+		}
+
+		if dst := dsts[i]; dst != nil {
+			// Detect implicit conversion of uintptr to unsafe.Pointer when
+			// storing into reflect.{Slice,String}Header.
+			if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
+				e.unsafeValue(e.heapHole().note(where, why), src)
+				continue
+			}
+
+			// Filter out some no-op assignments for escape analysis.
+			if src != nil && isSelfAssign(dst, src) {
+				if base.Flag.LowerM != 0 {
+					base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where)
+				}
+				k = e.discardHole()
+			}
+		}
+
+		e.expr(k.note(where, why), src)
+	}
+
+	e.reassigned(ks, where)
+}
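The self-assignment filter matters for common patterns such as re-slicing a field in place, which would otherwise look like fresh data flow on every call. A user-level example of a store the analysis ignores:

```go
type buffer struct {
	data []byte
}

// Truncate stores a slice of b.data back into b.data. Escape analysis
// treats this as a self-assignment (the isSelfAssign check above) and
// adds no new flow edge, so the store by itself cannot cause an escape.
func (b *buffer) Truncate(n int) {
	b.data = b.data[:n]
}
```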
func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) {
@@ -1034,7 +1127,7 @@ func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
func (e *escape) inMutualBatch(fn *ir.Name) bool {
if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged {
if fn.Defn.Esc() == escFuncUnknown {
base.Fatalf("graph inconsistency")
base.Fatalf("graph inconsistency: %v", fn)
}
return true
}
@@ -1049,6 +1142,11 @@ type hole struct {
derefs int // >= -1
notes *note
+	// addrtaken indicates whether this context is taking the address of
+	// the expression, independent of whether the address will actually
+	// be stored into a variable.
+	addrtaken bool
// uintptrEscapesHack indicates this context is evaluating an
// argument for a //go:uintptrescapes function.
uintptrEscapesHack bool
@@ -1079,6 +1177,7 @@ func (k hole) shift(delta int) hole {
if k.derefs < -1 {
base.Fatalf("derefs underflow: %v", k.derefs)
}
+	k.addrtaken = delta < 0
return k
}
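The new assignment falls out of the derefs convention used throughout this file: negative counts are address-of operations, positive counts are dereferences, so any negative shift means the context takes an address. Schematically, in user-level terms:

```go
func derefsSketch() {
	var x int
	p := &x // an edge with derefs == -1 (address-of): x becomes addrtaken
	q := p  // an edge with derefs == 0 (plain copy)
	y := *p // an edge with derefs == +1 (dereference)
	_, _ = q, y
}
```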
@@ -1123,6 +1222,9 @@ func (e *escape) teeHole(ks ...hole) hole {
}
func (e *escape) dcl(n *ir.Name) hole {
+	if n.Curfn != e.curfn || n.IsClosureVar() {
+		base.Fatalf("bad declaration of %v", n)
+	}
loc := e.oldLoc(n)
loc.loopDepth = e.loopDepth
return loc.asHole()
@@ -1176,10 +1278,6 @@ func (e *escape) newLoc(n ir.Node, transient bool) *location {
}
n.Opt = loc
}
-	if why := HeapAllocReason(n); why != "" {
-		e.flow(e.heapHole().addr(n, why), loc)
-	}
}
return loc
}
@@ -1192,9 +1290,13 @@ func (l *location) asHole() hole {
return hole{dst: l}
}
-func (e *escape) flow(k hole, src *location) {
+func (b *batch) flow(k hole, src *location) {
+	if k.addrtaken {
+		src.addrtaken = true
+	}
+
	dst := k.dst
-	if dst == &e.blankLoc {
+	if dst == &b.blankLoc {
return
}
if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
@@ -1206,9 +1308,10 @@ func (e *escape) flow(k hole, src *location) {
if base.Flag.LowerM >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
}
-	explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
+	explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
	if logopt.Enabled() {
-		logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
+		var e_curfn *ir.Func // TODO(mdempsky): Fix.
+		logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
}
}
@@ -1220,8 +1323,8 @@ func (e *escape) flow(k hole, src *location) {
dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
}
-func (e *escape) heapHole() hole    { return e.heapLoc.asHole() }
-func (e *escape) discardHole() hole { return e.blankLoc.asHole() }
+func (b *batch) heapHole() hole    { return b.heapLoc.asHole() }
+func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
// walkAll computes the minimal dereferences between all pairs of
// locations.
@@ -1686,14 +1789,6 @@ const (
escFuncTagged
)
-// funcSym returns fn.Nname.Sym if no nils are encountered along the way.
-func funcSym(fn *ir.Func) *types.Sym {
-	if fn == nil || fn.Nname == nil {
-		return nil
-	}
-	return fn.Sym()
-}
// Mark labels that have no backjumps to them as not increasing e.loopdepth.
type labelState int
@@ -1863,7 +1958,7 @@ func mayAffectMemory(n ir.Node) bool {
// HeapAllocReason returns the reason the given Node must be heap
// allocated, or the empty string if it doesn't.
func HeapAllocReason(n ir.Node) string {
-	if n.Type() == nil {
+	if n == nil || n.Type() == nil {
return ""
}

src/cmd/compile/internal/gc/main.go

@@ -232,22 +232,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
}
typecheck.IncrementalAddrtaken = true
-	// Decide how to capture closed variables.
-	// This needs to run before escape analysis,
-	// because variables captured by value do not escape.
-	base.Timer.Start("fe", "capturevars")
-	for _, n := range typecheck.Target.Decls {
-		if n.Op() == ir.ODCLFUNC {
-			n := n.(*ir.Func)
-			if n.OClosure != nil {
-				ir.CurFunc = n
-				typecheck.CaptureVars(n)
-			}
-		}
-	}
-	typecheck.CaptureVarsComplete = true
-	ir.CurFunc = nil
if base.Debug.TypecheckInl != 0 {
// Typecheck imported function bodies if Debug.l > 1,
// otherwise lazily when used or re-exported.
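The ordering constraint stated in the deleted comment survives in the new design: capture mode feeds escape analysis, because a by-value capture copies the variable into the closure record and the original need not escape with the closure. For instance:

```go
// i is small and never reassigned, so it is captured by value: the
// returned closure carries a copy, and i itself can stay on the stack.
func makeAdder(i int) func(int) int {
	return func(x int) int { return x + i }
}
```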

src/cmd/compile/internal/ir/name.go

@@ -59,8 +59,7 @@ type Name struct {
// (results) are numbered starting at one, followed by function inputs
// (parameters), and then local variables. Vargen is used to distinguish
// local variables/params with the same name.
-	Vargen    int32
-	Decldepth int32 // declaration loop depth, increased for every loop or label
+	Vargen int32
Ntype Ntype
Heapaddr *Name // temp holding heap address of param
@@ -260,15 +259,13 @@ func (n *Name) Alias() bool { return n.flags&nameAlias != 0 }
func (n *Name) SetAlias(alias bool) { n.flags.set(nameAlias, alias) }
const (
-	nameCaptured = 1 << iota // is the variable captured by a closure
-	nameReadonly
+	nameReadonly = 1 << iota
nameByval // is the variable captured by value or by reference
nameNeedzero // if it contains pointers, needs to be zeroed on function entry
nameAutoTemp // is the variable a temporary (implies no dwarf info. reset if escapes to heap)
nameUsed // for variable declared and not used error
nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original at n.Name.Defn
nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy
-	nameAssigned // is the variable ever assigned to
nameAddrtaken // address taken, even if not moved to heap
nameInlFormal // PAUTO created by inliner, derived from callee formal
nameInlLocal // PAUTO created by inliner, derived from callee local
@@ -277,28 +274,24 @@ const (
nameAlias // is type name an alias
)
-func (n *Name) Captured() bool { return n.flags&nameCaptured != 0 }
func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 }
func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 }
func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 }
func (n *Name) Used() bool { return n.flags&nameUsed != 0 }
func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 }
func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 }
-func (n *Name) Assigned() bool { return n.flags&nameAssigned != 0 }
func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 }
func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 }
func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 }
func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 }
func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 }
-func (n *Name) SetCaptured(b bool) { n.flags.set(nameCaptured, b) }
func (n *Name) setReadonly(b bool) { n.flags.set(nameReadonly, b) }
func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) }
func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) }
func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) }
func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) }
func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) }
-func (n *Name) SetAssigned(b bool) { n.flags.set(nameAssigned, b) }
func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) }
func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) }
func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) }
@@ -341,9 +334,6 @@ func (n *Name) SetVal(v constant.Value) {
func (n *Name) Canonical() *Name {
if n.IsClosureVar() {
n = n.Defn.(*Name)
-		if n.IsClosureVar() {
-			base.Fatalf("recursive closure variable: %v", n)
-		}
}
return n
}
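The accessors above all follow the same mask-on-a-small-integer pattern (the package's bitset16 helper), so deleting a flag such as nameCaptured simply frees its bit. A self-contained sketch of that pattern:

```go
type bitset16 uint16

func (f *bitset16) set(mask bitset16, b bool) {
	if b {
		*f |= mask
	} else {
		*f &^= mask
	}
}

const (
	flagReadonly bitset16 = 1 << iota
	flagByval
	flagNeedzero
)

func flagsExample() {
	var flags bitset16
	flags.set(flagByval, true)
	byval := flags&flagByval != 0 // true
	flags.set(flagByval, false)
	_ = byval
}
```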

src/cmd/compile/internal/ir/sizeof_test.go

@@ -21,7 +21,7 @@ func TestSizeof(t *testing.T) {
_64bit uintptr // size on 64bit platforms
}{
{Func{}, 184, 320},
-		{Name{}, 124, 216},
+		{Name{}, 120, 216},
}
for _, tt := range tests {
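The table pins the expected struct sizes per pointer width, so accidental field growth fails the test; dropping Decldepth shrank Name by four bytes on 32-bit platforms (padding absorbed it on 64-bit). The same guard pattern in isolation, with a made-up struct and sizes:

```go
package p

import (
	"testing"
	"unsafe"
)

// T stands in for the guarded struct; the expected sizes below match
// this particular layout (one pointer-sized word plus two int32s).
type T struct {
	a uintptr
	b int32
	c int32
}

func TestSizeofT(t *testing.T) {
	want := uintptr(12) // 32-bit platforms
	if unsafe.Sizeof(uintptr(0)) == 8 {
		want = 16 // 64-bit platforms
	}
	if got := unsafe.Sizeof(T{}); got != want {
		t.Errorf("unsafe.Sizeof(T{}) = %d, want %d", got, want)
	}
}
```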

src/cmd/compile/internal/typecheck/dcl.go

@@ -266,7 +266,7 @@ func autoexport(n *ir.Name, ctxt ir.Class) {
return
}
-	if types.IsExported(n.Sym().Name) || initname(n.Sym().Name) {
+	if types.IsExported(n.Sym().Name) || n.Sym().Name == "init" {
Export(n)
}
if base.Flag.AsmHdr != "" && !n.Sym().Asm() {
@@ -422,10 +422,6 @@ func funcargs2(t *types.Type) {
}
}
-func initname(s string) bool {
-	return s == "init"
-}
var vargen int
func Temp(t *types.Type) *ir.Name {

src/cmd/compile/internal/typecheck/func.go

@@ -100,32 +100,6 @@ func PartialCallType(n *ir.SelectorExpr) *types.Type {
return t
}
-// CaptureVars is called in a separate phase after all typechecking is done.
-// It decides whether each variable captured by a closure should be captured
-// by value or by reference.
-// We use value capturing for values <= 128 bytes that are never reassigned
-// after capturing (effectively constant).
-func CaptureVars(fn *ir.Func) {
-	for _, v := range fn.ClosureVars {
-		outermost := v.Defn.(*ir.Name)
-
-		// out parameters will be assigned to implicitly upon return.
-		if outermost.Class != ir.PPARAMOUT && !outermost.Addrtaken() && !outermost.Assigned() && outermost.Type().Size() <= 128 {
-			outermost.SetByval(true)
-		} else {
-			outermost.SetAddrtaken(true)
-		}
-
-		if base.Flag.LowerM > 1 {
-			how := "ref"
-			if v.Byval() {
-				how = "value"
-			}
-			base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", v.Curfn, how, v, outermost.Addrtaken(), outermost.Assigned(), v.Type().Size())
-		}
-	}
-}
// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
// because they're a copy of an already checked body.
func ImportedBody(fn *ir.Func) {
@@ -198,9 +172,6 @@ func fnpkg(fn *ir.Name) *types.Pkg {
return fn.Sym().Pkg
}
-// CaptureVarsComplete is set to true when the capturevars phase is done.
-var CaptureVarsComplete bool
// closurename generates a new unique name for a closure within
// outerfunc.
func closurename(outerfunc *ir.Func) *types.Sym {
@@ -336,22 +307,6 @@ func tcClosure(clo *ir.ClosureExpr, top int) {
return
}
-	for _, ln := range fn.ClosureVars {
-		n := ln.Defn
-		if !n.Name().Captured() {
-			n.Name().SetCaptured(true)
-			if n.Name().Decldepth == 0 {
-				base.Fatalf("typecheckclosure: var %v does not have decldepth assigned", n)
-			}
-
-			// Ignore assignments to the variable in straightline code
-			// preceding the first capturing by a closure.
-			if n.Name().Decldepth == decldepth {
-				n.Name().SetAssigned(false)
-			}
-		}
-	}
fn.Nname.SetSym(closurename(ir.CurFunc))
ir.MarkFunc(fn.Nname)
Func(fn)
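The straightline-code exception that this deleted loop implemented (and that the escape-analysis replacement keeps, as seen in the OCLOSURE case earlier) covers initialization patterns like the following sketch:

```go
func f() func() int {
	v := 1
	v = 2 // straight-line store before any closure exists: ignored for capture
	c := func() int { return v } // v can still be captured by value
	// A store to v here, after the capture, would force by-reference capture.
	return c
}
```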
@@ -363,10 +318,7 @@ func tcClosure(clo *ir.ClosureExpr, top int) {
if ir.CurFunc != nil && clo.Type() != nil {
oldfn := ir.CurFunc
ir.CurFunc = fn
-	olddd := decldepth
-	decldepth = 1
	Stmts(fn.Body)
-	decldepth = olddd
ir.CurFunc = oldfn
}
@@ -400,12 +352,6 @@ func tcFunc(n *ir.Func) {
defer tracePrint("typecheckfunc", n)(nil)
}
-	for _, ln := range n.Dcl {
-		if ln.Op() == ir.ONAME && (ln.Class == ir.PPARAM || ln.Class == ir.PPARAMOUT) {
-			ln.Decldepth = 1
-		}
-	}
n.Nname = AssignExpr(n.Nname).(*ir.Name)
t := n.Nname.Type()
if t == nil {

src/cmd/compile/internal/typecheck/stmt.go

@@ -228,7 +228,6 @@ func plural(n int) string {
// tcFor typechecks an OFOR node.
func tcFor(n *ir.ForStmt) ir.Node {
Stmts(n.Init())
-	decldepth++
n.Cond = Expr(n.Cond)
n.Cond = DefaultLit(n.Cond, nil)
if n.Cond != nil {
@@ -242,7 +241,6 @@ func tcFor(n *ir.ForStmt) ir.Node {
Stmts(n.Late)
}
Stmts(n.Body)
-	decldepth--
return n
}
@@ -337,9 +335,7 @@ func tcRange(n *ir.RangeStmt) {
n.Value = AssignExpr(n.Value)
}
-	decldepth++
	Stmts(n.Body)
-	decldepth--
}
// tcReturn typechecks an ORETURN node.

src/cmd/compile/internal/typecheck/typecheck.go

@@ -21,8 +21,6 @@ var InitTodoFunc = ir.NewFunc(base.Pos)
var inimport bool // set during import
-var decldepth int32
var TypecheckAllowed bool
var (
@@ -58,7 +56,6 @@ func Callee(n ir.Node) ir.Node {
func FuncBody(n *ir.Func) {
ir.CurFunc = n
-	decldepth = 1
errorsBefore := base.Errors()
Stmts(n.Body)
CheckUnused(n)
@@ -506,9 +503,6 @@ func typecheck1(n ir.Node, top int) ir.Node {
case ir.ONAME:
n := n.(*ir.Name)
-		if n.Decldepth == 0 {
-			n.Decldepth = decldepth
-		}
if n.BuiltinOp != 0 {
if top&ctxCallee == 0 {
base.Errorf("use of builtin %v not in function call", n.Sym())
@@ -839,7 +833,6 @@ func typecheck1(n ir.Node, top int) ir.Node {
return n
case ir.OLABEL:
-		decldepth++
if n.Sym().IsBlank() {
// Empty identifier is valid but useless.
// Eliminate now to simplify life later.
@@ -1620,18 +1613,6 @@ func checkassign(stmt ir.Node, n ir.Node) {
return
}
-	// Variables declared in ORANGE are assigned on every iteration.
-	if !ir.DeclaredBy(n, stmt) || stmt.Op() == ir.ORANGE {
-		r := ir.OuterValue(n)
-		if r.Op() == ir.ONAME {
-			r := r.(*ir.Name)
-			r.SetAssigned(true)
-			if r.IsClosureVar() {
-				r.Defn.Name().SetAssigned(true)
-			}
-		}
-	}
if ir.IsAddressable(n) {
return
}
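The ORANGE carve-out exists because a range statement assigns its iteration variables on every pass, even though it is also their declaration. That is the root of the classic captured-loop-variable surprise:

```go
func collect(xs []int) []func() int {
	var fs []func() int
	for _, x := range xs {
		x := x // fresh variable per iteration, assigned exactly once
		fs = append(fs, func() int { return x })
	}
	return fs
}
```

Without the inner x := x, every closure would share the single range variable, which is reassigned on each iteration.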

test/deferfin.go

@@ -18,12 +18,8 @@ import (
var sink func()
func main() {
-	// Does not work on 32-bits due to partially conservative GC.
+	// Does not work with gccgo, due to partially conservative GC.
	// Try to enable when we have fully precise GC.
-	if runtime.GOARCH != "amd64" {
-		return
-	}
-	// Likewise for gccgo.
if runtime.Compiler == "gccgo" {
return
}
@@ -60,4 +56,3 @@ func main() {
panic("not all finalizers are called")
}
}
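Both finalizer tests follow the same shape: attach finalizers, drop every reference, then force collections until the finalizers run. Precise GC is what makes that reliable regardless of GOARCH, which is why the architecture guard could go. A condensed sketch of the pattern:

```go
package main

import (
	"runtime"
	"sync/atomic"
	"time"
)

func main() {
	var ran int32
	func() {
		p := new(int)
		runtime.SetFinalizer(p, func(*int) { atomic.StoreInt32(&ran, 1) })
	}() // p is unreachable once this call returns

	for i := 0; i < 10 && atomic.LoadInt32(&ran) == 0; i++ {
		runtime.GC()
		time.Sleep(10 * time.Millisecond)
	}
	if atomic.LoadInt32(&ran) == 0 {
		panic("finalizer did not run")
	}
}
```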

test/fixedbugs/issue5493.go

@@ -14,6 +14,7 @@ import (
)
const N = 10
var count int64
func run() error {
@@ -31,10 +32,9 @@ func run() error {
}
func main() {
-	// Does not work on 32-bits, or with gccgo, due to partially
-	// conservative GC.
+	// Does not work with gccgo, due to partially conservative GC.
	// Try to enable when we have fully precise GC.
-	if runtime.GOARCH != "amd64" || runtime.Compiler == "gccgo" {
+	if runtime.Compiler == "gccgo" {
return
}
count = N
@@ -56,4 +56,3 @@ func main() {
panic("not all finalizers are called")
}
}