runtime: generate the lock ranking from a DAG description

Currently, the runtime lock rank graph is maintained manually in a
large set of arrays that give the partial order and a manual
topological sort of this partial order. Any changes to the rank graph
are difficult to reason about and hard to review, as well as likely to
cause merge conflicts. Furthermore, because the partial order is
manually maintained, it's not actually transitively closed (though
it's close), meaning there are many cases where rank a can be acquired
before b and b before c, but a cannot be acquired before c. While this
isn't technically wrong, it's very strange in the context of lock
ordering.

Replace all of this with a much more compact, readable, and
maintainable description of the rank graph written in the internal/dag
graph language. We statically generate the runtime structures from
this description, which has the advantage that the parser doesn't have
to run during runtime initialization and the structures can live in
static data where they can be accessed from any point during runtime
init.
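
As a rough illustration of the workflow (assuming the internal/dag API
shown in the diffs below; internal/dag is importable only from within
GOROOT/src):

	package main

	import (
		"fmt"
		"internal/dag" // internal: usable only inside the Go tree
	)

	func main() {
		// A toy graph in the dag language; "a < b" means a must be
		// acquired before b. Parse transitively closes the relation.
		g, err := dag.Parse(`NONE < a < b; a < c; b, c < d;`)
		if err != nil {
			panic(err)
		}
		// Topo (added in this CL) returns a deterministic order.
		for _, n := range g.Topo() {
			fmt.Println(n, "can be acquired while holding:", g.Edges(n))
		}
	}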

The current description was automatically generated from the existing
partial order, combined with a transitive reduction. This ensures it's
correct, but it could use some manual massaging to call out the
logical layers and add some structure.

We do lose the ad hoc string names of the lock ranks in this
translation, which could mostly be derived from the rank constant
names, but not always. I may bring those back but in a more uniform
way.

We no longer need the tests in lockrank_test.go because they were
checking that we manually maintained the structures correctly.

Fixes #53789.

Change-Id: I54451d561b22e61150aff7e9b8602ba9737e1b9b
Reviewed-on: https://go-review.googlesource.com/c/go/+/418715
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Author: Austin Clements <austin@google.com>
Date: 2022-07-19 17:31:52 -04:00
parent ddfd639408
commit 2b8a9a484f
8 changed files with 472 additions and 181 deletions

src/internal/dag/alg.go (new file)

@@ -0,0 +1,63 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dag
// Transpose reverses all edges in g.
func (g *Graph) Transpose() {
old := g.edges
g.edges = make(map[string]map[string]bool)
for _, n := range g.Nodes {
g.edges[n] = make(map[string]bool)
}
for from, tos := range old {
for to := range tos {
g.edges[to][from] = true
}
}
}
// Topo returns a topological sort of g. This function is deterministic.
func (g *Graph) Topo() []string {
topo := make([]string, 0, len(g.Nodes))
marks := make(map[string]bool)
var visit func(n string)
visit = func(n string) {
if marks[n] {
return
}
marks[n] = true
for _, to := range g.Edges(n) {
visit(to)
}
topo = append(topo, n)
}
for _, root := range g.Nodes {
visit(root)
}
for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
topo[i], topo[j] = topo[j], topo[i]
}
return topo
}
// TransitiveReduction removes edges from g that are transitively
// reachable. g must be transitively closed.
func (g *Graph) TransitiveReduction() {
// For i -> j -> k, if i -> k exists, delete it.
for _, i := range g.Nodes {
for _, j := range g.Nodes {
if g.HasEdge(i, j) {
for _, k := range g.Nodes {
if g.HasEdge(j, k) {
g.DelEdge(i, k)
}
}
}
}
}
}
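
For instance, under a diamond-shaped graph like the one the tests below
use, these helpers compose as follows (a hypothetical sketch using the
exported Parse entry point rather than the test-only mustParse):

	g, err := dag.Parse(`NONE < a < b, c < d;`) // Parse yields the transitive closure
	if err != nil {
		panic(err)
	}
	g.TransitiveReduction() // drops d -> a, implied by d -> b -> a
	g.Transpose()           // flip edges so they read in "<" direction
	fmt.Println(g.Topo())   // deterministic topological order, roots first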

src/internal/dag/alg_test.go (new file)

@@ -0,0 +1,46 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dag
import (
"reflect"
"strings"
"testing"
)
func TestTranspose(t *testing.T) {
g := mustParse(t, diamond)
g.Transpose()
wantEdges(t, g, "a->b a->c a->d b->d c->d")
}
func TestTopo(t *testing.T) {
g := mustParse(t, diamond)
got := g.Topo()
// "d" is the root, so it's first.
//
// "c" and "b" could be in either order, but Topo is
// deterministic in reverse node definition order.
//
// "a" is a leaf.
wantNodes := strings.Fields("d c b a")
if !reflect.DeepEqual(wantNodes, got) {
t.Fatalf("want topo sort %v, got %v", wantNodes, got)
}
}
func TestTransitiveReduction(t *testing.T) {
t.Run("diamond", func(t *testing.T) {
g := mustParse(t, diamond)
g.TransitiveReduction()
wantEdges(t, g, "b->a c->a d->b d->c")
})
t.Run("chain", func(t *testing.T) {
const chain = `NONE < a < b < c < d; a, d < e;`
g := mustParse(t, chain)
g.TransitiveReduction()
wantEdges(t, g, "e->d d->c c->b b->a")
})
}

src/internal/dag/parse.go

@@ -71,6 +71,10 @@ func (g *Graph) AddEdge(from, to string) {
g.edges[from][to] = true
}
func (g *Graph) DelEdge(from, to string) {
delete(g.edges[from], to)
}
func (g *Graph) HasEdge(from, to string) bool {
return g.edges[from] != nil && g.edges[from][to]
}

src/runtime/lockrank.go

@@ -1,161 +1,111 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file records the static ranks of the locks in the runtime. If a lock
// is not given a rank, then it is assumed to be a leaf lock, which means no other
// lock can be acquired while it is held. Therefore, leaf locks do not need to be
// given an explicit rank. We list all of the architecture-independent leaf locks
// for documentation purposes, but don't list any of the architecture-dependent
// locks (which are all leaf locks). debugLock is ignored for ranking, since it is used
// when printing out lock ranking errors.
//
// lockInit(l *mutex, rank int) is used to set the rank of lock before it is used.
// If there is no clear place to initialize a lock, then the rank of a lock can be
specified during the lock call itself via lockWithRank(l *mutex, rank int).
//
// Besides the static lock ranking (which is a total ordering of the locks), we
// also represent and enforce the actual partial order among the locks in the
// arcs[] array below. That is, if it is possible that lock B can be acquired when
// lock A is the previous acquired lock that is still held, then there should be
// an entry for A in arcs[B][]. We will currently fail not only if the total order
// (the lock ranking) is violated, but also if there is a missing entry in the
// partial order.
// Code generated by mklockrank.go; DO NOT EDIT.
package runtime
type lockRank int
// Constants representing the lock rank of the architecture-independent locks in
// the runtime. Locks with lower rank must be taken before locks with higher
// rank.
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
lockRankDummy lockRank = iota
lockRankUnknown lockRank = iota
// Locks held above sched
lockRankSysmon
lockRankScavenge
lockRankForcegc
lockRankSweepWaiters
lockRankAssistQueue
lockRankCpuprof
lockRankSweep
lockRankPollDesc
lockRankSched
lockRankDeadlock
lockRankItab
lockRankNotifyList
lockRankRoot
lockRankRwmutexW
lockRankDefer
lockRankScavenge
lockRankForcegc
lockRankSched
lockRankAllg
lockRankAllp
lockRankTimers // Multiple timers locked simultaneously in destroy()
lockRankItab
lockRankTimers
lockRankReflectOffs
lockRankHchan // Multiple hchans acquired in lock order in syncadjustsudogs()
lockRankHchan
lockRankTraceBuf
lockRankFin
lockRankNotifyList
lockRankTraceStrings
lockRankMspanSpecial
lockRankProfInsert
lockRankProfBlock
lockRankProfMemActive
lockRankProfMemFuture
lockRankGcBitsArenas
lockRankRoot
lockRankSpanSetSpine
lockRankMheapSpecial
lockRankProfMemFuture
lockRankTrace
lockRankTraceStackTab
lockRankNetpollInit
lockRankRwmutexW
lockRankRwmutexR
lockRankSpanSetSpine
lockRankGscan
lockRankStackpool
lockRankStackLarge
lockRankDefer
lockRankHchanLeaf
lockRankSudog
// Memory-related non-leaf locks
lockRankWbufSpans
lockRankMheap
lockRankMheapSpecial
// Memory-related leaf locks
lockRankGlobalAlloc
// Other leaf locks
// Generally, hchan must be acquired before gscan. But in one specific
// case (in syncadjustsudogs from markroot after the g has been suspended
// by suspendG), we allow gscan to be acquired, and then an hchan lock. To
allow this case, we use the lockRankHchanLeaf rank in
// syncadjustsudogs(), rather than lockRankHchan. By using this special
// rank, we don't allow any further locks to be acquired other than more
// hchan locks.
lockRankHchanLeaf
lockRankPanic
)
// lockRankLeafRank is the rank of lock that does not have a declared rank, and hence is
// a leaf lock.
// lockRankLeafRank is the rank of lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
// lockNames gives the names associated with each of the above ranks
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
lockRankDummy: "",
lockRankSysmon: "sysmon",
lockRankScavenge: "scavenge",
lockRankForcegc: "forcegc",
lockRankSweepWaiters: "sweepWaiters",
lockRankAssistQueue: "assistQueue",
lockRankCpuprof: "cpuprof",
lockRankSweep: "sweep",
lockRankPollDesc: "pollDesc",
lockRankSched: "sched",
lockRankDeadlock: "deadlock",
lockRankItab: "itab",
lockRankNotifyList: "notifyList",
lockRankRoot: "root",
lockRankRwmutexW: "rwmutexW",
lockRankDefer: "defer",
lockRankScavenge: "scavenge",
lockRankForcegc: "forcegc",
lockRankSched: "sched",
lockRankAllg: "allg",
lockRankAllp: "allp",
lockRankTimers: "timers",
lockRankItab: "itab",
lockRankReflectOffs: "reflectOffs",
lockRankHchan: "hchan",
lockRankTraceBuf: "traceBuf",
lockRankFin: "fin",
lockRankNotifyList: "notifyList",
lockRankTraceStrings: "traceStrings",
lockRankMspanSpecial: "mspanSpecial",
lockRankProfInsert: "profInsert",
lockRankProfBlock: "profBlock",
lockRankProfMemActive: "profMemActive",
lockRankProfMemFuture: "profMemFuture",
lockRankGcBitsArenas: "gcBitsArenas",
lockRankRoot: "root",
lockRankSpanSetSpine: "spanSetSpine",
lockRankMheapSpecial: "mheapSpecial",
lockRankProfMemFuture: "profMemFuture",
lockRankTrace: "trace",
lockRankTraceStackTab: "traceStackTab",
lockRankNetpollInit: "netpollInit",
lockRankRwmutexW: "rwmutexW",
lockRankRwmutexR: "rwmutexR",
lockRankSpanSetSpine: "spanSetSpine",
lockRankGscan: "gscan",
lockRankStackpool: "stackpool",
lockRankStackLarge: "stackLarge",
lockRankDefer: "defer",
lockRankHchanLeaf: "hchanLeaf",
lockRankSudog: "sudog",
lockRankWbufSpans: "wbufSpans",
lockRankMheap: "mheap",
lockRankMheapSpecial: "mheapSpecial",
lockRankGlobalAlloc: "globalAlloc.mutex",
lockRankHchanLeaf: "hchanLeaf",
lockRankGlobalAlloc: "globalAlloc",
lockRankPanic: "panic",
}
@@ -166,64 +116,60 @@ func (rank lockRank) String() string {
if rank == lockRankLeafRank {
return "LEAF"
}
if rank < 0 || int(rank) >= len(lockNames) {
return "BAD RANK"
}
return lockNames[rank]
}
// lockPartialOrder is a partial order among the various lock types, listing the
// immediate ordering that has actually been observed in the runtime. Each entry
// (which corresponds to a particular lock rank) specifies the list of locks
// that can already be held immediately "above" it.
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// So, for example, the lockRankSched entry shows that all the locks preceding
// it in rank can actually be held. The allp lock shows that only the sysmon or
// sched lock can be held immediately above it when it is acquired.
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
lockRankDummy: {},
lockRankSysmon: {},
lockRankScavenge: {lockRankSysmon},
lockRankForcegc: {lockRankSysmon},
lockRankSweepWaiters: {},
lockRankAssistQueue: {},
lockRankCpuprof: {},
lockRankSweep: {},
lockRankPollDesc: {},
lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc},
lockRankDeadlock: {lockRankDeadlock},
lockRankAllg: {lockRankSysmon, lockRankSched},
lockRankAllp: {lockRankSysmon, lockRankSched},
lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankPollDesc, lockRankSched, lockRankAllp, lockRankTimers},
lockRankDeadlock: {},
lockRankItab: {},
lockRankReflectOffs: {lockRankItab},
lockRankHchan: {lockRankScavenge, lockRankSweep, lockRankHchan},
lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},
lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankSched, lockRankAllg, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf},
lockRankNotifyList: {},
lockRankTraceStrings: {lockRankTraceBuf},
lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings, lockRankProfMemActive},
lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
lockRankRoot: {},
lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings, lockRankRoot},
lockRankTraceStackTab: {lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankAllg, lockRankTimers, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankRoot, lockRankTrace},
lockRankNetpollInit: {lockRankTimers},
lockRankRwmutexW: {},
lockRankRwmutexR: {lockRankSysmon, lockRankRwmutexW},
lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankSpanSetSpine},
lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankRwmutexR, lockRankSpanSetSpine, lockRankGscan},
lockRankStackLarge: {lockRankSysmon, lockRankAssistQueue, lockRankSched, lockRankItab, lockRankHchan, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGcBitsArenas, lockRankRoot, lockRankSpanSetSpine, lockRankGscan},
lockRankDefer: {},
lockRankSudog: {lockRankHchan, lockRankNotifyList},
lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankRoot, lockRankTrace, lockRankGscan, lockRankDefer, lockRankSudog},
lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankSpanSetSpine, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankDefer, lockRankSudog, lockRankWbufSpans},
lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
lockRankGlobalAlloc: {lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankSpanSetSpine, lockRankMheap, lockRankMheapSpecial},
lockRankHchanLeaf: {lockRankGscan, lockRankHchanLeaf},
lockRankPanic: {lockRankDeadlock}, // plus any other lock held on throw.
lockRankScavenge: {lockRankSysmon},
lockRankForcegc: {lockRankSysmon},
lockRankSched: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankScavenge, lockRankForcegc},
lockRankAllg: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankScavenge, lockRankForcegc, lockRankSched},
lockRankAllp: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankScavenge, lockRankForcegc, lockRankSched},
lockRankTimers: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllp, lockRankTimers},
lockRankReflectOffs: {lockRankItab},
lockRankHchan: {lockRankSysmon, lockRankSweep, lockRankScavenge, lockRankHchan},
lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},
lockRankFin: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf},
lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf},
lockRankMspanSpecial: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfInsert: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfBlock: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfMemActive: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings},
lockRankGcBitsArenas: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings},
lockRankSpanSetSpine: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings},
lockRankMheapSpecial: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfMemFuture: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive},
lockRankTrace: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankRoot, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings},
lockRankTraceStackTab: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankRoot, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankTraceStrings, lockRankTrace},
lockRankNetpollInit: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllp, lockRankTimers},
lockRankRwmutexR: {lockRankSysmon, lockRankRwmutexW},
lockRankGscan: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankRoot, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankTraceStrings, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit},
lockRankStackpool: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankRoot, lockRankRwmutexW, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankTraceStrings, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankRwmutexR, lockRankGscan},
lockRankStackLarge: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankRoot, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankTraceStrings, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankGscan},
lockRankHchanLeaf: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankRoot, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankTraceStrings, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankGscan, lockRankHchanLeaf},
lockRankSudog: {lockRankSysmon, lockRankSweep, lockRankNotifyList, lockRankScavenge, lockRankHchan},
lockRankWbufSpans: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankRoot, lockRankDefer, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankTraceStrings, lockRankMspanSpecial, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankGscan, lockRankSudog},
lockRankMheap: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankRoot, lockRankRwmutexW, lockRankDefer, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankTraceStrings, lockRankMspanSpecial, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankRwmutexR, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankSudog, lockRankWbufSpans},
lockRankGlobalAlloc: {lockRankSysmon, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankItab, lockRankNotifyList, lockRankRoot, lockRankRwmutexW, lockRankDefer, lockRankScavenge, lockRankForcegc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankTraceStrings, lockRankMspanSpecial, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankMheapSpecial, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankRwmutexR, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankSudog, lockRankWbufSpans, lockRankMheap},
lockRankPanic: {lockRankDeadlock},
}
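
For reference, a simplified sketch of how a checker can consult this
table (the real enforcement lives in lockrank_on.go and does more,
e.g. handling leaf ranks and self-cycles):

	// canAcquire reports whether a lock of rank acquiring may be taken
	// while a lock of rank held is the most recently acquired lock
	// still held. Hypothetical helper; names are illustrative.
	func canAcquire(held, acquiring lockRank) bool {
		for _, before := range lockPartialOrder[acquiring] {
			if before == held {
				return true
			}
		}
		return false
	}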

src/runtime/lockrank_on.go

@@ -24,6 +24,9 @@ type lockRankStruct struct {
pad int
}
// lockInit(l *mutex, rank int) sets the rank of lock before it is used.
// If there is no clear place to initialize a lock, then the rank of a lock can be
// specified during the lock call itself via lockWithRank(l *mutex, rank int).
func lockInit(l *mutex, rank lockRank) {
l.rank = rank
}
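
For example, the scheduler lock is given its rank early in
initialization (as done elsewhere in the runtime):

	lockInit(&sched.lock, lockRankSched)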

src/runtime/lockrank_test.go

@@ -1,41 +1,29 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
. "runtime"
"bytes"
"internal/testenv"
"os"
"os/exec"
"testing"
)
// Check that the partial order in lockPartialOrder fits within the total order
// determined by the order of the lockRank constants.
func TestLockRankPartialOrder(t *testing.T) {
for r, list := range LockPartialOrder {
rank := LockRank(r)
for _, e := range list {
entry := LockRank(e)
if entry > rank {
t.Errorf("lockPartialOrder row %v entry %v is inconsistent with total lock ranking order", rank, entry)
}
}
}
}
// Verify that partial order lists are kept sorted. This is a purely cosmetic
// check to make manual reviews simpler. It does not affect correctness, unlike
// the above test.
func TestLockRankPartialOrderSortedEntries(t *testing.T) {
for r, list := range LockPartialOrder {
rank := LockRank(r)
var prev LockRank
for _, e := range list {
entry := LockRank(e)
if entry <= prev {
t.Errorf("Partial order for rank %v out of order: %v <= %v in %v", rank, entry, prev, list)
}
prev = entry
}
// Test that the generated code for the lock rank graph is up-to-date.
func TestLockRankGenerated(t *testing.T) {
testenv.MustHaveGoRun(t)
want, err := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "run", "mklockrank.go")).CombinedOutput()
if err != nil {
t.Fatal(err)
}
got, err := os.ReadFile("lockrank.go")
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(want, got) {
t.Fatalf("lockrank.go is out of date. Please run go generate.")
}
}

src/runtime/mklockrank.go (new file)

@@ -0,0 +1,240 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build ignore
// mklockrank records the static rank graph of the locks in the
// runtime and generates the rank checking structures in lockrank.go.
package main
import (
"bytes"
"flag"
"fmt"
"go/format"
"internal/dag"
"io"
"log"
"os"
"strings"
)
// ranks describes the lock rank graph. See "go doc internal/dag" for
// the syntax.
//
// "a < b" means a must be acquired before b if both are held
// (or, if b is held, a cannot be acquired).
//
// "NONE < a" means no locks may be held when a is acquired.
//
// If a lock is not given a rank, then it is assumed to be a leaf
// lock, which means no other lock can be acquired while it is held.
// Therefore, leaf locks do not need to be given an explicit rank.
//
// TODO: It's often hard to correlate rank names to locks. Change
// these to be more consistent with the locks they label.
const ranks = `
NONE < sysmon, sweepWaiters, assistQueue, cpuprof, sweep, pollDesc, deadlock, itab, notifyList, root, rwmutexW, defer;
sysmon < scavenge, forcegc;
assistQueue, cpuprof, forcegc, pollDesc, scavenge, sweep, sweepWaiters < sched;
sched < allg, allp;
allp < timers;
itab < reflectOffs;
scavenge, sweep < hchan;
scavenge < traceBuf;
allg, hchan, reflectOffs, timers, traceBuf < fin;
traceBuf < traceStrings;
allg, hchan, notifyList, reflectOffs, timers, traceStrings < mspanSpecial, profInsert, profBlock, profMemActive, gcBitsArenas, spanSetSpine, mheapSpecial;
profMemActive < profMemFuture;
hchan, root, sched, traceStrings < trace;
fin, notifyList, trace < traceStackTab;
timers < netpollInit;
rwmutexW, sysmon < rwmutexR;
gcBitsArenas, netpollInit, profBlock, profInsert, profMemFuture, spanSetSpine, traceStackTab < gscan;
gscan, rwmutexR < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one specific
# case (in syncadjustsudogs from markroot after the g has been suspended
# by suspendG), we allow gscan to be acquired, and then an hchan lock. To
# allow this case, we use this hchanLeaf rank in syncadjustsudogs(),
# rather than hchan. By using this special rank, we don't allow any further
# locks to be acquired other than more hchan locks.
gscan < hchanLeaf;
hchan, notifyList < sudog;
defer, gscan, mspanSpecial, sudog < wbufSpans;
stackLarge, stackpool, wbufSpans < mheap;
mheap, mheapSpecial < globalAlloc;
# panic is handled specially. It is implicitly below all other locks.
deadlock < panic;
`
// cyclicRanks lists lock ranks that allow multiple locks of the same
// rank to be acquired simultaneously. The runtime enforces ordering
// within these ranks using a separate mechanism.
var cyclicRanks = map[string]bool{
// Multiple timers are locked simultaneously in destroy().
"timers": true,
// Multiple hchans are acquired in hchan.sortkey() order in
// select.
"hchan": true,
// Multiple hchanLeafs are acquired in hchan.sortkey() order in
// syncadjustsudogs().
"hchanLeaf": true,
}
func main() {
flagO := flag.String("o", "", "write to `file` instead of stdout")
flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
flag.Parse()
if flag.NArg() != 0 {
fmt.Fprintf(os.Stderr, "too many arguments")
os.Exit(2)
}
g, err := dag.Parse(ranks)
if err != nil {
log.Fatal(err)
}
var out []byte
if *flagDot {
var b bytes.Buffer
g.TransitiveReduction()
// Add cyclic edges for visualization.
for k := range cyclicRanks {
g.AddEdge(k, k)
}
// Reverse the graph. It's much easier to read this as
// a "<" partial order than a ">" partial order. This
// ways, locks are acquired from the top going down
// and time moves forward over the edges instead of
// backward.
g.Transpose()
generateDot(&b, g)
out = b.Bytes()
} else {
var b bytes.Buffer
generateGo(&b, g)
out, err = format.Source(b.Bytes())
if err != nil {
log.Fatal(err)
}
}
if *flagO != "" {
err = os.WriteFile(*flagO, out, 0666)
} else {
_, err = os.Stdout.Write(out)
}
if err != nil {
log.Fatal(err)
}
}
func generateGo(w io.Writer, g *dag.Graph) {
fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.
package runtime
type lockRank int
`)
// Create numeric ranks.
topo := g.Topo()
for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
topo[i], topo[j] = topo[j], topo[i]
}
fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
lockRankUnknown lockRank = iota
`)
for _, rank := range topo {
fmt.Fprintf(w, "\t%s\n", cname(rank))
}
fmt.Fprintf(w, `)
// lockRankLeafRank is the rank of lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)
// Create string table.
fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
for _, rank := range topo {
fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
}
fmt.Fprintf(w, `}
func (rank lockRank) String() string {
if rank == 0 {
return "UNKNOWN"
}
if rank == lockRankLeafRank {
return "LEAF"
}
if rank < 0 || int(rank) >= len(lockNames) {
return "BAD RANK"
}
return lockNames[rank]
}
`)
// Create partial order structure.
fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
for _, rank := range topo {
list := []string{}
for _, before := range g.Edges(rank) {
list = append(list, cname(before))
}
if cyclicRanks[rank] {
list = append(list, cname(rank))
}
fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
}
fmt.Fprintf(w, "}\n")
}
// cname returns the Go const name for the given lock rank label.
func cname(label string) string {
return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
}
// generateDot emits a Graphviz dot representation of g to w.
func generateDot(w io.Writer, g *dag.Graph) {
fmt.Fprintf(w, "digraph g {\n")
// Define all nodes.
for _, node := range g.Nodes {
fmt.Fprintf(w, "%q;\n", node)
}
// Create edges.
for _, node := range g.Nodes {
for _, to := range g.Edges(node) {
fmt.Fprintf(w, "%q -> %q;\n", node, to)
}
}
fmt.Fprintf(w, "}\n")
}
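
Typical invocations, using the -o and -dot flags defined above (dot(1)
from Graphviz is assumed for rendering the graph):

	go run mklockrank.go -o lockrank.go
	go run mklockrank.go -dot | dot -Tsvg -o lockrank.svg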

src/runtime/runtime.go

@@ -12,6 +12,7 @@ import (
//go:generate go run wincallback.go
//go:generate go run mkduff.go
//go:generate go run mkfastlog2table.go
//go:generate go run mklockrank.go -o lockrank.go
var ticks ticksType