[dev.unified] all: merge master (635b124) into dev.unified

Merge List:

+ 2022-06-16 635b1244aa cmd/go: pass GOEXPERIMENT through to subtests
+ 2022-06-16 ef808ae1d4 expvar: don't crash if map value set to nil
+ 2022-06-16 32510eea74 go/parser: remove unused method checkBinaryExpr
+ 2022-06-16 74f1fa6ecb cmd/go: parallelize matchPackages work in each module
+ 2022-06-16 1d9d99b7ce cmd/link: consider alignment in carrier symbol size calculation
+ 2022-06-16 bcce8ef498 spec: adjust incorrect sentence in section on rune literals
+ 2022-06-16 ecc268aa26 test: add test that gofrontend fails
+ 2022-06-15 b6c1606889 internal/goarch, internal/goos: update generators for syslist.go
+ 2022-06-15 91baf5cecc reflect: fix reference comment to runtime/map.go
+ 2022-06-15 0e3d0c9581 syscall: clarify Pdeathsig documentation on Linux
+ 2022-06-15 74bf90c779 go/types, types2: add test case for issue for coverage
+ 2022-06-15 0cd0c12f57 doc/go1.19: use matching closing tag in unix build constraint heading
+ 2022-06-15 97bfc77f38 syscall, runtime/internal/syscall: always zero the higher bits of return value on linux/loong64
+ 2022-06-15 937fa5000a net/netip: add missing ) in ParsePrefix errors
+ 2022-06-15 c2c76c6f19 cmd/link: set alignment for carrier symbols
+ 2022-06-15 36147dd1e8 cmd/go/internal/modindex: disable indexing for modules outside GOROOT and the module cache
+ 2022-06-15 2a78e8afc0 test: add tests for string/[]byte/[]rune conversions
+ 2022-06-15 f9c0264107 net: avoid infinite recursion in Windows Resolver.lookupTXT
+ 2022-06-14 0dffda1383 spec: clarify "slice of bytes" and "slice of runes" through examples
+ 2022-06-14 c22a6c3b90 reflect: when StructOf overflows computing size/offset, panic
+ 2022-06-14 e1e66a03a6 cmd/compile,runtime,reflect: move embedded bit from offset to name
+ 2022-06-14 cb9bf93078 cmd/go: quote package directory when calling glob
+ 2022-06-14 cad477c922 cpu: fix typos in test case
+ 2022-06-13 c29be2d41c runtime: add HACKING section on nosplit functions
+ 2022-06-13 c5be77b687 doc/go1.19: minor edits
+ 2022-06-13 56bc3098f4 sync: improve linearity test robustness
+ 2022-06-13 1fe2810f9c sync: move lock linearity test and treat it like a performance test
+ 2022-06-13 6130461149 internal/testmath: add two-sample Welch's t-test for performance tests
+ 2022-06-13 24b9039149 doc/go1.19: prefer relative links to other parts of the Go website
+ 2022-06-13 fbc75dff2f cmd/cgo: remove -fsanitize=hwaddress hardware tags
+ 2022-06-13 5ee939b819 spec: clarify behavior of map size hint for make built-in
+ 2022-06-13 4703546a29 spec: add missing optional type arguments after TypeName in syntax
+ 2022-06-13 2c52465cb3 net: avoid darwin_arm64 bug in TestDialParallelSpuriousConnection
+ 2022-06-13 9228d7d7d5 doc/go1.19: add a release note for module indexing
+ 2022-06-13 7eeec1f6e4 cmd/compile: fix missing dict pass for type assertions
+ 2022-06-13 d27128b065 doc/go1.19: fix crypto tags
+ 2022-06-10 55590f3a2b net/http: doc: update RFC reference for appropriate HTTP codes
+ 2022-06-10 ff3db8d12d doc: fix typos in Go memory model
+ 2022-06-10 fb75c2da91 cmd/dist, cmd/internal/metadata: don't install metadata binary
+ 2022-06-10 386245b68e runtime: fix stack split at bad time when fuzzing
+ 2022-06-09 2cfbef4380 cmd/cgo: recognize clang 14 DWARF type names
+ 2022-06-09 c7ccabf3fe runtime/cgo: retry _beginthread on EACCES
+ 2022-06-09 91019cc13d runtime/cgo: merge bodies of cgo_sys_thread_start on windows
+ 2022-06-09 840e99ed74 api: promote next to go1.19
+ 2022-06-09 1a2ca95ad2 go/types, types2: only set instance context if packages match
+ 2022-06-08 b51d44c6dd cmd/go/testdata/script: fix skip on list_replace_absolute_windows
+ 2022-06-08 80f86f706d api/next: minor reformat
+ 2022-06-08 13f6be2833 runtime: use pidleget for faketime jump
+ 2022-06-08 1292176bc9 cmd/go: clean paths before using them form index functions
+ 2022-06-08 1858ea5d85 syscall: remove unused setgroups on linux/loong64
+ 2022-06-08 bdde41e3ba runtime: skip TestGdbBacktrace on gdb bug
+ 2022-06-08 432158b69a net: fix testHookDialTCP race
+ 2022-06-08 899f0a29c7 cmd/go: enable module index by default
+ 2022-06-08 f862280e30 cmd/go: properly call PackageModuleRoot to get modroot for index
+ 2022-06-08 d65166024f cmd/go: set Root and target fields for packages in GOPATH
+ 2022-06-08 4afb0b9e53 doc/go1.19: delete remaining TODOs
+ 2022-06-08 3426b7201d runtime: gofmt
+ 2022-06-08 f330a3a987 doc/go1.19: complete most remaining TODOs
+ 2022-06-08 2882786bf4 runtime: remove unused pipe and setNonblock on linux/loong64
+ 2022-06-08 decdd87bea doc/go1.19: mention riscv64 supported regabi
+ 2022-06-07 b72a6a7b86 os: document that Chdir affects fs.FS returned by DirFS with a relative path
+ 2022-06-07 30b929b1ef syscall: remove unused accept on linux/loong64
+ 2022-06-07 a7551fe245 net: use synthetic network in TestDialParallel
+ 2022-06-07 19d71acd97 doc/go1.19: document that the assembler requires -p
+ 2022-06-07 d151134851 doc/go1.19: document linker CL that switches DWARF compressed section format
+ 2022-06-07 3507805bcd go/types, types2: better error message for invalid use of constraint type
+ 2022-06-07 269bf7e855 go/types, types2: better error message if type is not in type set
+ 2022-06-07 d4fb93be87 go/types, types2: use | rather than ∪ when printing term lists
+ 2022-06-07 346698eea7 doc/go1.19: add release notes for net/http and net/url
+ 2022-06-07 7a82c6859f doc/go1.19: adjust runtime release notes
+ 2022-06-07 f3e051a184 runtime: document GOMEMLIMIT in environment variables section
+ 2022-06-07 ef2567c7dd doc/go1.19: document loong64 port
+ 2022-06-07 69bb7c6ef5 sync/atomic: clarify that 8-byte alignment of variables is due to escape
+ 2022-06-07 81033fbd8e doc/go1.19: some platforms are still on TSAN v2
+ 2022-06-07 0c3a0543c2 doc/go1.19: compiler section is complete, modulo TODOs
+ 2022-06-07 835a946137 doc/go1.19: minor edits
+ 2022-06-07 429a4041eb doc/go1.19: complete TODOs for go/types
+ 2022-06-07 d2630aa4b2 doc/go1.19: add various crypto release notes
+ 2022-06-07 77d9252ddf runtime: fix inline assembly trampoline for arm64
+ 2022-06-07 38607c5538 cmd/link: specify -Wl,-z params as documented
+ 2022-06-07 95b68e1e02 doc/go1.19: delete boringcrypto TODO
+ 2022-06-07 a79623b019 doc/go1.19: add more TODOs from updated relnote
+ 2022-06-06 acfff42802 doc/go1.19: add release notes for the soft memory limit and idle GC
+ 2022-06-06 a71ca3dfbd runtime, sync, sync/atomic: document happens-before guarantees
+ 2022-06-06 3651a6117e go/doc/comment: add heuristics for common badly formatted comments
+ 2022-06-06 4c08260c51 doc/go_mem: update revision date
+ 2022-06-06 7271a0a287 doc/go1.19: gc requires -p=importpath
+ 2022-06-06 c1e2ecbaf9 doc/go1.19: document Resolver.PreferGo
+ 2022-06-06 11195c60e6 cmd/go: use index to match packages in dependency modules
+ 2022-06-06 ea5d7cbc26 all: boringcrypto post-merge cleanup
+ 2022-06-06 6c7b223c2b go/doc/comment: do not turn ``` into “`
+ 2022-06-06 ce757e94e0 go/doc/comment: add doc comment
+ 2022-06-06 95547aee8c cmd/compile: cast riscv64 rewrite shifts to unsigned int
+ 2022-06-06 d43ddc1f3f strconv: fix typo in atof.go
+ 2022-06-06 2fa45a4fcd cmd/link/internal/loadpe: handle _main reference properly
+ 2022-06-06 fc97075949 go/types, types2: simplify implementation of validType (fix TODO)
+ 2022-06-06 07eca49055 go/types, types2: use type nest to detect type cycles (fix validType)
+ 2022-06-06 770146d5a8 doc/go1.19: add TODOs for changes to go/types
+ 2022-06-06 1b8ca75eaa runtime: fix breakpoint in ppc64x
+ 2022-06-06 9ce28b518d text/template/parse: fix data race on lexer initialization
+ 2022-06-06 47e34ca533 go/types, types2: ensure that named types never expand infinitely
+ 2022-06-06 02e69cfa96 go/types, types2: store Named instance information separately
+ 2022-06-06 1323b0e8f0 go/types, types2: eliminate methodList in favor of just using Named.mu
+ 2022-06-06 846f971daa go/types, types2: remove Named.once in favor of monotonic state
+ 2022-06-06 66cbf67345 cmd/buildid: reject rewriting legacy buildids
+ 2022-06-04 47f806ce81 strconv: clarify ParseFloat accepts Go syntax for float literals
+ 2022-06-04 2730c6af9f runtime: fix typo in libfuzzer_arm64.s
+ 2022-06-04 a32a592c8c database/sql/driver: fix typo in driver.go
+ 2022-06-04 0293c51bc5 regexp: avoid copying each instruction executed
+ 2022-06-04 865911424d doc: update Go memory model
+ 2022-06-04 fc66cae490 doc/go1.19: remove TODO about LimitedReader
+ 2022-06-04 f8a53df314 io: revert: add an Err field to LimitedReader
+ 2022-06-04 21f05284c7 cmd/go: index standard library packages

Change-Id: Ia7595c77a555fd2a0e7bb3b6b2cfbb745bd4947b
Commit 1f4e8afafe by Matthew Dempsky, 2022-06-16 13:08:10 -07:00
209 changed files with 5455 additions and 2651 deletions

api/go1.19.txt (new file, 288 lines)

@@ -0,0 +1,288 @@
pkg crypto/x509, func ParseRevocationList([]uint8) (*RevocationList, error) #50674
pkg crypto/x509, method (*CertPool) Clone() *CertPool #35044
pkg crypto/x509, method (*CertPool) Equal(*CertPool) bool #46057
pkg crypto/x509, method (*RevocationList) CheckSignatureFrom(*Certificate) error #50674
pkg crypto/x509, type RevocationList struct, AuthorityKeyId []uint8 #50674
pkg crypto/x509, type RevocationList struct, Extensions []pkix.Extension #50674
pkg crypto/x509, type RevocationList struct, Issuer pkix.Name #50674
pkg crypto/x509, type RevocationList struct, Raw []uint8 #50674
pkg crypto/x509, type RevocationList struct, RawIssuer []uint8 #50674
pkg crypto/x509, type RevocationList struct, RawTBSRevocationList []uint8 #50674
pkg crypto/x509, type RevocationList struct, Signature []uint8 #50674
pkg debug/elf, const EM_LOONGARCH = 258 #46229
pkg debug/elf, const EM_LOONGARCH Machine #46229
pkg debug/elf, const R_LARCH_32 = 1 #46229
pkg debug/elf, const R_LARCH_32 R_LARCH #46229
pkg debug/elf, const R_LARCH_64 = 2 #46229
pkg debug/elf, const R_LARCH_64 R_LARCH #46229
pkg debug/elf, const R_LARCH_ADD16 = 48 #46229
pkg debug/elf, const R_LARCH_ADD16 R_LARCH #46229
pkg debug/elf, const R_LARCH_ADD24 = 49 #46229
pkg debug/elf, const R_LARCH_ADD24 R_LARCH #46229
pkg debug/elf, const R_LARCH_ADD32 = 50 #46229
pkg debug/elf, const R_LARCH_ADD32 R_LARCH #46229
pkg debug/elf, const R_LARCH_ADD64 = 51 #46229
pkg debug/elf, const R_LARCH_ADD64 R_LARCH #46229
pkg debug/elf, const R_LARCH_ADD8 = 47 #46229
pkg debug/elf, const R_LARCH_ADD8 R_LARCH #46229
pkg debug/elf, const R_LARCH_COPY = 4 #46229
pkg debug/elf, const R_LARCH_COPY R_LARCH #46229
pkg debug/elf, const R_LARCH_IRELATIVE = 12 #46229
pkg debug/elf, const R_LARCH_IRELATIVE R_LARCH #46229
pkg debug/elf, const R_LARCH_JUMP_SLOT = 5 #46229
pkg debug/elf, const R_LARCH_JUMP_SLOT R_LARCH #46229
pkg debug/elf, const R_LARCH_MARK_LA = 20 #46229
pkg debug/elf, const R_LARCH_MARK_LA R_LARCH #46229
pkg debug/elf, const R_LARCH_MARK_PCREL = 21 #46229
pkg debug/elf, const R_LARCH_MARK_PCREL R_LARCH #46229
pkg debug/elf, const R_LARCH_NONE = 0 #46229
pkg debug/elf, const R_LARCH_NONE R_LARCH #46229
pkg debug/elf, const R_LARCH_RELATIVE = 3 #46229
pkg debug/elf, const R_LARCH_RELATIVE R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_ADD = 35 #46229
pkg debug/elf, const R_LARCH_SOP_ADD R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_AND = 36 #46229
pkg debug/elf, const R_LARCH_SOP_AND R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_ASSERT = 30 #46229
pkg debug/elf, const R_LARCH_SOP_ASSERT R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_IF_ELSE = 37 #46229
pkg debug/elf, const R_LARCH_SOP_IF_ELSE R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_NOT = 31 #46229
pkg debug/elf, const R_LARCH_SOP_NOT R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_10_10_16_S2 = 45 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_10_10_16_S2 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_5_10_16_S2 = 44 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_5_10_16_S2 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_12 = 40 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_12 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16 = 41 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16_S2 = 42 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16_S2 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_5 = 38 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_5 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_5_20 = 43 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_5_20 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_U = 46 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_U R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_U_10_12 = 39 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_U_10_12 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_ABSOLUTE = 23 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_ABSOLUTE R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_DUP = 24 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_DUP R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_GPREL = 25 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_GPREL R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_PCREL = 22 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_PCREL R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_PLT_PCREL = 29 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_PLT_PCREL R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GD = 28 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GD R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GOT = 27 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GOT R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_TPREL = 26 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_TPREL R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_SL = 33 #46229
pkg debug/elf, const R_LARCH_SOP_SL R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_SR = 34 #46229
pkg debug/elf, const R_LARCH_SOP_SR R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_SUB = 32 #46229
pkg debug/elf, const R_LARCH_SOP_SUB R_LARCH #46229
pkg debug/elf, const R_LARCH_SUB16 = 53 #46229
pkg debug/elf, const R_LARCH_SUB16 R_LARCH #46229
pkg debug/elf, const R_LARCH_SUB24 = 54 #46229
pkg debug/elf, const R_LARCH_SUB24 R_LARCH #46229
pkg debug/elf, const R_LARCH_SUB32 = 55 #46229
pkg debug/elf, const R_LARCH_SUB32 R_LARCH #46229
pkg debug/elf, const R_LARCH_SUB64 = 56 #46229
pkg debug/elf, const R_LARCH_SUB64 R_LARCH #46229
pkg debug/elf, const R_LARCH_SUB8 = 52 #46229
pkg debug/elf, const R_LARCH_SUB8 R_LARCH #46229
pkg debug/elf, const R_LARCH_TLS_DTPMOD32 = 6 #46229
pkg debug/elf, const R_LARCH_TLS_DTPMOD32 R_LARCH #46229
pkg debug/elf, const R_LARCH_TLS_DTPMOD64 = 7 #46229
pkg debug/elf, const R_LARCH_TLS_DTPMOD64 R_LARCH #46229
pkg debug/elf, const R_LARCH_TLS_DTPREL32 = 8 #46229
pkg debug/elf, const R_LARCH_TLS_DTPREL32 R_LARCH #46229
pkg debug/elf, const R_LARCH_TLS_DTPREL64 = 9 #46229
pkg debug/elf, const R_LARCH_TLS_DTPREL64 R_LARCH #46229
pkg debug/elf, const R_LARCH_TLS_TPREL32 = 10 #46229
pkg debug/elf, const R_LARCH_TLS_TPREL32 R_LARCH #46229
pkg debug/elf, const R_LARCH_TLS_TPREL64 = 11 #46229
pkg debug/elf, const R_LARCH_TLS_TPREL64 R_LARCH #46229
pkg debug/elf, method (R_LARCH) GoString() string #46229
pkg debug/elf, method (R_LARCH) String() string #46229
pkg debug/elf, type R_LARCH int #46229
pkg debug/pe, const IMAGE_COMDAT_SELECT_ANY = 2 #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_ANY ideal-int #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_ASSOCIATIVE = 5 #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_ASSOCIATIVE ideal-int #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_EXACT_MATCH = 4 #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_EXACT_MATCH ideal-int #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_LARGEST = 6 #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_LARGEST ideal-int #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_NODUPLICATES = 1 #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_NODUPLICATES ideal-int #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_SAME_SIZE = 3 #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_SAME_SIZE ideal-int #51686
pkg debug/pe, const IMAGE_SCN_CNT_CODE = 32 #51686
pkg debug/pe, const IMAGE_SCN_CNT_CODE ideal-int #51686
pkg debug/pe, const IMAGE_SCN_CNT_INITIALIZED_DATA = 64 #51686
pkg debug/pe, const IMAGE_SCN_CNT_INITIALIZED_DATA ideal-int #51686
pkg debug/pe, const IMAGE_SCN_CNT_UNINITIALIZED_DATA = 128 #51686
pkg debug/pe, const IMAGE_SCN_CNT_UNINITIALIZED_DATA ideal-int #51686
pkg debug/pe, const IMAGE_SCN_LNK_COMDAT = 4096 #51686
pkg debug/pe, const IMAGE_SCN_LNK_COMDAT ideal-int #51686
pkg debug/pe, const IMAGE_SCN_MEM_DISCARDABLE = 33554432 #51686
pkg debug/pe, const IMAGE_SCN_MEM_DISCARDABLE ideal-int #51686
pkg debug/pe, const IMAGE_SCN_MEM_EXECUTE = 536870912 #51686
pkg debug/pe, const IMAGE_SCN_MEM_EXECUTE ideal-int #51686
pkg debug/pe, const IMAGE_SCN_MEM_READ = 1073741824 #51686
pkg debug/pe, const IMAGE_SCN_MEM_READ ideal-int #51686
pkg debug/pe, const IMAGE_SCN_MEM_WRITE = 2147483648 #51686
pkg debug/pe, const IMAGE_SCN_MEM_WRITE ideal-int #51686
pkg debug/pe, method (*File) COFFSymbolReadSectionDefAux(int) (*COFFSymbolAuxFormat5, error) #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct, Checksum uint32 #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct, NumLineNumbers uint16 #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct, NumRelocs uint16 #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct, SecNum uint16 #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct, Selection uint8 #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct, Size uint32 #51686
pkg encoding/binary, func AppendUvarint([]uint8, uint64) []uint8 #51644
pkg encoding/binary, func AppendVarint([]uint8, int64) []uint8 #51644
pkg encoding/binary, type AppendByteOrder interface { AppendUint16, AppendUint32, AppendUint64, String } #50601
pkg encoding/binary, type AppendByteOrder interface, AppendUint16([]uint8, uint16) []uint8 #50601
pkg encoding/binary, type AppendByteOrder interface, AppendUint32([]uint8, uint32) []uint8 #50601
pkg encoding/binary, type AppendByteOrder interface, AppendUint64([]uint8, uint64) []uint8 #50601
pkg encoding/binary, type AppendByteOrder interface, String() string #50601
pkg encoding/csv, method (*Reader) InputOffset() int64 #43401
pkg encoding/xml, method (*Decoder) InputPos() (int, int) #45628
pkg flag, func TextVar(encoding.TextUnmarshaler, string, encoding.TextMarshaler, string) #45754
pkg flag, method (*FlagSet) TextVar(encoding.TextUnmarshaler, string, encoding.TextMarshaler, string) #45754
pkg fmt, func Append([]uint8, ...interface{}) []uint8 #47579
pkg fmt, func Appendf([]uint8, string, ...interface{}) []uint8 #47579
pkg fmt, func Appendln([]uint8, ...interface{}) []uint8 #47579
pkg go/doc, method (*Package) HTML(string) []uint8 #51082
pkg go/doc, method (*Package) Markdown(string) []uint8 #51082
pkg go/doc, method (*Package) Parser() *comment.Parser #51082
pkg go/doc, method (*Package) Printer() *comment.Printer #51082
pkg go/doc, method (*Package) Synopsis(string) string #51082
pkg go/doc, method (*Package) Text(string) []uint8 #51082
pkg go/doc/comment, func DefaultLookupPackage(string) (string, bool) #51082
pkg go/doc/comment, method (*DocLink) DefaultURL(string) string #51082
pkg go/doc/comment, method (*Heading) DefaultID() string #51082
pkg go/doc/comment, method (*List) BlankBefore() bool #51082
pkg go/doc/comment, method (*List) BlankBetween() bool #51082
pkg go/doc/comment, method (*Parser) Parse(string) *Doc #51082
pkg go/doc/comment, method (*Printer) Comment(*Doc) []uint8 #51082
pkg go/doc/comment, method (*Printer) HTML(*Doc) []uint8 #51082
pkg go/doc/comment, method (*Printer) Markdown(*Doc) []uint8 #51082
pkg go/doc/comment, method (*Printer) Text(*Doc) []uint8 #51082
pkg go/doc/comment, type Block interface, unexported methods #51082
pkg go/doc/comment, type Code struct #51082
pkg go/doc/comment, type Code struct, Text string #51082
pkg go/doc/comment, type Doc struct #51082
pkg go/doc/comment, type Doc struct, Content []Block #51082
pkg go/doc/comment, type Doc struct, Links []*LinkDef #51082
pkg go/doc/comment, type DocLink struct #51082
pkg go/doc/comment, type DocLink struct, ImportPath string #51082
pkg go/doc/comment, type DocLink struct, Name string #51082
pkg go/doc/comment, type DocLink struct, Recv string #51082
pkg go/doc/comment, type DocLink struct, Text []Text #51082
pkg go/doc/comment, type Heading struct #51082
pkg go/doc/comment, type Heading struct, Text []Text #51082
pkg go/doc/comment, type Italic string #51082
pkg go/doc/comment, type Link struct #51082
pkg go/doc/comment, type Link struct, Auto bool #51082
pkg go/doc/comment, type Link struct, Text []Text #51082
pkg go/doc/comment, type Link struct, URL string #51082
pkg go/doc/comment, type LinkDef struct #51082
pkg go/doc/comment, type LinkDef struct, Text string #51082
pkg go/doc/comment, type LinkDef struct, URL string #51082
pkg go/doc/comment, type LinkDef struct, Used bool #51082
pkg go/doc/comment, type List struct #51082
pkg go/doc/comment, type List struct, ForceBlankBefore bool #51082
pkg go/doc/comment, type List struct, ForceBlankBetween bool #51082
pkg go/doc/comment, type List struct, Items []*ListItem #51082
pkg go/doc/comment, type ListItem struct #51082
pkg go/doc/comment, type ListItem struct, Content []Block #51082
pkg go/doc/comment, type ListItem struct, Number string #51082
pkg go/doc/comment, type Paragraph struct #51082
pkg go/doc/comment, type Paragraph struct, Text []Text #51082
pkg go/doc/comment, type Parser struct #51082
pkg go/doc/comment, type Parser struct, LookupPackage func(string) (string, bool) #51082
pkg go/doc/comment, type Parser struct, LookupSym func(string, string) bool #51082
pkg go/doc/comment, type Parser struct, Words map[string]string #51082
pkg go/doc/comment, type Plain string #51082
pkg go/doc/comment, type Printer struct #51082
pkg go/doc/comment, type Printer struct, DocLinkBaseURL string #51082
pkg go/doc/comment, type Printer struct, DocLinkURL func(*DocLink) string #51082
pkg go/doc/comment, type Printer struct, HeadingID func(*Heading) string #51082
pkg go/doc/comment, type Printer struct, HeadingLevel int #51082
pkg go/doc/comment, type Printer struct, TextCodePrefix string #51082
pkg go/doc/comment, type Printer struct, TextPrefix string #51082
pkg go/doc/comment, type Printer struct, TextWidth int #51082
pkg go/doc/comment, type Text interface, unexported methods #51082
pkg go/types, method (*Func) Origin() *Func #51682
pkg go/types, method (*Var) Origin() *Var #51682
pkg hash/maphash, func Bytes(Seed, []uint8) uint64 #42710
pkg hash/maphash, func String(Seed, string) uint64 #42710
pkg html/template, method (*Template) Funcs(template.FuncMap) *Template #46121
pkg html/template, type FuncMap = template.FuncMap #46121
pkg net/http, method (*MaxBytesError) Error() string #30715
pkg net/http, type MaxBytesError struct #30715
pkg net/http, type MaxBytesError struct, Limit int64 #30715
pkg net/url, func JoinPath(string, ...string) (string, error) #47005
pkg net/url, method (*URL) JoinPath(...string) *URL #47005
pkg net/url, type URL struct, OmitHost bool #46059
pkg os/exec, method (*Cmd) Environ() []string #50599
pkg os/exec, type Cmd struct, Err error #43724
pkg os/exec, var ErrDot error #43724
pkg regexp/syntax, const ErrNestingDepth = "expression nests too deeply" #51684
pkg regexp/syntax, const ErrNestingDepth ErrorCode #51684
pkg runtime/debug, func SetMemoryLimit(int64) int64 #48409
pkg sort, func Find(int, func(int) int) (int, bool) #50340
pkg sync/atomic, method (*Bool) CompareAndSwap(bool, bool) bool #50860
pkg sync/atomic, method (*Bool) Load() bool #50860
pkg sync/atomic, method (*Bool) Store(bool) #50860
pkg sync/atomic, method (*Bool) Swap(bool) bool #50860
pkg sync/atomic, method (*Int32) Add(int32) int32 #50860
pkg sync/atomic, method (*Int32) CompareAndSwap(int32, int32) bool #50860
pkg sync/atomic, method (*Int32) Load() int32 #50860
pkg sync/atomic, method (*Int32) Store(int32) #50860
pkg sync/atomic, method (*Int32) Swap(int32) int32 #50860
pkg sync/atomic, method (*Int64) Add(int64) int64 #50860
pkg sync/atomic, method (*Int64) CompareAndSwap(int64, int64) bool #50860
pkg sync/atomic, method (*Int64) Load() int64 #50860
pkg sync/atomic, method (*Int64) Store(int64) #50860
pkg sync/atomic, method (*Int64) Swap(int64) int64 #50860
pkg sync/atomic, method (*Pointer[$0]) CompareAndSwap(*$0, *$0) bool #50860
pkg sync/atomic, method (*Pointer[$0]) Load() *$0 #50860
pkg sync/atomic, method (*Pointer[$0]) Store(*$0) #50860
pkg sync/atomic, method (*Pointer[$0]) Swap(*$0) *$0 #50860
pkg sync/atomic, method (*Uint32) Add(uint32) uint32 #50860
pkg sync/atomic, method (*Uint32) CompareAndSwap(uint32, uint32) bool #50860
pkg sync/atomic, method (*Uint32) Load() uint32 #50860
pkg sync/atomic, method (*Uint32) Store(uint32) #50860
pkg sync/atomic, method (*Uint32) Swap(uint32) uint32 #50860
pkg sync/atomic, method (*Uint64) Add(uint64) uint64 #50860
pkg sync/atomic, method (*Uint64) CompareAndSwap(uint64, uint64) bool #50860
pkg sync/atomic, method (*Uint64) Load() uint64 #50860
pkg sync/atomic, method (*Uint64) Store(uint64) #50860
pkg sync/atomic, method (*Uint64) Swap(uint64) uint64 #50860
pkg sync/atomic, method (*Uintptr) Add(uintptr) uintptr #50860
pkg sync/atomic, method (*Uintptr) CompareAndSwap(uintptr, uintptr) bool #50860
pkg sync/atomic, method (*Uintptr) Load() uintptr #50860
pkg sync/atomic, method (*Uintptr) Store(uintptr) #50860
pkg sync/atomic, method (*Uintptr) Swap(uintptr) uintptr #50860
pkg sync/atomic, type Bool struct #50860
pkg sync/atomic, type Int32 struct #50860
pkg sync/atomic, type Int64 struct #50860
pkg sync/atomic, type Pointer[$0 interface{}] struct #50860
pkg sync/atomic, type Uint32 struct #50860
pkg sync/atomic, type Uint64 struct #50860
pkg sync/atomic, type Uintptr struct #50860
pkg time, method (Duration) Abs() Duration #51414
pkg time, method (Time) ZoneBounds() (Time, Time) #50062
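
A brief, illustrative sketch of a few of the additions listed above (net/url JoinPath, fmt.Appendf, and the encoding/binary append helpers); the URL and values are placeholders and assume a Go 1.19 toolchain:

package main

import (
    "encoding/binary"
    "fmt"
    "net/url"
)

func main() {
    // net/url: join path elements onto a parsed URL (#47005).
    u, _ := url.Parse("https://example.com/api")
    u = u.JoinPath("users", "42")

    // fmt: append formatted text to a byte slice without an intermediate string (#47579).
    buf := fmt.Appendf(nil, "GET %s\n", u)

    // encoding/binary: append a varint and a fixed-width integer (#51644, #50601).
    buf = binary.AppendUvarint(buf, 1<<20)
    buf = binary.LittleEndian.AppendUint32(buf, 0xdeadbeef)

    fmt.Printf("%d bytes\n", len(buf))
}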

@@ -1,3 +0,0 @@
pkg net/http, type MaxBytesError struct #30715
pkg net/http, type MaxBytesError struct, Limit int64 #30715
pkg net/http, method (*MaxBytesError) Error() string #30715

@@ -1 +0,0 @@
pkg crypto/x509, method (*CertPool) Clone() *CertPool #35044

@@ -1,2 +0,0 @@
pkg hash/maphash, func Bytes(Seed, []uint8) uint64 #42710
pkg hash/maphash, func String(Seed, string) uint64 #42710

@@ -1 +0,0 @@
pkg encoding/csv, method (*Reader) InputOffset() int64 #43401

@@ -1,2 +0,0 @@
pkg os/exec, type Cmd struct, Err error #43724
pkg os/exec, var ErrDot error #43724

@@ -1 +0,0 @@
pkg encoding/xml, method (*Decoder) InputPos() (int, int) #45628

@@ -1,2 +0,0 @@
pkg flag, func TextVar(encoding.TextUnmarshaler, string, encoding.TextMarshaler, string) #45754
pkg flag, method (*FlagSet) TextVar(encoding.TextUnmarshaler, string, encoding.TextMarshaler, string) #45754

@@ -1 +0,0 @@
pkg crypto/x509, method (*CertPool) Equal(*CertPool) bool #46057

@@ -1,2 +0,0 @@
pkg net/url, type URL struct, OmitHost bool #46059

@@ -1,2 +0,0 @@
pkg html/template, method (*Template) Funcs(template.FuncMap) *Template #46121
pkg html/template, type FuncMap = template.FuncMap #46121

@@ -1,105 +0,0 @@
pkg debug/elf, const EM_LOONGARCH = 258 #46229
pkg debug/elf, const EM_LOONGARCH Machine #46229
pkg debug/elf, const R_LARCH_32 = 1 #46229
pkg debug/elf, const R_LARCH_32 R_LARCH #46229
pkg debug/elf, const R_LARCH_64 = 2 #46229
pkg debug/elf, const R_LARCH_64 R_LARCH #46229
pkg debug/elf, const R_LARCH_ADD16 = 48 #46229
pkg debug/elf, const R_LARCH_ADD16 R_LARCH #46229
pkg debug/elf, const R_LARCH_ADD24 = 49 #46229
pkg debug/elf, const R_LARCH_ADD24 R_LARCH #46229
pkg debug/elf, const R_LARCH_ADD32 = 50 #46229
pkg debug/elf, const R_LARCH_ADD32 R_LARCH #46229
pkg debug/elf, const R_LARCH_ADD64 = 51 #46229
pkg debug/elf, const R_LARCH_ADD64 R_LARCH #46229
pkg debug/elf, const R_LARCH_ADD8 = 47 #46229
pkg debug/elf, const R_LARCH_ADD8 R_LARCH #46229
pkg debug/elf, const R_LARCH_COPY = 4 #46229
pkg debug/elf, const R_LARCH_COPY R_LARCH #46229
pkg debug/elf, const R_LARCH_IRELATIVE = 12 #46229
pkg debug/elf, const R_LARCH_IRELATIVE R_LARCH #46229
pkg debug/elf, const R_LARCH_JUMP_SLOT = 5 #46229
pkg debug/elf, const R_LARCH_JUMP_SLOT R_LARCH #46229
pkg debug/elf, const R_LARCH_MARK_LA = 20 #46229
pkg debug/elf, const R_LARCH_MARK_LA R_LARCH #46229
pkg debug/elf, const R_LARCH_MARK_PCREL = 21 #46229
pkg debug/elf, const R_LARCH_MARK_PCREL R_LARCH #46229
pkg debug/elf, const R_LARCH_NONE = 0 #46229
pkg debug/elf, const R_LARCH_NONE R_LARCH #46229
pkg debug/elf, const R_LARCH_RELATIVE = 3 #46229
pkg debug/elf, const R_LARCH_RELATIVE R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_ADD = 35 #46229
pkg debug/elf, const R_LARCH_SOP_ADD R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_AND = 36 #46229
pkg debug/elf, const R_LARCH_SOP_AND R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_ASSERT = 30 #46229
pkg debug/elf, const R_LARCH_SOP_ASSERT R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_IF_ELSE = 37 #46229
pkg debug/elf, const R_LARCH_SOP_IF_ELSE R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_NOT = 31 #46229
pkg debug/elf, const R_LARCH_SOP_NOT R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_10_10_16_S2 = 45 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_10_10_16_S2 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_5_10_16_S2 = 44 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_5_10_16_S2 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_12 = 40 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_12 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16 = 41 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16_S2 = 42 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16_S2 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_5 = 38 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_5 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_5_20 = 43 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_S_5_20 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_U = 46 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_U R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_U_10_12 = 39 #46229
pkg debug/elf, const R_LARCH_SOP_POP_32_U_10_12 R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_ABSOLUTE = 23 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_ABSOLUTE R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_DUP = 24 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_DUP R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_GPREL = 25 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_GPREL R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_PCREL = 22 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_PCREL R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_PLT_PCREL = 29 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_PLT_PCREL R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GD = 28 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GD R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GOT = 27 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GOT R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_TPREL = 26 #46229
pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_TPREL R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_SL = 33 #46229
pkg debug/elf, const R_LARCH_SOP_SL R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_SR = 34 #46229
pkg debug/elf, const R_LARCH_SOP_SR R_LARCH #46229
pkg debug/elf, const R_LARCH_SOP_SUB = 32 #46229
pkg debug/elf, const R_LARCH_SOP_SUB R_LARCH #46229
pkg debug/elf, const R_LARCH_SUB16 = 53 #46229
pkg debug/elf, const R_LARCH_SUB16 R_LARCH #46229
pkg debug/elf, const R_LARCH_SUB24 = 54 #46229
pkg debug/elf, const R_LARCH_SUB24 R_LARCH #46229
pkg debug/elf, const R_LARCH_SUB32 = 55 #46229
pkg debug/elf, const R_LARCH_SUB32 R_LARCH #46229
pkg debug/elf, const R_LARCH_SUB64 = 56 #46229
pkg debug/elf, const R_LARCH_SUB64 R_LARCH #46229
pkg debug/elf, const R_LARCH_SUB8 = 52 #46229
pkg debug/elf, const R_LARCH_SUB8 R_LARCH #46229
pkg debug/elf, const R_LARCH_TLS_DTPMOD32 = 6 #46229
pkg debug/elf, const R_LARCH_TLS_DTPMOD32 R_LARCH #46229
pkg debug/elf, const R_LARCH_TLS_DTPMOD64 = 7 #46229
pkg debug/elf, const R_LARCH_TLS_DTPMOD64 R_LARCH #46229
pkg debug/elf, const R_LARCH_TLS_DTPREL32 = 8 #46229
pkg debug/elf, const R_LARCH_TLS_DTPREL32 R_LARCH #46229
pkg debug/elf, const R_LARCH_TLS_DTPREL64 = 9 #46229
pkg debug/elf, const R_LARCH_TLS_DTPREL64 R_LARCH #46229
pkg debug/elf, const R_LARCH_TLS_TPREL32 = 10 #46229
pkg debug/elf, const R_LARCH_TLS_TPREL32 R_LARCH #46229
pkg debug/elf, const R_LARCH_TLS_TPREL64 = 11 #46229
pkg debug/elf, const R_LARCH_TLS_TPREL64 R_LARCH #46229
pkg debug/elf, method (R_LARCH) GoString() string #46229
pkg debug/elf, method (R_LARCH) String() string #46229
pkg debug/elf, type R_LARCH int #46229

@@ -1,2 +0,0 @@
pkg net/url, func JoinPath(string, ...string) (string, error) #47005
pkg net/url, method (*URL) JoinPath(...string) *URL #47005

@@ -1,3 +0,0 @@
pkg fmt, func Append([]uint8, ...interface{}) []uint8 #47579
pkg fmt, func Appendf([]uint8, string, ...interface{}) []uint8 #47579
pkg fmt, func Appendln([]uint8, ...interface{}) []uint8 #47579

@@ -1 +0,0 @@
pkg runtime/debug, func SetMemoryLimit(int64) int64 #48409

@@ -1 +0,0 @@
pkg time, method (Time) ZoneBounds() (Time, Time) #50062

@@ -1 +0,0 @@
pkg sort, func Find(int, func(int) int) (int, bool) #50340

@@ -1 +0,0 @@
pkg os/exec, method (*Cmd) Environ() []string #50599

@@ -1,5 +0,0 @@
pkg encoding/binary, type AppendByteOrder interface { AppendUint16, AppendUint32, AppendUint64, String } #50601
pkg encoding/binary, type AppendByteOrder interface, AppendUint16([]uint8, uint16) []uint8 #50601
pkg encoding/binary, type AppendByteOrder interface, AppendUint32([]uint8, uint32) []uint8 #50601
pkg encoding/binary, type AppendByteOrder interface, AppendUint64([]uint8, uint64) []uint8 #50601
pkg encoding/binary, type AppendByteOrder interface, String() string #50601

@@ -1,9 +0,0 @@
pkg crypto/x509, func ParseRevocationList([]uint8) (*RevocationList, error) #50674
pkg crypto/x509, method (*RevocationList) CheckSignatureFrom(*Certificate) error #50674
pkg crypto/x509, type RevocationList struct, AuthorityKeyId []uint8 #50674
pkg crypto/x509, type RevocationList struct, Extensions []pkix.Extension #50674
pkg crypto/x509, type RevocationList struct, Issuer pkix.Name #50674
pkg crypto/x509, type RevocationList struct, Raw []uint8 #50674
pkg crypto/x509, type RevocationList struct, RawIssuer []uint8 #50674
pkg crypto/x509, type RevocationList struct, RawTBSRevocationList []uint8 #50674
pkg crypto/x509, type RevocationList struct, Signature []uint8 #50674

@@ -1,40 +0,0 @@
pkg sync/atomic, method (*Bool) CompareAndSwap(bool, bool) bool #50860
pkg sync/atomic, method (*Bool) Load() bool #50860
pkg sync/atomic, method (*Bool) Store(bool) #50860
pkg sync/atomic, method (*Bool) Swap(bool) bool #50860
pkg sync/atomic, method (*Int32) Add(int32) int32 #50860
pkg sync/atomic, method (*Int32) CompareAndSwap(int32, int32) bool #50860
pkg sync/atomic, method (*Int32) Load() int32 #50860
pkg sync/atomic, method (*Int32) Store(int32) #50860
pkg sync/atomic, method (*Int32) Swap(int32) int32 #50860
pkg sync/atomic, method (*Int64) Add(int64) int64 #50860
pkg sync/atomic, method (*Int64) CompareAndSwap(int64, int64) bool #50860
pkg sync/atomic, method (*Int64) Load() int64 #50860
pkg sync/atomic, method (*Int64) Store(int64) #50860
pkg sync/atomic, method (*Int64) Swap(int64) int64 #50860
pkg sync/atomic, method (*Pointer[$0]) CompareAndSwap(*$0, *$0) bool #50860
pkg sync/atomic, method (*Pointer[$0]) Load() *$0 #50860
pkg sync/atomic, method (*Pointer[$0]) Store(*$0) #50860
pkg sync/atomic, method (*Pointer[$0]) Swap(*$0) *$0 #50860
pkg sync/atomic, method (*Uint32) Add(uint32) uint32 #50860
pkg sync/atomic, method (*Uint32) CompareAndSwap(uint32, uint32) bool #50860
pkg sync/atomic, method (*Uint32) Load() uint32 #50860
pkg sync/atomic, method (*Uint32) Store(uint32) #50860
pkg sync/atomic, method (*Uint32) Swap(uint32) uint32 #50860
pkg sync/atomic, method (*Uint64) Add(uint64) uint64 #50860
pkg sync/atomic, method (*Uint64) CompareAndSwap(uint64, uint64) bool #50860
pkg sync/atomic, method (*Uint64) Load() uint64 #50860
pkg sync/atomic, method (*Uint64) Store(uint64) #50860
pkg sync/atomic, method (*Uint64) Swap(uint64) uint64 #50860
pkg sync/atomic, method (*Uintptr) Add(uintptr) uintptr #50860
pkg sync/atomic, method (*Uintptr) CompareAndSwap(uintptr, uintptr) bool #50860
pkg sync/atomic, method (*Uintptr) Load() uintptr #50860
pkg sync/atomic, method (*Uintptr) Store(uintptr) #50860
pkg sync/atomic, method (*Uintptr) Swap(uintptr) uintptr #50860
pkg sync/atomic, type Bool struct #50860
pkg sync/atomic, type Int32 struct #50860
pkg sync/atomic, type Int64 struct #50860
pkg sync/atomic, type Pointer[$0 interface{}] struct #50860
pkg sync/atomic, type Uint32 struct #50860
pkg sync/atomic, type Uint64 struct #50860
pkg sync/atomic, type Uintptr struct #50860

@@ -1,61 +0,0 @@
pkg go/doc, method (*Package) HTML(string) []uint8 #51082
pkg go/doc, method (*Package) Markdown(string) []uint8 #51082
pkg go/doc, method (*Package) Parser() *comment.Parser #51082
pkg go/doc, method (*Package) Printer() *comment.Printer #51082
pkg go/doc, method (*Package) Synopsis(string) string #51082
pkg go/doc, method (*Package) Text(string) []uint8 #51082
pkg go/doc/comment, func DefaultLookupPackage(string) (string, bool) #51082
pkg go/doc/comment, method (*DocLink) DefaultURL(string) string #51082
pkg go/doc/comment, method (*Heading) DefaultID() string #51082
pkg go/doc/comment, method (*List) BlankBefore() bool #51082
pkg go/doc/comment, method (*List) BlankBetween() bool #51082
pkg go/doc/comment, method (*Parser) Parse(string) *Doc #51082
pkg go/doc/comment, method (*Printer) Comment(*Doc) []uint8 #51082
pkg go/doc/comment, method (*Printer) HTML(*Doc) []uint8 #51082
pkg go/doc/comment, method (*Printer) Markdown(*Doc) []uint8 #51082
pkg go/doc/comment, method (*Printer) Text(*Doc) []uint8 #51082
pkg go/doc/comment, type Block interface, unexported methods #51082
pkg go/doc/comment, type Code struct #51082
pkg go/doc/comment, type Code struct, Text string #51082
pkg go/doc/comment, type Doc struct #51082
pkg go/doc/comment, type Doc struct, Content []Block #51082
pkg go/doc/comment, type Doc struct, Links []*LinkDef #51082
pkg go/doc/comment, type DocLink struct #51082
pkg go/doc/comment, type DocLink struct, ImportPath string #51082
pkg go/doc/comment, type DocLink struct, Name string #51082
pkg go/doc/comment, type DocLink struct, Recv string #51082
pkg go/doc/comment, type DocLink struct, Text []Text #51082
pkg go/doc/comment, type Heading struct #51082
pkg go/doc/comment, type Heading struct, Text []Text #51082
pkg go/doc/comment, type Italic string #51082
pkg go/doc/comment, type Link struct #51082
pkg go/doc/comment, type Link struct, Auto bool #51082
pkg go/doc/comment, type Link struct, Text []Text #51082
pkg go/doc/comment, type Link struct, URL string #51082
pkg go/doc/comment, type LinkDef struct #51082
pkg go/doc/comment, type LinkDef struct, Text string #51082
pkg go/doc/comment, type LinkDef struct, URL string #51082
pkg go/doc/comment, type LinkDef struct, Used bool #51082
pkg go/doc/comment, type List struct #51082
pkg go/doc/comment, type List struct, ForceBlankBefore bool #51082
pkg go/doc/comment, type List struct, ForceBlankBetween bool #51082
pkg go/doc/comment, type List struct, Items []*ListItem #51082
pkg go/doc/comment, type ListItem struct #51082
pkg go/doc/comment, type ListItem struct, Content []Block #51082
pkg go/doc/comment, type ListItem struct, Number string #51082
pkg go/doc/comment, type Paragraph struct #51082
pkg go/doc/comment, type Paragraph struct, Text []Text #51082
pkg go/doc/comment, type Parser struct #51082
pkg go/doc/comment, type Parser struct, LookupPackage func(string) (string, bool) #51082
pkg go/doc/comment, type Parser struct, LookupSym func(string, string) bool #51082
pkg go/doc/comment, type Parser struct, Words map[string]string #51082
pkg go/doc/comment, type Plain string #51082
pkg go/doc/comment, type Printer struct #51082
pkg go/doc/comment, type Printer struct, DocLinkBaseURL string #51082
pkg go/doc/comment, type Printer struct, DocLinkURL func(*DocLink) string #51082
pkg go/doc/comment, type Printer struct, HeadingID func(*Heading) string #51082
pkg go/doc/comment, type Printer struct, HeadingLevel int #51082
pkg go/doc/comment, type Printer struct, TextCodePrefix string #51082
pkg go/doc/comment, type Printer struct, TextPrefix string #51082
pkg go/doc/comment, type Printer struct, TextWidth int #51082
pkg go/doc/comment, type Text interface, unexported methods #51082

@@ -1 +0,0 @@
pkg io, type LimitedReader struct, Err error #51115

@@ -1 +0,0 @@
pkg time, method (Duration) Abs() Duration #51414

@@ -1,2 +0,0 @@
pkg encoding/binary, func AppendUvarint([]uint8, uint64) []uint8 #51644
pkg encoding/binary, func AppendVarint([]uint8, int64) []uint8 #51644

@@ -1,2 +0,0 @@
pkg go/types, method (*Func) Origin() *Func #51682
pkg go/types, method (*Var) Origin() *Var #51682

@@ -1,2 +0,0 @@
pkg regexp/syntax, const ErrNestingDepth = "expression nests too deeply" #51684
pkg regexp/syntax, const ErrNestingDepth ErrorCode #51684

@@ -1,36 +0,0 @@
pkg debug/pe, const IMAGE_COMDAT_SELECT_ANY = 2 #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_ANY ideal-int #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_ASSOCIATIVE = 5 #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_ASSOCIATIVE ideal-int #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_EXACT_MATCH = 4 #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_EXACT_MATCH ideal-int #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_LARGEST = 6 #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_LARGEST ideal-int #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_NODUPLICATES = 1 #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_NODUPLICATES ideal-int #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_SAME_SIZE = 3 #51686
pkg debug/pe, const IMAGE_COMDAT_SELECT_SAME_SIZE ideal-int #51686
pkg debug/pe, const IMAGE_SCN_CNT_CODE = 32 #51686
pkg debug/pe, const IMAGE_SCN_CNT_CODE ideal-int #51686
pkg debug/pe, const IMAGE_SCN_CNT_INITIALIZED_DATA = 64 #51686
pkg debug/pe, const IMAGE_SCN_CNT_INITIALIZED_DATA ideal-int #51686
pkg debug/pe, const IMAGE_SCN_CNT_UNINITIALIZED_DATA = 128 #51686
pkg debug/pe, const IMAGE_SCN_CNT_UNINITIALIZED_DATA ideal-int #51686
pkg debug/pe, const IMAGE_SCN_LNK_COMDAT = 4096 #51686
pkg debug/pe, const IMAGE_SCN_LNK_COMDAT ideal-int #51686
pkg debug/pe, const IMAGE_SCN_MEM_DISCARDABLE = 33554432 #51686
pkg debug/pe, const IMAGE_SCN_MEM_DISCARDABLE ideal-int #51686
pkg debug/pe, const IMAGE_SCN_MEM_EXECUTE = 536870912 #51686
pkg debug/pe, const IMAGE_SCN_MEM_EXECUTE ideal-int #51686
pkg debug/pe, const IMAGE_SCN_MEM_READ = 1073741824 #51686
pkg debug/pe, const IMAGE_SCN_MEM_READ ideal-int #51686
pkg debug/pe, const IMAGE_SCN_MEM_WRITE = 2147483648 #51686
pkg debug/pe, const IMAGE_SCN_MEM_WRITE ideal-int #51686
pkg debug/pe, method (*File) COFFSymbolReadSectionDefAux(int) (*COFFSymbolAuxFormat5, error) #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct, Checksum uint32 #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct, NumLineNumbers uint16 #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct, NumRelocs uint16 #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct, SecNum uint16 #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct, Selection uint8 #51686
pkg debug/pe, type COFFSymbolAuxFormat5 struct, Size uint32 #51686

@@ -18,22 +18,77 @@ Do not send CLs removing the interior tags from such phrases.
release notes. Go 1.19 is expected to be released in August 2022.
</strong>
</p>
<h2 id="language">Changes to the language</h2>
<p>
TODO: complete this section
<p><!-- https://go.dev/issue/52038 -->
There is only one small change to the language,
a <a href="https://github.com/golang/go/issues/52038">very small correction</a>
to the <a href="/ref/spec#Declarations_and_scope">scope of type parameters in method declarations</a>.
Existing programs are unaffected.
</p>
<h2 id="mem">Memory Model</h2>
<p><!-- https://go.dev/issue/50859 -->
The <a href="/ref/mem">Go memory model</a> has been revised to align Go with
the memory model used by C, C++, Java, JavaScript, Rust, and Swift.
Go only provides sequentially consistent atomics, not any of the more relaxed forms found in other languages.
Along with the memory model update,
Go 1.19 introduces <a href="#sync/atomic">new types in the <code>sync/atomic</code> package</a>
that make it easier to use atomic values, such as
<a href="/pkg/sync/atomic/#Int64">atomic.Int64</a>
and
<a href="/pkg/sync/atomic/#Pointer">atomic.Pointer[T]</a>.
</p>
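
As an informal illustration of the sequentially consistent guarantee described above, a minimal sketch using one of the new typed atomics; the Load that observes true ensures the earlier write to msg is visible:

package main

import (
    "fmt"
    "sync/atomic"
)

func main() {
    var ready atomic.Bool
    var msg string

    go func() {
        msg = "hello"     // ordinary write...
        ready.Store(true) // ...published by the atomic store
    }()

    for !ready.Load() {
        // Busy-wait; once Load observes true, the write to msg
        // is guaranteed to be visible (happens-before).
    }
    fmt.Println(msg)
}
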
<h2 id="ports">Ports</h2>
<p>
TODO: complete this section, or delete if not needed
<h3 id="loong64">Loongson 64-bit</h3>
<p><!-- https://go.dev/issue/46229 -->
Go 1.19 adds support for the Loongson 64-bit architecture LoongArch
on Linux (<code>GOOS=linux</code>, <code>GOARCH=loong64</code>).
</p>
<h3 id="riscv64">RISC-V</h3>
<p><!-- CL 402374 -->
The <code>riscv64</code> port now supports passing function arguments
and results using registers. Benchmarking shows typical performance
improvements of 10% or more on <code>riscv64</code>.
</p>
<h2 id="tools">Tools</h2>
<p>
TODO: complete this section, or delete if not needed
<h3 id="go-doc">Doc Comments</h3>
<p><!-- https://go.dev/issue/51082 --><!-- CL 384265, CL 397276, CL 397278, CL 397279, CL 397281, CL 397284 -->
Go 1.19 adds support for links, lists, and clearer headings in doc comments.
As part of this change, <a href="/cmd/gofmt"><code>gofmt</code></a>
now reformats doc comments to make their rendered meaning clearer.
See “<a href="/doc/comment">Go Doc Comments</a>”
for syntax details and descriptions of common mistakes now highlighted by <code>gofmt</code>.
As another part of this change, the new package <a href="/pkg/go/doc/comment">go/doc/comment</a>
provides parsing and reformatting of doc comments
as well as support for rendering them to HTML, Markdown, and text.
</p>
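
A hypothetical doc comment showing the link and list syntax now recognized; the package and identifiers are illustrative only:

// Package cache provides a tiny in-memory cache.
//
// The zero value is ready to use. For concurrent use, see [sync.Map].
//
// Typical reasons to prefer this package over a plain map are:
//   - entries can expire automatically
//   - lookups never panic on a nil receiver
//
// See https://go.dev/doc/comment for the full doc comment syntax.
package cache
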
<h3 id="go-unix">New <code>unix</code> build constraint</h3>
<p><!-- CL 389934 --><!-- https://go.dev/issue/20322 --><!-- https://go.dev/issue/51572 -->
The build constraint <code>unix</code> is now recognized
in <code>//go:build</code> lines. The constraint is satisfied
if the target operating system, also known as <code>GOOS</code>, is
a Unix or Unix-like system. For the 1.19 release it is satisfied
if <code>GOOS</code> is one of
<code>aix</code>, <code>android</code>, <code>darwin</code>,
<code>dragonfly</code>, <code>freebsd</code>, <code>hurd</code>,
<code>illumos</code>, <code>ios</code>, <code>linux</code>,
<code>netbsd</code>, <code>openbsd</code>, or <code>solaris</code>.
In future releases the <code>unix</code> constraint may match
additional newly supported operating systems.
</p>
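
A minimal sketch of how the constraint might be used; the package name and function are illustrative:

//go:build unix

// This file is compiled only when GOOS is one of the Unix-like systems
// listed above; a _windows.go counterpart could provide the other half.
package permutil

import "syscall"

// tightenUmask clears group/other write bits for files created by this process.
func tightenUmask() (old int) {
    return syscall.Umask(0o022)
}
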
<h3 id="go-command">Go command</h3>
<p>
TODO: complete this section.
</p>
<!-- https://go.dev/issue/51461 -->
<p>
@@ -63,31 +118,61 @@ Do not send CLs removing the interior tags from such phrases.
and <code>GOGCCFLAGS</code> variables it reports.
</p>
<p><!-- CL 410821 -->
The <code>go</code> command now caches information necessary to load some modules,
which should result in a speed-up of some <code>go</code> <code>list</code> invocations.
</p>
<h4 id="go-unix">New <code>unix</code> build constraint</h4>
<h3 id="vet">Vet</h3>
<p><!-- CL 389934 -->
The build constraint <code>unix</code> is now recognized
in <code>//go:build</code> lines. The constraint is satisfied
if the target operating system, also known as <code>GOOS</code>, is
a Unix or Unix-like system. For the 1.19 release it is satisfied
if <code>GOOS</code> is one of
<code>aix</code>, <code>android</code>, <code>darwin</code>,
<code>dragonfly</code>, <code>freebsd</code>, <code>hurd</code>,
<code>illumos</code>, <code>ios</code>, <code>linux</code>,
<code>netbsd</code>, <code>openbsd</code>, or <code>solaris</code>.
In future releases the <code>unix</code> constraint may match
additional newly supported operating systems.
<p><!-- https://go.dev/issue/47528 -->
The <code>vet</code> checker “errorsas” now reports when
<a href="/pkg/errors/#As"><code>errors.As</code></a> is called
with a second argument of type <code>*error</code>,
a common mistake.
</p>
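
A sketch contrasting the mistake the check reports with the intended pattern; the package and function are illustrative:

package example

import (
    "errors"
    "io/fs"
)

func classify(err error) string {
    // Mistake now reported by vet's errorsas check: a target of type
    // *error trivially matches any error, which is almost never intended.
    var anyErr error
    _ = errors.As(err, &anyErr)

    // Intended usage: pass a pointer to a concrete error type.
    var pathErr *fs.PathError
    if errors.As(err, &pathErr) {
        return "path error on " + pathErr.Path
    }
    return "other"
}
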
<h2 id="runtime">Runtime</h2>
<p><!-- https://go.dev/issue/48409 -->
TODO: soft memory limit
<p><!-- https://go.dev/issue/48409 --><!-- CL 397018 -->
The runtime now includes support for a soft memory limit. This memory limit
includes the Go heap and all other memory managed by the runtime, and
excludes external memory sources such as mappings of the binary itself,
memory managed in other languages, and memory held by the operating system on
behalf of the Go program. This limit may be managed via
<a href="/pkg/runtime/debug/#SetMemoryLimit"><code>runtime/debug.SetMemoryLimit</code></a>
or the equivalent
<a href="/pkg/runtime/#hdr-Environment_Variables"><code>GOMEMLIMIT</code></a>
environment variable. The limit works in conjunction with
<a href="/pkg/runtime/debug/#SetGCPercent"><code>runtime/debug.SetGCPercent</code></a>
/ <a href="/pkg/runtime/#hdr-Environment_Variables"><code>GOGC</code></a>,
and will be respected even if <code>GOGC=off</code>, allowing Go programs to
always make maximal use of their memory limit, improving resource efficiency
in some cases. See <a href="/doc/gc-guide">the GC guide</a> for
a detailed guide explaining the soft memory limit in more detail, as well as
a variety of common use-cases and scenarios. Please note that small memory
limits, on the order of tens of megabytes or less, are less likely to be
respected due to external latency factors, such as OS scheduling. See
<a href="https://go.dev/issue/52433">issue 52433</a> for more details. Larger
memory limits, on the order of hundreds of megabytes or more, are stable and
production-ready.
</p>
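
A minimal sketch of setting the limit from code instead of via GOMEMLIMIT; the 512 MiB figure is arbitrary:

package main

import "runtime/debug"

func main() {
    // Equivalent to GOMEMLIMIT=512MiB: cap runtime-managed memory.
    debug.SetMemoryLimit(512 << 20)

    // Optionally rely on the limit alone; same effect as GOGC=off.
    debug.SetGCPercent(-1)

    // ... run the application ...
}
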
<p><!-- CL 353989 -->
In order to limit the effects of GC thrashing when the program's live heap
size approaches the soft memory limit, the Go runtime also attempts to limit
total GC CPU utilization to 50%, excluding idle time, choosing to use more
memory over preventing application progress. In practice, we expect this limit
to only play a role in exceptional cases, and the new
<a href="/pkg/runtime/metrics/#hdr-Supported_metrics">runtime metric</a>
<code>/gc/limiter/last-enabled:gc-cycle</code> reports when this last
occurred.
</p>
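
The metric can be read with the runtime/metrics package; a minimal sketch:

package main

import (
    "fmt"
    "runtime/metrics"
)

func main() {
    s := []metrics.Sample{{Name: "/gc/limiter/last-enabled:gc-cycle"}}
    metrics.Read(s)
    if s[0].Value.Kind() == metrics.KindUint64 {
        // 0 means the CPU limiter has never kicked in.
        fmt.Println("limiter last enabled in GC cycle", s[0].Value.Uint64())
    }
}
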
<p><!-- https://go.dev/issue/44163 -->
TODO: idle mark workers
The runtime now schedules many fewer GC worker goroutines on idle operating
system threads when the application is idle enough to force a periodic GC
cycle.
</p>
<p><!-- https://go.dev/issue/18138 --><!-- CL 345889 -->
@@ -97,13 +182,18 @@ Do not send CLs removing the interior tags from such phrases.
space on below-average goroutines.
</p>
<p><!-- https://go.dev/issue/46279 --><!-- CL 393354 -->
<p><!-- https://go.dev/issue/46279 --><!-- CL 393354 --><!-- CL 392415 -->
On Unix operating systems, Go programs that import package
<a href="/pkg/os/">os</a> now automatically increase the open file limit
(<code>RLIMIT_NOFILE</code>) to the maximum allowed value. Programs that need
a lower limit (for compatibility with <code>select</code>, for example) can
set the limit back as needed, or lower the hard limit prior to starting the
Go program.
(<code>RLIMIT_NOFILE</code>) to the maximum allowed value;
that is, they change the soft limit to match the hard limit.
This corrects artificially low limits set on some systems for compatibility with very old C programs using the
<a href="https://en.wikipedia.org/wiki/Select_(Unix)"><i>select</i></a> system call.
Go programs are not helped by that limit, and instead even simple programs like <code>gofmt</code>
often ran out of file descriptors on such systems when processing many files in parallel.
One impact of this change is that Go programs that in turn execute very old C programs in child processes
may run those programs with too high a limit.
This can be corrected by setting the hard limit before invoking the Go program.
</p>
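
For a program that genuinely needs the old, lower soft limit (for example, to stay compatible with select), a Unix-only sketch of restoring it; the package name and the 1024 figure are illustrative:

//go:build unix

package fdlimit

import "syscall"

func restoreLowFDLimit() error {
    var lim syscall.Rlimit
    if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim); err != nil {
        return err
    }
    lim.Cur = 1024 // back below FD_SETSIZE-style limits
    return syscall.Setrlimit(syscall.RLIMIT_NOFILE, &lim)
}
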
<p><!-- https://go.dev/issue/51485 --><!-- CL 390421 -->
@@ -121,6 +211,11 @@ Do not send CLs removing the interior tags from such phrases.
functionality.
</p>
<p><!-- https://go.dev/issue/44853 -->
The <a href="/doc/go1.18#go-build-asan">address sanitizer support added in Go 1.18</a>
now handles function arguments and global variables more precisely.
</p>
<h2 id="compiler">Compiler</h2>
<p><!-- https://go.dev/issue/5496 --><!-- CL 357330, 395714, 403979 -->
@@ -131,23 +226,34 @@ Do not send CLs removing the interior tags from such phrases.
on the order of 20% faster.
(<code>GOARCH=amd64</code> and <code>GOARCH=arm64</code> only)
</p>
<p>
TODO: <a href="https://go.dev/cl/402374">https://go.dev/cl/402374</a>: enable regabi on riscv64 by default
<p><!-- CL 391014 -->
The Go compiler now requires the <code>-p=importpath</code> flag to
build a linkable object file. This is already supplied by
the <code>go</code> command and by Bazel. Any other build systems
that invoke the Go compiler directly will need to make sure they
pass this flag as well.
</p>
<p>
TODO: <a href="https://go.dev/cl/391014">https://go.dev/cl/391014</a>: The Go compiler now requires the -p=importpath flag, which is already supplied by the go command and by Bazel. Any other build systems that invoke the Go compiler directly will need to make sure they pass this flag as well in order to use Go 1.19.: cmd/compile: require -p flag
</p>
<p>
TODO: complete this section, or delete if not needed
<h2 id="assembler">Assembler</h2>
<p><!-- CL 404298 -->
Like the compiler, the assembler now requires the
<code>-p=importpath</code> flag to build a linkable object file.
This is already supplied by the <code>go</code> command. Any other
build systems that invoke the Go assembler directly will need to
make sure they pass this flag as well.
</p>
<h2 id="linker">Linker</h2>
<p>
TODO: complete this section, or delete if not needed
<p><!-- https://go.dev/issue/50796, CL 380755 -->
On ELF platforms, the linker now emits compressed DWARF sections in
the standard gABI format (<code>SHF_COMPRESSED</code>), instead of
the legacy <code>.zdebug</code> format.
</p>
<h2 id="library">Core library</h2>
<h3 id="atomic_types">New atomic types</h3>
<p><!-- https://go.dev/issue/50860 --><!-- CL 381317 -->
The <a href="/pkg/sync/atomic/"><code>sync/atomic</code></a> package defines new atomic types
<a href="/pkg/sync/atomic/#Bool"><code>Bool</code></a>,
@@ -163,17 +269,30 @@ Do not send CLs removing the interior tags from such phrases.
the need to convert to
<a href="/pkg/unsafe/#Pointer"><code>unsafe.Pointer</code></a> at call sites.
<a href="/pkg/sync/atomic/#Int64"><code>Int64</code></a> and
<a href="/pkg/sync/atomic/#Uint64"><code>Uint64</code></a> automatically
receive 64-bit alignment on ARM, 386, and 32-bit MIPS required for 64-bit
atomics on these systems.
<a href="/pkg/sync/atomic/#Uint64"><code>Uint64</code></a> are
automatically aligned to 64-bit boundaries in structs and allocated data,
even on 32-bit systems.
</p>
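
A sketch of the ergonomics: typed atomic fields need no manual alignment, and atomic.Pointer avoids unsafe.Pointer conversions; Config and the package are placeholders:

package config

import "sync/atomic"

type Config struct {
    Verbose bool
}

// stats works on 32-bit platforms too: atomic.Int64 fields are
// automatically 64-bit aligned, so no manual field ordering is needed.
type stats struct {
    requests atomic.Int64
    errors   atomic.Int64
}

var current atomic.Pointer[Config] // typed; no unsafe.Pointer conversions

func update(c *Config) { current.Store(c) }
func get() *Config     { return current.Load() }
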
<p>
TODO: <a href="https://go.dev/issue/51940">https://go.dev/issue/51940</a>: all: move dev.boringcrypto into main branch behind GOEXPERIMENT
<h3 id="os-exec-path">PATH lookups</h3>
<p><!-- https://go.dev/issue/43724 -->
<!-- CL 381374 --><!-- CL 403274 -->
<a href="/pkg/os/exec/#Command"><code>Command</code></a> and
<a href="/pkg/os/exec/#LookPath"><code>LookPath</code></a> no longer
allow results from a PATH search to be found relative to the current directory.
This removes a <a href="/blog/path-security">common source of security problems</a>
but may also break existing programs that depend on using, say, <code>exec.Command("prog")</code>
to run a binary named <code>prog</code> (or, on Windows, <code>prog.exe</code>) in the current directory.
See the <a href="/pkg/os/exec/"><code>os/exec</code></a> package documentation for
information about how best to update such programs.
</p>
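
Programs that deliberately want the old behavior can opt back in per command, following the pattern described in the os/exec documentation; the binary name is illustrative:

package main

import (
    "errors"
    "log"
    "os/exec"
)

func main() {
    cmd := exec.Command("prog") // resolved via PATH; "." no longer considered
    if errors.Is(cmd.Err, exec.ErrDot) {
        // Explicitly allow running ./prog from the current directory.
        cmd.Err = nil
    }
    if err := cmd.Run(); err != nil {
        log.Fatal(err)
    }
}
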
<p>
TODO: complete this section
<p><!-- https://go.dev/issue/43947 -->
On Windows, <code>Command</code> and <code>LookPath</code> now respect the
<a href="https://docs.microsoft.com/en-us/windows/win32/api/processenv/nf-processenv-needcurrentdirectoryforexepatha"><code>NoDefaultCurrentDirectoryInExePath</code></a>
environment variable, making it possible to disable
the default implicit search of “<code>.</code>” in PATH lookups on Windows systems.
</p>
<h3 id="minor_library_changes">Minor changes to the library</h3>
@ -181,41 +300,45 @@ Do not send CLs removing the interior tags from such phrases.
As always, there are various minor changes and updates to the library,
made with the Go 1 <a href="/doc/go1compat">promise of compatibility</a>
in mind.
</p>
<p>
TODO: complete this section
There are also various performance improvements, not enumerated here.
</p>
<dl id="archive/zip"><dt><a href="/pkg/archive/zip/">archive/zip</a></dt>
<dd>
<p><!-- CL 387976 -->
TODO: <a href="https://go.dev/cl/387976">https://go.dev/cl/387976</a>: permit zip files to have prefixes
<a href="/pkg/archive/zip/#Reader"><code>Reader</code></a>
now ignores non-ZIP data at the start of a ZIP file, matching most other implementations.
This is necessary to read some Java JAR files, among other uses.
</p>
</dd>
</dl><!-- archive/zip -->
<dl id="crypto/rand"><dt><a href="/pkg/crypto/rand/">crypto/rand</a></dt>
<dd>
<p><!-- CL 370894 -->
TODO: <a href="https://go.dev/cl/370894">https://go.dev/cl/370894</a>: batch and buffer calls to getrandom/getentropy
<p><!-- CL 370894 --><!-- CL 390038 -->
<a href="/pkg/crypto/rand/#Read"><code>Read</code></a> no longer buffers
random data obtained from the operating system between calls.
</p>
<p><!-- CL 375215 -->
TODO: <a href="https://go.dev/cl/375215">https://go.dev/cl/375215</a>: use fast key erasure RNG on plan9 instead of ANSI X9.31
</p>
<p><!-- CL 390038 -->
TODO: <a href="https://go.dev/cl/390038">https://go.dev/cl/390038</a>: remove all buffering
On Plan 9, <code>Read</code> has been reimplemented, replacing the ANSI
X9.31 algorithm with fast key erasure.
</p>
</dd>
</dl><!-- crypto/rand -->
<dl id="crypto/tls"><dt><a href="/pkg/crypto/tls/">crypto/tls</a></dt>
<dd>
<p><!-- CL 400974 -->
<p><!-- CL 400974 --><!-- https://go.dev/issue/45428 -->
The <code>tls10default</code> <code>GODEBUG</code> option has been
removed. It is still possible to enable TLS 1.0 client-side by setting
<code>Config.MinVersion</code>.
<a href="/pkg/crypto/tls#Config.MinVersion"><code>Config.MinVersion</code></a>.
</p>
<p><!-- CL 384894 -->
The TLS server and client now reject duplicate extensions in TLS
handshakes, as required by RFC 5246, Section 7.4.1.4 and RFC 8446, Section
4.2.
</p>
</dd>
</dl><!-- crypto/tls -->
@ -223,39 +346,154 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="crypto/x509"><dt><a href="/pkg/crypto/x509/">crypto/x509</a></dt>
<dd>
<p><!-- CL 285872 -->
TODO: <a href="https://go.dev/cl/285872">https://go.dev/cl/285872</a>: disable signing with MD5WithRSA
<a href="/pkg/crypto/x509/#CreateCertificate"><code>CreateCertificate</code></a>
no longer supports creating certificates with <code>SignatureAlgorithm</code>
set to <code>MD5WithRSA</code>.
</p>
<p><!-- CL 400494 -->
<code>CreateCertificate</code> no longer accepts negative serial numbers.
</p>
<p><!-- CL 383215 -->
<a href="/pkg/crypto/x509/#ParseCertificate"><code>ParseCertificate</code></a>
and <a href="/pkg/crypto/x509/#ParseCertificateRequest"><code>ParseCertificateRequest</code></a>
now reject certificates and CSRs which contain duplicate extensions.
</p>
<p><!-- https://go.dev/issue/46057 --><!-- https://go.dev/issue/35044 --><!-- CL 398237 --><!-- CL 400175 --><!-- CL 388915 -->
The new <a href="/pkg/crypto/x509/#CertPool.Clone"><code>CertPool.Clone</code></a>
and <a href="/pkg/crypto/x509/#CertPool.Equal"><code>CertPool.Equal</code></a>
methods allow cloning a <code>CertPool</code> and checking the equality of two
<code>CertPool</code>s respectively.
</p>
<p><!-- https://go.dev/issue/50674 --><!-- CL 390834 -->
The new function <a href="/pkg/crypto/x509/#ParseRevocationList"><code>ParseRevocationList</code></a>
provides a faster, safer-to-use CRL parser that returns a
<a href="/pkg/crypto/x509/#RevocationList"><code>RevocationList</code></a>.
To support this addition, <code>RevocationList</code> adds new fields
<code>RawIssuer</code>, <code>Signature</code>,
<code>AuthorityKeyId</code>, and <code>Extensions</code>.
The new method <a href="/pkg/crypto/x509/#RevocationList.CheckSignatureFrom"><code>RevocationList.CheckSignatureFrom</code></a>
checks that the signature on a CRL is a valid signature from a
<a href="/pkg/crypto/x509/#Certificate"><code>Certificate</code></a>.
With the new CRL functionality, the existing functions
<a href="/pkg/crypto/x509/#ParseCRL"><code>ParseCRL</code></a> and
<a href="/pkg/crypto/x509/#ParseDERCRL"><code>ParseDERCRL</code></a> are deprecated.
Additionally the method <a href="/pkg/crypto/x509#Certificate.CheckCRLSignature"><code>Certificate.CheckCRLSignature</code></a>
is deprecated.
</p>
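<p>
A sketch of the new API; <code>der</code> and <code>issuer</code> are assumed
inputs (raw DER bytes and the issuing CA certificate):
</p>
<pre>
package crlcheck

import "crypto/x509"

// VerifyCRL parses a DER-encoded CRL and checks that it was signed by issuer.
func VerifyCRL(der []byte, issuer *x509.Certificate) (*x509.RevocationList, error) {
	rl, err := x509.ParseRevocationList(der)
	if err != nil {
		return nil, err
	}
	if err := rl.CheckSignatureFrom(issuer); err != nil {
		return nil, err
	}
	return rl, nil
}
</pre>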
<p><!-- CL 389555 -->
When building paths, <a href="/pkg/crypto/x509/#Certificate.Verify"><code>Certificate.Verify</code></a>
now considers certificates to be equal when the subjects, public keys, and SANs
are all equal. Before, it required byte-for-byte equality.
</p>
</dd>
</dl><!-- crypto/x509 -->
<dl id="crypto/x509/pkix"><dt><a href="/pkg/crypto/x509/pkix">crypto/x509/pkix</a></dt>
<dd>
<p><!-- CL 390834 -->
The types <a href="/pkg/crypto/x509/pkix#CertificateList"><code>CertificateList</code></a> and
<a href="/pkg/crypto/x509/pkix#TBSCertificateList"><code>TBSCertificateList</code></a>
have been deprecated. The new <a href="#crypto/x509"><code>crypto/x509</code> CRL functionality</a>
should be used instead.
</p>
</dd>
</dl><!-- crypto/x509/pkix -->
<dl id="debug"><dt><a href="/pkg/debug/">debug</a></dt>
<dd>
<p><!-- CL 396735 -->
The new <code>EM_LONGARCH</code> and <code>R_LARCH_*</code> constants
support the loong64 port.
</p>
</dd>
</dl><!-- debug -->
<dl id="debug/pe"><dt><a href="/pkg/debug/pe/">debug/pe</a></dt>
<dd>
<p><!-- https://go.dev/issue/51868 --><!-- CL 394534 -->
The new <a href="/pkg/debug/pe/#File.COFFSymbolReadSectionDefAux"><code>File.COFFSymbolReadSectionDefAux</code></a>
method, which returns a <a href="/pkg/debug/pe/#COFFSymbolAuxFormat5"><code>COFFSymbolAuxFormat5</code></a>,
provides access to COMDAT information in PE file sections.
These are supported by new <code>IMAGE_COMDAT_*</code> and <code>IMAGE_SCN_*</code> constants.
</p>
</dd>
</dl><!-- debug/pe -->
<dl id="encoding/binary"><dt><a href="/pkg/encoding/binary/">encoding/binary</a></dt>
<dd>
<p><!-- CL 386017 -->
TODO: <a href="https://go.dev/cl/386017">https://go.dev/cl/386017</a>: add AppendByteOrder
<p><!-- https://go.dev/issue/50601 --><!-- CL 386017 --><!-- CL 389636 -->
The new interface
<a href="/pkg/encoding/binary/#AppendByteOrder"><code>AppendByteOrder</code></a>
provides efficient methods for appending a <code>uint16</code>, <code>uint32</code>, or <code>uint64</code>
to a byte slice.
<a href="/pkg/encoding/binary/#BigEndian"><code>BigEndian</code></a> and
<a href="/pkg/encoding/binary/#LittleEndian"><code>LittleEndian</code></a> now implement this interface.
</p>
<p><!-- https://go.dev/issue/51644 --><!-- CL 400176 -->
Similarly, the new functions
<a href="/pkg/encoding/binary/#AppendUvarint"><code>AppendUvarint</code></a> and
<a href="/pkg/encoding/binary/#AppendVarint"><code>AppendVarint</code></a>
are efficient appending versions of
<a href="/pkg/encoding/binary/#PutUvarint"><code>PutUvarint</code></a> and
<a href="/pkg/encoding/binary/#PutVarint"><code>PutVarint</code></a>.
</p>
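<p>
A short example combining both additions:
</p>
<pre>
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, 0, 16)
	buf = binary.BigEndian.AppendUint32(buf, 0xCAFEF00D) // new AppendByteOrder method
	buf = binary.AppendUvarint(buf, 300)                 // appending form of PutUvarint
	fmt.Printf("% x\n", buf)                             // ca fe f0 0d ac 02
}
</pre>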
</dd>
</dl><!-- encoding/binary -->
<dl id="encoding/csv"><dt><a href="/pkg/encoding/csv/">encoding/csv</a></dt>
<dd>
<p><!-- CL 405675 -->
TODO: <a href="https://go.dev/cl/405675">https://go.dev/cl/405675</a>: add Reader.InputOffset method
<p><!-- https://go.dev/issue/43401 --><!-- CL 405675 -->
The new method
<a href="/pkg/encoding/csv/#Reader.InputOffset"><code>Reader.InputOffset</code></a>
reports the reader's current input position as a byte offset,
analogous to <code>encoding/json</code>'s
<a href="/pkg/encoding/json/#Decoder.InputOffset"><code>Decoder.InputOffset</code></a>.
</p>
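<p>
For example, a reader loop that reports the offset after each record:
</p>
<pre>
package main

import (
	"encoding/csv"
	"fmt"
	"io"
	"log"
	"strings"
)

func main() {
	r := csv.NewReader(strings.NewReader("a,b\nc,d\n"))
	for {
		rec, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(rec, r.InputOffset()) // byte offset just past the record read
	}
}
</pre>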
</dd>
</dl><!-- encoding/csv -->
<dl id="encoding/xml"><dt><a href="/pkg/encoding/xml/">encoding/xml</a></dt>
<dd>
<p><!-- https://go.dev/issue/45628 --><!-- CL 311270 -->
The new method
<a href="/pkg/encoding/xml/#Decoder.InputPos"><code>Decoder.InputPos</code></a>
reports the reader's current input position as a line and column,
analogous to <code>encoding/csv</code>'s
<a href="/pkg/encoding/csv/#Decoder.FieldPos"><code>Decoder.FieldPos</code></a>.
</p>
</dd>
</dl><!-- encoding/xml -->
<dl id="flag"><dt><a href="/pkg/flag/">flag</a></dt>
<dd>
<p><!-- CL 313329 -->
TODO: <a href="https://go.dev/cl/313329">https://go.dev/cl/313329</a>: add TextVar function
<p><!-- https://go.dev/issue/45754 --><!-- CL 313329 -->
The new function
<a href="/pkg/flag/#TextVar"><code>TextVar</code></a>
defines a flag with a value implementing
<a href="/pkg/encoding/#TextUnmarshaler"><code>encoding.TextUnmarshaler</code></a>,
allowing command-line flag variables to have types such as
<a href="/pkg/math/big/#Int"><code>big.Int</code></a>,
<a href="/pkg/net/netip/#Addr"><code>netip.Addr</code></a>, and
<a href="/pkg/time/#Time"><code>time.Time</code></a>.
</p>
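<p>
A sketch of a <code>netip.Addr</code>-valued flag; the flag name and default
are arbitrary:
</p>
<pre>
package main

import (
	"flag"
	"fmt"
	"net/netip"
)

func main() {
	var addr netip.Addr
	// The default value must implement encoding.TextMarshaler; netip.Addr does.
	flag.TextVar(&amp;addr, "addr", netip.MustParseAddr("127.0.0.1"), "address to listen on")
	flag.Parse()
	fmt.Println("addr =", addr)
}
</pre>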
</dd>
</dl><!-- flag -->
<dl id="fmt"><dt><a href="/pkg/fmt/">fmt</a></dt>
<dd>
<p><!-- CL 406177 -->
TODO: <a href="https://go.dev/cl/406177">https://go.dev/cl/406177</a>: add Append, Appendln, Appendf
<p><!-- https://go.dev/issue/47579 --><!-- CL 406177 -->
The new functions
<a href="/pkg/fmt/#Append"><code>Append</code></a>,
<a href="/pkg/fmt/#Appendf"><code>Appendf</code></a>, and
<a href="/pkg/fmt/#Appendln"><code>Appendln</code></a>
append formatted data to byte slices.
</p>
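<p>
For example:
</p>
<pre>
package main

import "fmt"

func main() {
	buf := []byte("log: ")
	buf = fmt.Appendf(buf, "%d of %d done; ", 3, 10)
	buf = fmt.Append(buf, "bye")
	fmt.Printf("%s\n", buf) // log: 3 of 10 done; bye
}
</pre>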
</dd>
</dl><!-- fmt -->
@ -263,31 +501,88 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="go/parser"><dt><a href="/pkg/go/parser/">go/parser</a></dt>
<dd>
<p><!-- CL 403696 -->
TODO: <a href="https://go.dev/cl/403696">https://go.dev/cl/403696</a>: parser to accept ~x as unary expression
The parser now recognizes <code>~x</code> as a unary expression with operator
<a href="/pkg/go/token#TILDE">token.TILDE</a>,
allowing better error recovery when a type constraint such as <code>~int</code> is used in an incorrect context.
</p>
</dd>
</dl><!-- go/parser -->
<dl id="go/types"><dt><a href="/pkg/go/types/">go/types</a></dt>
<dd>
<p><!-- https://go.dev/issue/51682 --><!-- CL 395535 -->
The new methods <a href="/pkg/go/types/#Func.Origin"><code>Func.Origin</code></a>
and <a href="/pkg/go/types/#Var.Origin"><code>Var.Origin</code></a> return the
corresponding <a href="/pkg/go/types/#Object"><code>Object</code></a> of the
generic type for synthetic <a href="/pkg/go/types/#Func"><code>Func</code></a>
and <a href="/pkg/go/types/#Var"><code>Var</code></a> objects created during type
instantiation.
</p>
<p><!-- https://go.dev/issue/52728 --><!-- CL 404885 -->
It is no longer possible to produce an infinite number of distinct-but-identical
<a href="/pkg/go/types/#Named"><code>Named</code></a> type instantiations via
recursive calls to
<a href="/pkg/go/types/#Named.Underlying"><code>Named.Underlying</code></a> or
<a href="/pkg/go/types/#Named.Method"><code>Named.Method</code></a>.
</p>
</dd>
</dl><!-- go/types -->
<dl id="hash/maphash"><dt><a href="/pkg/hash/maphash/">hash/maphash</a></dt>
<dd>
<p><!-- https://go.dev/issue/42710 --><!-- CL 392494 -->
The new functions
<a href="/pkg/hash/maphash/#Bytes"><code>Bytes</code></a>
and
<a href="/pkg/hash/maphash/#String"><code>String</code></a>
provide an efficient way to hash a single byte slice or string.
They are equivalent to using the more general
<a href="/pkg/hash/maphash/#Hash"><code>Hash</code></a>
with a single write, but they avoid setup overhead for small inputs.
</p>
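<p>
A short sketch showing that the two helpers agree for the same seed and
equivalent input:
</p>
<pre>
package main

import (
	"fmt"
	"hash/maphash"
)

func main() {
	seed := maphash.MakeSeed()
	h1 := maphash.String(seed, "hello")
	h2 := maphash.Bytes(seed, []byte("hello"))
	fmt.Println(h1 == h2) // true
}
</pre>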
</dd>
</dl><!-- hash/maphash -->
<dl id="html/template"><dt><a href="/pkg/html/template/">html/template</a></dt>
<dd>
<p><!-- https://go.dev/issue/46121 --><!-- CL 389156 -->
The type <a href="/pkg/html/template/#FuncMap"><code>FuncMap</code></a>
is now an alias for
<code>text/template</code>'s <a href="/pkg/text/template/#FuncMap"><code>FuncMap</code></a>
instead of its own named type.
This allows writing code that operates on a <code>FuncMap</code> from either setting.
</p>
</dd>
</dl><!-- html/template -->
<dl id="image/draw"><dt><a href="/pkg/image/draw/">image/draw</a></dt>
<dd>
<p><!-- CL 396795 -->
<code>Draw</code> with the <code>Src</code> operator preserves
<a href="/pkg/image/draw/#Draw"><code>Draw</code></a> with the
<a href="/pkg/image/draw/#Src"><code>Src</code></a> operator preserves
non-premultiplied-alpha colors when destination and source images are
both <code>*image.NRGBA</code> (or both <code>*image.NRGBA64</code>).
both <a href="/pkg/image/#NRGBA"><code>image.NRGBA</code></a>
or both <a href="/pkg/image/#NRGBA64"><code>image.NRGBA64</code></a>.
This reverts a behavior change accidentally introduced by a Go 1.18
library optimization, to match the behavior in Go 1.17 and earlier.
library optimization; the code now matches the behavior in Go 1.17 and earlier.
</p>
</dd>
</dl><!-- image/draw -->
<dl id="io"><dt><a href="/pkg/io/">io</a></dt>
<dd>
<p><!-- CL 396215 -->
TODO: <a href="https://go.dev/cl/396215">https://go.dev/cl/396215</a>: add an Err field to LimitedReader
<p><!-- https://go.dev/issue/51566 --><!-- CL 400236 -->
<a href="/pkg/io/#NopCloser"><code>NopCloser</code></a>'s result now implements
<a href="/pkg/io/#WriterTo"><code>WriterTo</code></a>
whenever its input does.
</p>
<p><!-- CL 400236 -->
TODO: <a href="https://go.dev/cl/400236">https://go.dev/cl/400236</a>: NopCloser forward WriterTo implementations if the reader supports it
<p><!-- https://go.dev/issue/50842 -->
<a href="/pkg/io/#MultiReader"><code>MultiReader</code></a>'s result now implements
<a href="/pkg/io/#WriterTo"><code>WriterTo</code></a> unconditionally.
If any underlying reader does not implement <code>WriterTo</code>,
it is simulated appropriately.
</p>
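<p>
A sketch that checks for the forwarded interface; the comments restate the
behavior described above:
</p>
<pre>
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	rc := io.NopCloser(strings.NewReader("payload"))
	_, ok := rc.(io.WriterTo) // strings.Reader has WriteTo, so the wrapper forwards it
	fmt.Println(ok)

	mr := io.MultiReader(strings.NewReader("a"), strings.NewReader("b"))
	_, ok = mr.(io.WriterTo) // implemented unconditionally, per the note above
	fmt.Println(ok)
}
</pre>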
</dd>
</dl><!-- io -->
@ -303,7 +598,7 @@ Do not send CLs removing the interior tags from such phrases.
type <code>text/javascript; charset=utf-8</code>.
Applications that expect <code>text/plain</code> on Windows must
now explicitly call
<a href="/pkg/mime#AddExtensionType"><code>AddExtensionType</code></a>.
<a href="/pkg/mime/#AddExtensionType"><code>AddExtensionType</code></a>.
</p>
</dd>
</dl>
@ -322,7 +617,7 @@ Do not send CLs removing the interior tags from such phrases.
issue tracker</a>.
</p>
<p><!-- CL 396877 -->
<p><!-- https://go.dev/issue/51428 --><!-- CL 396877 -->
When a net package function or method returns an "I/O timeout"
error, the error will now satisfy <code>errors.Is(err,
context.DeadlineExceeded)</code>. When a net package function
@ -333,10 +628,23 @@ Do not send CLs removing the interior tags from such phrases.
package function or method to return an error, while preserving
backward compatibility for error messages.
</p>
</dd>
<dd>
<p><!-- CL 400654 -->
TODO: <a href="https://go.dev/cl/400654">https://go.dev/cl/400654</a>: permit use of Resolver.PreferGo, netgo on Windows and Plan 9
<p><!-- https://go.dev/issue/33097 --><!-- CL 400654 -->
<a href="/pkg/net/#Resolver.PreferGo"><code>Resolver.PreferGo</code></a>
is now implemented on Windows and Plan 9. It previously only worked on Unix
platforms. Combined with
<a href="/pkg/net/#Dialer.Resolver"><code>Dialer.Resolver</code></a> and
<a href="/pkg/net/#Resolver.Dial"><code>Resolver.Dial</code></a>, it's now
possible to write portable programs and be in control of all DNS name lookups
when dialing.
</p>
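<p>
As a sketch, forcing the Go resolver and routing every query through a single
DNS server; the <code>8.8.8.8:53</code> address is only a placeholder:
</p>
<pre>
package main

import (
	"context"
	"fmt"
	"log"
	"net"
)

func main() {
	r := &amp;net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, network, "8.8.8.8:53") // DNS server of your choice
		},
	}
	addrs, err := r.LookupHost(context.Background(), "example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(addrs)
}
</pre>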
<p>
The <code>net</code> package now has initial support for the <code>netgo</code>
build tag on Windows. When used, the package uses the Go DNS client (as used
by <code>Resolver.PreferGo</code>) instead of asking Windows for
DNS results. The upstream DNS server it discovers from Windows
may not yet be correct with complex system network configurations, however.
</p>
</dd>
</dl><!-- net -->
@ -344,7 +652,22 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="net/http"><dt><a href="/pkg/net/http/">net/http</a></dt>
<dd>
<p><!-- CL 269997 -->
TODO: <a href="https://go.dev/cl/269997">https://go.dev/cl/269997</a>: allow sending 1xx responses
<a href="/pkg/net/http/#ResponseWriter"><code>ResponseWriter.WriteHeader</code></a>
now supports sending user-defined 1xx informational headers.
</p>
<p><!-- CL 361397 -->
The <code>io.ReadCloser</code> returned by
<a href="/pkg/net/http/#MaxBytesReader"><code>MaxBytesReader</code></a>
will now return the defined error type
<a href="/pkg/net/http/#MaxBytesError"><code>MaxBytesError</code></a>
when its read limit is exceeded.
</p>
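<p>
A sketch of a handler distinguishing an oversized body from other read errors;
the 1 MiB limit is arbitrary:
</p>
<pre>
package main

import (
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, req *http.Request) {
	body := http.MaxBytesReader(w, req.Body, 1&lt;&lt;20) // 1 MiB limit
	if _, err := io.ReadAll(body); err != nil {
		var tooBig *http.MaxBytesError
		if errors.As(err, &amp;tooBig) {
			http.Error(w, fmt.Sprintf("body larger than %d bytes", tooBig.Limit),
				http.StatusRequestEntityTooLarge)
			return
		}
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Fprintln(w, "ok")
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
</pre>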
<p><!-- CL 375354 -->
The HTTP client will handle a 3xx response without a
<code>Location</code> header by returning it to the caller,
rather than treating it as an error.
</p>
</dd>
</dl><!-- net/http -->
@ -352,50 +675,76 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="net/url"><dt><a href="/pkg/net/url/">net/url</a></dt>
<dd>
<p><!-- CL 374654 -->
TODO: <a href="https://go.dev/cl/374654">https://go.dev/cl/374654</a>: add JoinPath, URL.JoinPath
The new
<a href="/pkg/net/url/#JoinPath"><code>JoinPath</code></a>
function and
<a href="/pkg/net/url/#URL.JoinPath"><code>URL.JoinPath</code></a>
method create a new <code>URL</code> by joining a list of path
elements.
</p>
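<p>
A short example of both forms; <code>example.com</code> is a placeholder host:
</p>
<pre>
package main

import (
	"fmt"
	"log"
	"net/url"
)

func main() {
	s, err := url.JoinPath("https://example.com/api", "users", "42")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s) // https://example.com/api/users/42

	base, err := url.Parse("https://example.com/api")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(base.JoinPath("users", "42")) // same URL, built from a parsed base
}
</pre>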
<p><!-- https://go.dev/issue/46059 -->
The <code>URL</code> type now distinguishes between URLs with no
authority and URLs with an empty authority. For example,
<code>http:///path</code> has an empty authority (host),
while <code>http:/path</code> has none.
</p>
<p>
The new <a href="/pkg/net/url/#URL"><code>URL</code></a> field
<code>OmitHost</code> is set to <code>true</code> when a
<code>URL</code> has an empty authority.
</p>
</dd>
</dl><!-- net/url -->
<dl id="os"><dt><a href="/pkg/os/">os</a></dt>
<dd>
<p><!-- CL 392415 -->
TODO: <a href="https://go.dev/cl/392415">https://go.dev/cl/392415</a>: raise open file rlimit at startup
</p>
</dd>
</dl><!-- os -->
<dl id="os/exec"><dt><a href="/pkg/os/exec/">os/exec</a></dt>
<dd><!-- https://go.dev/issue/50599 -->
<p>
An <code>exec.Cmd</code> with a non-empty <code>Dir</code> and a
nil <code>Env</code> now implicitly sets the <code>PWD</code> environment
<dd>
<p><!-- https://go.dev/issue/50599 --><!-- CL 401340 -->
A <a href="/pkg/os/exec/#Cmd"><code>Cmd</code></a> with a non-empty <code>Dir</code> field
and nil <code>Env</code> now implicitly sets the <code>PWD</code> environment
variable for the subprocess to match <code>Dir</code>.
</p>
<p>
The new method <code>(*exec.Cmd).Environ</code> reports the
The new method <a href="/pkg/os/exec/#Cmd.Environ"><code>Cmd.Environ</code></a> reports the
environment that would be used to run the command, including the
aforementioned <code>PWD</code> variable.
implicitly set <code>PWD</code> variable.
</p>
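<p>
A Unix-specific sketch (it runs <code>pwd</code>) showing the implicitly added
variable:
</p>
<pre>
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	cmd := exec.Command("pwd")
	cmd.Dir = "/tmp"
	for _, kv := range cmd.Environ() {
		if strings.HasPrefix(kv, "PWD=") {
			fmt.Println(kv) // PWD=/tmp, added because Dir is set and Env is nil
		}
	}
}
</pre>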
</dd>
</dl> <!-- os/exec -->
<dl id="reflect"><dt><a href="/pkg/reflect/">reflect</a></dt>
<dd>
<p><!-- CL 357331 -->
The method <a href="/pkg/reflect/#Value.Bytes"><code>Value.Bytes</code></a> now accepts addressable arrays in addition to slices.
<p><!-- https://go.dev/issue/47066 --><!-- CL 357331 -->
The method <a href="/pkg/reflect/#Value.Bytes"><code>Value.Bytes</code></a>
now accepts addressable arrays in addition to slices.
</p>
<p><!-- CL 400954 -->
The methods <a href="/pkg/reflect/#Value.Len"><code>Value.Len</code></a> and <a href="/pkg/reflect/#Value.Cap"><code>Value.Cap</code></a> now successfully operate on a pointer to an array and return the length of that array, to match what the <a href="https://go.dev/ref/spec#Length_and_capacity">builtin <code>len</code> and <code>cap</code> functions do</a>.
The methods <a href="/pkg/reflect/#Value.Len"><code>Value.Len</code></a>
and <a href="/pkg/reflect/#Value.Cap"><code>Value.Cap</code></a>
now successfully operate on a pointer to an array and return the length of that array,
to match what the <a href="/ref/spec#Length_and_capacity">builtin
<code>len</code> and <code>cap</code> functions do</a>.
</p>
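<p>
A short sketch of both relaxations:
</p>
<pre>
package main

import (
	"fmt"
	"reflect"
)

func main() {
	arr := [4]byte{'g', 'o', '1', '9'}

	v := reflect.ValueOf(&amp;arr).Elem() // addressable array value
	fmt.Println(v.Bytes()) // [103 111 49 57]

	p := reflect.ValueOf(&amp;arr) // pointer to array
	fmt.Println(p.Len(), p.Cap()) // 4 4, matching what len and cap report
}
</pre>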
</dd>
</dl><!-- reflect -->
<dl id="regexp/syntax"><dt><a href="/pkg/regexp/syntax/">regexp/syntax</a></dt>
<dd>
<p><!-- https://go.dev/issue/51684 --><!-- CL 401076 -->
Go 1.18 release candidate 1, Go 1.17.8, and Go 1.16.15 included a security fix
to the regular expression parser, making it reject very deeply nested expressions.
Because Go patch releases do not introduce new API,
the parser returned <a href="/pkg/regexp/syntax/#ErrInternalError"><code>syntax.ErrInternalError</code></a> in this case.
Go 1.19 adds a more specific error, <a href="/pkg/regexp/syntax/#ErrNestingDepth"><code>syntax.ErrNestingDepth</code></a>,
which the parser now returns instead.
</p>
</dd>
</dl><!-- regexp -->
<dl id="runtime"><dt><a href="/pkg/runtime/">runtime</a></dt>
<dd>
<p><!-- https://go.dev/issue/51461 -->
The <code>GOROOT</code> function now returns the empty string
The <a href="/pkg/runtime/#GOROOT"><code>GOROOT</code></a> function now returns the empty string
(instead of <code>"go"</code>) when the binary was built with
the <code>-trimpath</code> flag set and the <code>GOROOT</code>
variable is not set in the process environment.
@ -406,20 +755,27 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="runtime/metrics"><dt><a href="/pkg/runtime/metrics/">runtime/metrics</a></dt>
<dd>
<p><!-- https://go.dev/issue/47216 --><!-- CL 404305 -->
The new <code>/sched/gomaxprocs:threads</code> metric reports the current
<code>runtime.GOMAXPROCS</code> value.
The new <code>/sched/gomaxprocs:threads</code>
<a href="/pkg/runtime/metrics/#hdr-Supported_metrics">metric</a> reports
the current
<a href="/pkg/runtime/#GOMAXPROCS"><code>runtime.GOMAXPROCS</code></a>
value.
</p>
<p><!-- https://go.dev/issue/47216 --><!-- CL 404306 -->
The new <code>/cgo/go-to-c-calls:calls</code> metric reports the total
number of calls made from Go to C. This metric is identical to the <a
href="/pkg/runtime/#NumCgoCall"><code>runtime.NumCgoCall</code></a>
The new <code>/cgo/go-to-c-calls:calls</code>
<a href="/pkg/runtime/metrics/#hdr-Supported_metrics">metric</a>
reports the total number of calls made from Go to C. This metric is
identical to the
<a href="/pkg/runtime/#NumCgoCall"><code>runtime.NumCgoCall</code></a>
function.
</p>
<p><!-- https://go.dev/issue/48409 --><!-- CL 403614 -->
The new <code>/gc/limiter/last-enabled:gc-cycle</code> metric reports the
last GC cycle when the GC CPU limiter was enabled.
The new <code>/gc/limiter/last-enabled:gc-cycle</code>
<a href="/pkg/runtime/metrics/#hdr-Supported_metrics">metric</a>
reports the last GC cycle when the GC CPU limiter was enabled. See the
<a href="#runtime">runtime notes</a> for details about the GC CPU limiter.
</p>
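<p>
A sketch that reads the new metrics:
</p>
<pre>
package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	samples := []metrics.Sample{
		{Name: "/sched/gomaxprocs:threads"},
		{Name: "/cgo/go-to-c-calls:calls"},
	}
	metrics.Read(samples)
	for _, s := range samples {
		fmt.Println(s.Name, "=", s.Value.Uint64()) // both metrics are uint64-valued
	}
}
</pre>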
</dd>
</dl><!-- runtime/metrics -->
@ -445,22 +801,22 @@ Do not send CLs removing the interior tags from such phrases.
<dd>
<p><!-- https://go.dev/issue/49761 --><!-- CL 333529 -->
The race detector has been upgraded to use thread sanitizer
version v3.
<ul>
<li>
Faster (typically 1.5 to 2 times faster)
</li>
<li>
Uses less memory (typically 1/2 as much)
</li>
<li>
Supports unlimited numbers of goroutines
</li>
</ul>
version v3 on all supported platforms
except <code>windows/amd64</code>
and <code>openbsd/amd64</code>, which remain on v2.
Compared to v2, it is now typically 1.5x to 2x faster, uses half
as much memory, and supports an unlimited number of goroutines.
</p>
<p><!-- CL 336549 -->
The race detector is now supported on S390.
The race detector is now supported on <code>GOARCH=s390x</code>.
</p>
<p><!-- https://go.dev/issue/52090 -->
Race detector support for <code>openbsd/amd64</code> has been
removed from thread sanitizer upstream, so it is unlikely to
ever be updated from v2.
</p>
</dd>
</dl><!-- runtime/race -->
@ -468,9 +824,10 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="runtime/trace"><dt><a href="/pkg/runtime/trace/">runtime/trace</a></dt>
<dd>
<p><!-- CL 400795 -->
When used together with the
<a href="/pkg/runtime/pprof#StartCPUProfile">CPU profiler</a>, the
execution trace includes CPU profile samples.
When tracing and the
<a href="/pkg/runtime/pprof#StartCPUProfile">CPU profiler</a> are
enabled simultaneously, the execution trace includes CPU profile
samples as instantaneous events.
</p>
</dd>
</dl><!-- runtime/trace -->
@ -482,6 +839,14 @@ Do not send CLs removing the interior tags from such phrases.
<a href="https://arxiv.org/pdf/2106.05123.pdf">pattern-defeating quicksort</a>, which
is faster for several common scenarios.
</p>
<p><!-- https://go.dev/issue/50340 --><!-- CL 396514 -->
The new function
<a href="/pkg/sort/#Find">Find</a>
is like
<a href="/pkg/sort/#Search">Search</a>
but often easier to use: it returns an additional boolean reporting whether an equal value was found.
</p>
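<p>
For example, searching a sorted slice:
</p>
<pre>
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	s := []string{"ant", "bee", "cat"}
	target := "bee"
	i, found := sort.Find(len(s), func(i int) int {
		return strings.Compare(target, s[i])
	})
	fmt.Println(i, found) // 1 true
}
</pre>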
</dd>
</dd>
</dl><!-- sort -->
@ -489,8 +854,9 @@ Do not send CLs removing the interior tags from such phrases.
<dd>
<p><!-- CL 397255 -->
<a href="/pkg/strconv/#Quote"><code>Quote</code></a>
and related functions now quote the rune 007F as <code>\x7f</code>,
not <code>\u007f</code>.
and related functions now quote the rune U+007F as <code>\x7f</code>,
not <code>\u007f</code>,
for consistency with other ASCII values.
</p>
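<p>
For example:
</p>
<pre>
package main

import (
	"fmt"
	"strconv"
)

func main() {
	fmt.Println(strconv.Quote("hello\x7f")) // "hello\x7f" (previously "hello\u007f")
}
</pre>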
</dd>
</dl><!-- strconv -->
@ -515,8 +881,39 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="time"><dt><a href="/pkg/time/">time</a></dt>
<dd>
<p><!-- CL 393515 -->
TODO: <a href="https://go.dev/cl/393515">https://go.dev/cl/393515</a>: add Duration.Abs
<p><!-- https://go.dev/issue/51414 --><!-- CL 393515 -->
The new method
<a href="/pkg/time/#Duration.Abs"><code>Duration.Abs</code></a>
provides a convenient and safe way to take the absolute value of a duration,
converting −2⁶³ to 2⁶³−1.
(This boundary case can happen as the result of subtracting a recent time from the zero time.)
</p>
<p><!-- https://go.dev/issue/50062 --><!-- CL 405374 -->
The new method
<a href="/pkg/time/#Time.ZoneBounds"><code>Time.ZoneBounds</code></a>
returns the start and end times of the time zone in effect at a given time.
It can be used in a loop to enumerate all the known time zone transitions at a given location.
</p>
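<p>
A short sketch of both additions:
</p>
<pre>
package main

import (
	"fmt"
	"time"
)

func main() {
	d := -90 * time.Minute
	fmt.Println(d.Abs()) // 1h30m0s

	start, end := time.Now().ZoneBounds()
	fmt.Println(start, end) // bounds of the zone offset in effect right now
}
</pre>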
</dd>
</dl><!-- time -->
<!-- Silence these false positives from x/build/cmd/relnote: -->
<!-- CL 382460 -->
<!-- CL 384154 -->
<!-- CL 384554 -->
<!-- CL 392134 -->
<!-- CL 392414 -->
<!-- CL 396215 -->
<!-- CL 403058 -->
<!-- CL 410133 -->
<!-- https://go.dev/issue/27837 -->
<!-- https://go.dev/issue/38340 -->
<!-- https://go.dev/issue/42516 -->
<!-- https://go.dev/issue/45713 -->
<!-- https://go.dev/issue/46654 -->
<!-- https://go.dev/issue/48257 -->
<!-- https://go.dev/issue/50447 -->
<!-- https://go.dev/issue/50720 -->
<!-- https://go.dev/issue/50792 -->
<!-- https://go.dev/issue/51115 -->
<!-- https://go.dev/issue/51447 -->

View File

@ -1,6 +1,6 @@
<!--{
"Title": "The Go Memory Model",
"Subtitle": "Version of May 31, 2014",
"Subtitle": "Version of June 6, 2022",
"Path": "/ref/mem"
}-->
@ -8,12 +8,9 @@
p.rule {
font-style: italic;
}
span.event {
font-style: italic;
}
</style>
<h2>Introduction</h2>
<h2 id="introduction">Introduction</h2>
<p>
The Go memory model specifies the conditions under which
@ -22,7 +19,7 @@ observe values produced by writes to the same variable in a different goroutine.
</p>
<h2>Advice</h2>
<h3 id="advice">Advice</h3>
<p>
Programs that modify data being simultaneously accessed by multiple goroutines
@ -44,90 +41,237 @@ you are being too clever.
Don't be clever.
</p>
<h2>Happens Before</h2>
<h3 id="overview">Informal Overview</h3>
<p>
Within a single goroutine, reads and writes must behave
as if they executed in the order specified by the program.
That is, compilers and processors may reorder the reads and writes
executed within a single goroutine only when the reordering
does not change the behavior within that goroutine
as defined by the language specification.
Because of this reordering, the execution order observed
by one goroutine may differ from the order perceived
by another. For example, if one goroutine
executes <code>a = 1; b = 2;</code>, another might observe
the updated value of <code>b</code> before the updated value of <code>a</code>.
Go approaches its memory model in much the same way as the rest of the language,
aiming to keep the semantics simple, understandable, and useful.
This section gives a general overview of the approach and should suffice for most programmers.
The memory model is specified more formally in the next section.
</p>
<p>
To specify the requirements of reads and writes, we define
<i>happens before</i>, a partial order on the execution
of memory operations in a Go program. If event <span class="event">e<sub>1</sub></span> happens
before event <span class="event">e<sub>2</sub></span>, then we say that <span class="event">e<sub>2</sub></span> happens after <span class="event">e<sub>1</sub></span>.
Also, if <span class="event">e<sub>1</sub></span> does not happen before <span class="event">e<sub>2</sub></span> and does not happen
after <span class="event">e<sub>2</sub></span>, then we say that <span class="event">e<sub>1</sub></span> and <span class="event">e<sub>2</sub></span> happen concurrently.
</p>
<p class="rule">
Within a single goroutine, the happens-before order is the
order expressed by the program.
A <em>data race</em> is defined as
a write to a memory location happening concurrently with another read or write to that same location,
unless all the accesses involved are atomic data accesses as provided by the <code>sync/atomic</code> package.
As noted already, programmers are strongly encouraged to use appropriate synchronization
to avoid data races.
In the absence of data races, Go programs behave as if all the goroutines
were multiplexed onto a single processor.
This property is sometimes referred to as DRF-SC: data-race-free programs
execute in a sequentially consistent manner.
</p>
<p>
A read <span class="event">r</span> of a variable <code>v</code> is <i>allowed</i> to observe a write <span class="event">w</span> to <code>v</code>
if both of the following hold:
While programmers should write Go programs without data races,
there are limitations to what a Go implementation can do in response to a data race.
An implementation may always react to a data race by reporting the race and terminating the program.
Otherwise, each read of a single-word-sized or sub-word-sized memory location
must observe a value actually written to that location (perhaps by a concurrently executing goroutine)
and not yet overwritten.
These implementation constraints make Go more like Java or JavaScript,
in that most races have a limited number of outcomes,
and less like C and C++, where the meaning of any program with a race
is entirely undefined, and the compiler may do anything at all.
Go's approach aims to make errant programs more reliable and easier to debug,
while still insisting that races are errors and that tools can diagnose and report them.
</p>
<h2 id="model">Memory Model</h2>
<p>
The following formal definition of Go's memory model closely follows
the approach presented by Hans-J. Boehm and Sarita V. Adve in
<a href="https://www.hpl.hp.com/techreports/2008/HPL-2008-56.pdf">Foundations of the C++ Concurrency Memory Model</a>”,
published in PLDI 2008.
The definition of data-race-free programs and the guarantee of sequential consistency
for race-free programs are equivalent to the ones in that work.
</p>
<p>
The memory model describes the requirements on program executions,
which are made up of goroutine executions,
which in turn are made up of memory operations.
</p>
<p>
A <i>memory operation</i> is modeled by four details:
</p>
<ul>
<li>its kind, indicating whether it is an ordinary data read, an ordinary data write,
or a <i>synchronizing operation</i> such as an atomic data access,
a mutex operation, or a channel operation,
<li>its location in the program,
<li>the memory location or variable being accessed, and
<li>the values read or written by the operation.
</ul>
<p>
Some memory operations are <i>read-like</i>, including read, atomic read, mutex lock, and channel receive.
Other memory operations are <i>write-like</i>, including write, atomic write, mutex unlock, channel send, and channel close.
Some, such as atomic compare-and-swap, are both read-like and write-like.
</p>
<p>
A <i>goroutine execution</i> is modeled as a set of memory operations executed by a single goroutine.
</p>
<p>
<b>Requirement 1</b>:
The memory operations in each goroutine must correspond to a correct sequential execution of that goroutine,
given the values read from and written to memory.
That execution must be consistent with the <i>sequenced before</i> relation,
defined as the partial order requirements set out by the <a href="/ref/spec">Go language specification</a>
for Go's control flow constructs as well as the <a href="/ref/spec#Order_of_evaluation">order of evaluation for expressions</a>.
</p>
<p>
A Go <i>program execution</i> is modeled as a set of goroutine executions,
together with a mapping <i>W</i> that specifies the write-like operation that each read-like operation reads from.
(Multiple executions of the same program can have different program executions.)
</p>
<p>
<b>Requirement 2</b>:
For a given program execution, the mapping <i>W</i>, when limited to synchronizing operations,
must be explainable by some implicit total order of the synchronizing operations
that is consistent with sequencing and the values read and written by those operations.
</p>
<p>
The <i>synchronized before</i> relation is a partial order on synchronizing memory operations,
derived from <i>W</i>.
If a synchronizing read-like memory operation <i>r</i>
observes a synchronizing write-like memory operation <i>w</i>
(that is, if <i>W</i>(<i>r</i>) = <i>w</i>),
then <i>w</i> is synchronized before <i>r</i>.
Informally, the synchronized before relation is a subset of the implied total order
mentioned in the previous paragraph,
limited to the information that <i>W</i> directly observes.
</p>
<p>
The <i>happens before</i> relation is defined as the transitive closure of the
union of the sequenced before and synchronized before relations.
</p>
<p>
<b>Requirement 3</b>:
For an ordinary (non-synchronizing) data read <i>r</i> on a memory location <i>x</i>,
<i>W</i>(<i>r</i>) must be a write <i>w</i> that is <i>visible</i> to <i>r</i>,
where visible means that both of the following hold:
<ol>
<li><span class="event">r</span> does not happen before <span class="event">w</span>.</li>
<li>There is no other write <span class="event">w'</span> to <code>v</code> that happens
after <span class="event">w</span> but before <span class="event">r</span>.</li>
<li><i>w</i> happens before <i>r</i>.
<li><i>w</i> does not happen before any other write <i>w'</i> (to <i>x</i>) that happens before <i>r</i>.
</ol>
<p>
To guarantee that a read <span class="event">r</span> of a variable <code>v</code> observes a
particular write <span class="event">w</span> to <code>v</code>, ensure that <span class="event">w</span> is the only
write <span class="event">r</span> is allowed to observe.
That is, <span class="event">r</span> is <i>guaranteed</i> to observe <span class="event">w</span> if both of the following hold:
</p>
<ol>
<li><span class="event">w</span> happens before <span class="event">r</span>.</li>
<li>Any other write to the shared variable <code>v</code>
either happens before <span class="event">w</span> or after <span class="event">r</span>.</li>
</ol>
<p>
This pair of conditions is stronger than the first pair;
it requires that there are no other writes happening
concurrently with <span class="event">w</span> or <span class="event">r</span>.
A <i>read-write data race</i> on memory location <i>x</i>
consists of a read-like memory operation <i>r</i> on <i>x</i>
and a write-like memory operation <i>w</i> on <i>x</i>,
at least one of which is non-synchronizing,
which are unordered by happens before
(that is, neither <i>r</i> happens before <i>w</i>
nor <i>w</i> happens before <i>r</i>).
</p>
<p>
Within a single goroutine,
there is no concurrency, so the two definitions are equivalent:
a read <span class="event">r</span> observes the value written by the most recent write <span class="event">w</span> to <code>v</code>.
When multiple goroutines access a shared variable <code>v</code>,
they must use synchronization events to establish
happens-before conditions that ensure reads observe the
desired writes.
A <i>write-write data race</i> on memory location <i>x</i>
consists of two write-like memory operations <i>w</i> and <i>w'</i> on <i>x</i>,
at least one of which is non-synchronizing,
which are unordered by happens before.
</p>
<p>
The initialization of variable <code>v</code> with the zero value
for <code>v</code>'s type behaves as a write in the memory model.
Note that if there are no read-write or write-write data races on memory location <i>x</i>,
then any read <i>r</i> on <i>x</i> has only one possible <i>W</i>(<i>r</i>):
the single <i>w</i> that immediately precedes it in the happens before order.
</p>
<p>
Reads and writes of values larger than a single machine word
behave as multiple machine-word-sized operations in an
unspecified order.
More generally, it can be shown that any Go program that is data-race-free,
meaning it has no program executions with read-write or write-write data races,
can only have outcomes explained by some sequentially consistent interleaving
of the goroutine executions.
(The proof is the same as Section 7 of Boehm and Adve's paper cited above.)
This property is called DRF-SC.
</p>
<h2>Synchronization</h2>
<p>
The intent of the formal definition is to match
the DRF-SC guarantee provided to race-free programs
by other languages, including C, C++, Java, JavaScript, Rust, and Swift.
</p>
<h3>Initialization</h3>
<p>
Certain Go language operations such as goroutine creation and memory allocation
act as synchronization operations.
The effect of these operations on the synchronized-before partial order
is documented in the “Synchronization” section below.
Individual packages are responsible for providing similar documentation
for their own operations.
</p>
<h2 id="restrictions">Implementation Restrictions for Programs Containing Data Races</h2>
<p>
The preceding section gave a formal definition of data-race-free program execution.
This section informally describes the semantics that implementations must provide
for programs that do contain races.
</p>
<p>
First, any implementation can, upon detecting a data race,
report the race and halt execution of the program.
Implementations using ThreadSanitizer
(accessed with “<code>go</code> <code>build</code> <code>-race</code>”)
do exactly this.
</p>
<p>
Otherwise, a read <i>r</i> of a memory location <i>x</i>
that is not larger than a machine word must observe
some write <i>w</i> such that <i>r</i> does not happen before <i>w</i>
and there is no write <i>w'</i> such that <i>w</i> happens before <i>w'</i>
and <i>w'</i> happens before <i>r</i>.
That is, each read must observe a value written by a preceding or concurrent write.
</p>
<p>
Additionally, observation of acausal and “out of thin air” writes is disallowed.
</p>
<p>
Reads of memory locations larger than a single machine word
are encouraged but not required to meet the same semantics
as word-sized memory locations,
observing a single allowed write <i>w</i>.
For performance reasons,
implementations may instead treat larger operations
as a set of individual machine-word-sized operations
in an unspecified order.
This means that races on multiword data structures
can lead to inconsistent values not corresponding to a single write.
When the values depend on the consistency
of internal (pointer, length) or (pointer, type) pairs,
as can be the case for interface values, maps,
slices, and strings in most Go implementations,
such races can in turn lead to arbitrary memory corruption.
</p>
<p>
Examples of incorrect synchronization are given in the
“Incorrect synchronization” section below.
</p>
<p>
Examples of the limitations on implementations are given in the
“Incorrect compilation” section below.
</p>
<h2 id="synchronization">Synchronization</h2>
<h3 id="init">Initialization</h3>
<p>
Program initialization runs in a single goroutine,
@ -141,15 +285,15 @@ If a package <code>p</code> imports package <code>q</code>, the completion of
</p>
<p class="rule">
The start of the function <code>main.main</code> happens after
all <code>init</code> functions have finished.
The completion of all <code>init</code> functions is synchronized before
the start of the function <code>main.main</code>.
</p>
<h3>Goroutine creation</h3>
<h3 id="go">Goroutine creation</h3>
<p class="rule">
The <code>go</code> statement that starts a new goroutine
happens before the goroutine's execution begins.
is synchronized before the start of the goroutine's execution.
</p>
<p>
@ -174,11 +318,12 @@ calling <code>hello</code> will print <code>"hello, world"</code>
at some point in the future (perhaps after <code>hello</code> has returned).
</p>
<h3>Goroutine destruction</h3>
<h3 id="goexit">Goroutine destruction</h3>
<p>
The exit of a goroutine is not guaranteed to happen before
any event in the program. For example, in this program:
The exit of a goroutine is not guaranteed to be synchronized before
any event in the program.
For example, in this program:
</p>
<pre>
@ -203,7 +348,7 @@ use a synchronization mechanism such as a lock or channel
communication to establish a relative ordering.
</p>
<h3>Channel communication</h3>
<h3 id="chan">Channel communication</h3>
<p>
Channel communication is the main method of synchronization
@ -213,8 +358,8 @@ usually in a different goroutine.
</p>
<p class="rule">
A send on a channel happens before the corresponding
receive from that channel completes.
A send on a channel is synchronized before the completion of the
corresponding receive from that channel.
</p>
<p>
@ -239,13 +384,13 @@ func main() {
<p>
is guaranteed to print <code>"hello, world"</code>. The write to <code>a</code>
happens before the send on <code>c</code>, which happens before
the corresponding receive on <code>c</code> completes, which happens before
is sequenced before the send on <code>c</code>, which is synchronized before
the corresponding receive on <code>c</code> completes, which is sequenced before
the <code>print</code>.
</p>
<p class="rule">
The closing of a channel happens before a receive that returns a zero value
The closing of a channel is synchronized before a receive that returns a zero value
because the channel is closed.
</p>
@ -256,8 +401,8 @@ yields a program with the same guaranteed behavior.
</p>
<p class="rule">
A receive from an unbuffered channel happens before
the send on that channel completes.
A receive from an unbuffered channel is synchronized before the completion of
the corresponding send on that channel.
</p>
<p>
@ -283,8 +428,8 @@ func main() {
<p>
is also guaranteed to print <code>"hello, world"</code>. The write to <code>a</code>
happens before the receive on <code>c</code>, which happens before
the corresponding send on <code>c</code> completes, which happens
is sequenced before the receive on <code>c</code>, which is synchronized before
the corresponding send on <code>c</code> completes, which is sequenced
before the <code>print</code>.
</p>
@ -296,7 +441,7 @@ crash, or do something else.)
</p>
<p class="rule">
The <i>k</i>th receive on a channel with capacity <i>C</i> happens before the <i>k</i>+<i>C</i>th send from that channel completes.
The <i>k</i>th receive on a channel with capacity <i>C</i> is synchronized before the completion of the <i>k</i>+<i>C</i>th send from that channel.
</p>
<p>
@ -330,7 +475,7 @@ func main() {
}
</pre>
<h3>Locks</h3>
<h3 id="locks">Locks</h3>
<p>
The <code>sync</code> package implements two lock data types,
@ -339,7 +484,7 @@ The <code>sync</code> package implements two lock data types,
<p class="rule">
For any <code>sync.Mutex</code> or <code>sync.RWMutex</code> variable <code>l</code> and <i>n</i> &lt; <i>m</i>,
call <i>n</i> of <code>l.Unlock()</code> happens before call <i>m</i> of <code>l.Lock()</code> returns.
call <i>n</i> of <code>l.Unlock()</code> is synchronized before call <i>m</i> of <code>l.Lock()</code> returns.
</p>
<p>
@ -365,19 +510,29 @@ func main() {
<p>
is guaranteed to print <code>"hello, world"</code>.
The first call to <code>l.Unlock()</code> (in <code>f</code>) happens
The first call to <code>l.Unlock()</code> (in <code>f</code>) is synchronized
before the second call to <code>l.Lock()</code> (in <code>main</code>) returns,
which happens before the <code>print</code>.
which is sequenced before the <code>print</code>.
</p>
<p class="rule">
For any call to <code>l.RLock</code> on a <code>sync.RWMutex</code> variable <code>l</code>,
there is an <i>n</i> such that the <code>l.RLock</code> happens (returns) after call <i>n</i> to
<code>l.Unlock</code> and the matching <code>l.RUnlock</code> happens
before call <i>n</i>+1 to <code>l.Lock</code>.
there is an <i>n</i> such that the <i>n</i>th call to <code>l.Unlock</code>
is synchronized before the return from <code>l.RLock</code>,
and the matching call to <code>l.RUnlock</code> is synchronized before the return from call <i>n</i>+1 to <code>l.Lock</code>.
</p>
<h3>Once</h3>
<p class="rule">
A successful call to <code>l.TryLock</code> (or <code>l.TryRLock</code>)
is equivalent to a call to <code>l.Lock</code> (or <code>l.RLock</code>).
An unsuccessful call has no synchronizing effect at all.
As far as the memory model is concerned,
<code>l.TryLock</code> (or <code>l.TryRLock</code>)
may be considered to be able to return false
even when the mutex <i>l</i> is unlocked.
</p>
<h3 id="once">Once</h3>
<p>
The <code>sync</code> package provides a safe mechanism for
@ -389,7 +544,8 @@ until <code>f()</code> has returned.
</p>
<p class="rule">
A single call of <code>f()</code> from <code>once.Do(f)</code> happens (returns) before any call of <code>once.Do(f)</code> returns.
The completion of a single call of <code>f()</code> from <code>once.Do(f)</code>
is synchronized before the return of any call of <code>once.Do(f)</code>.
</p>
<p>
@ -424,13 +580,60 @@ The result will be that <code>"hello, world"</code> will be printed
twice.
</p>
<h2>Incorrect synchronization</h2>
<h3 id="atomic">Atomic Values</h3>
<p>
Note that a read <span class="event">r</span> may observe the value written by a write <span class="event">w</span>
that happens concurrently with <span class="event">r</span>.
Even if this occurs, it does not imply that reads happening after <span class="event">r</span>
will observe writes that happened before <span class="event">w</span>.
The APIs in the <a href="/pkg/sync/atomic/"><code>sync/atomic</code></a>
package are collectively “atomic operations”
that can be used to synchronize the execution of different goroutines.
If the effect of an atomic operation <i>A</i> is observed by atomic operation <i>B</i>,
then <i>A</i> is synchronized before <i>B</i>.
All the atomic operations executed in a program behave as though executed
in some sequentially consistent order.
</p>
<p>
The preceding definition has the same semantics as C++’s sequentially consistent atomics
and Java’s <code>volatile</code> variables.
</p>
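<p>
For illustration, a program of this form is guaranteed to print
<code>"hello, world"</code>: the atomic <code>Store</code> is synchronized
before the atomic <code>Load</code> that observes it, so the write
to <code>a</code> happens before the <code>print</code>.
</p>
<pre>
var a string
var done atomic.Bool

func setup() {
	a = "hello, world"
	done.Store(true)
}

func main() {
	go setup()
	for !done.Load() {
	}
	print(a)
}
</pre>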
<h3 id="finalizer">Finalizers</h3>
<p>
The <a href="/pkg/runtime/"><code>runtime</code></a> package provides
a <code>SetFinalizer</code> function that adds a finalizer to be called when
a particular object is no longer reachable by the program.
A call to <code>SetFinalizer(x, f)</code> is synchronized before the finalization call <code>f(x)</code>.
</p>
<h3 id="more">Additional Mechanisms</h3>
<p>
The <code>sync</code> package provides additional synchronization abstractions,
including <a href="/pkg/sync/#Cond">condition variables</a>,
<a href="/pkg/sync/#Map">lock-free maps</a>,
<a href="/pkg/sync/#Pool">allocation pools</a>,
and
<a href="/pkg/sync/#WaitGroup">wait groups</a>.
The documentation for each of these specifies the guarantees it
makes concerning synchronization.
</p>
<p>
Other packages that provide synchronization abstractions
should document the guarantees they make too.
</p>
<h2 id="badsync">Incorrect synchronization</h2>
<p>
Programs with races are incorrect and
can exhibit non-sequentially consistent executions.
In particular, note that a read <i>r</i> may observe the value written by any write <i>w</i>
that executes concurrently with <i>r</i>.
Even if this occurs, it does not imply that reads happening after <i>r</i>
will observe writes that happened before <i>w</i>.
</p>
<p>
@ -566,3 +769,197 @@ value for <code>g.msg</code>.
In all these examples, the solution is the same:
use explicit synchronization.
</p>
<h2 id="badcompiler">Incorrect compilation</h2>
<p>
The Go memory model restricts compiler optimizations as much as it does Go programs.
Some compiler optimizations that would be valid in single-threaded programs are not valid in all Go programs.
In particular, a compiler must not introduce writes that do not exist in the original program,
it must not allow a single read to observe multiple values,
and it must not allow a single write to write multiple values.
</p>
<p>
All the following examples assume that <code>*p</code> and <code>*q</code> refer to
memory locations accessible to multiple goroutines.
</p>
<p>
Not introducing data races into race-free programs means not moving
writes out of conditional statements in which they appear.
For example, a compiler must not invert the conditional in this program:
</p>
<pre>
*p = 1
if cond {
	*p = 2
}
</pre>
<p>
That is, the compiler must not rewrite the program into this one:
</p>
<pre>
*p = 2
if !cond {
	*p = 1
}
</pre>
<p>
If <code>cond</code> is false and another goroutine is reading <code>*p</code>,
then in the original program, the other goroutine can only observe any prior value of <code>*p</code> and <code>1</code>.
In the rewritten program, the other goroutine can observe <code>2</code>, which was previously impossible.
</p>
<p>
Not introducing data races also means not assuming that loops terminate.
For example, a compiler must in general not move the accesses to <code>*p</code> or <code>*q</code>
ahead of the loop in this program:
</p>
<pre>
n := 0
for e := list; e != nil; e = e.next {
	n++
}
i := *p
*q = 1
</pre>
<p>
If <code>list</code> pointed to a cyclic list,
then the original program would never access <code>*p</code> or <code>*q</code>,
but the rewritten program would.
(Moving <code>*p</code> ahead would be safe if the compiler can prove <code>*p</code> will not panic;
moving <code>*q</code> ahead would also require the compiler proving that no other
goroutine can access <code>*q</code>.)
</p>
<p>
Not introducing data races also means not assuming that called functions
always return or are free of synchronization operations.
For example, a compiler must not move the accesses to <code>*p</code> or <code>*q</code>
ahead of the function call in this program
(at least not without direct knowledge of the precise behavior of <code>f</code>):
</p>
<pre>
f()
i := *p
*q = 1
</pre>
<p>
If the call never returned, then once again the original program
would never access <code>*p</code> or <code>*q</code>, but the rewritten program would.
And if the call contained synchronizing operations, then the original program
could establish happens before edges preceding the accesses
to <code>*p</code> and <code>*q</code>, but the rewritten program would not.
</p>
<p>
Not allowing a single read to observe multiple values means
not reloading local variables from shared memory.
For example, a compiler must not discard <code>i</code> and reload it
a second time from <code>*p</code> in this program:
</p>
<pre>
i := *p
if i &lt; 0 || i &gt;= len(funcs) {
	panic("invalid function index")
}
... complex code ...
// compiler must NOT reload i = *p here
funcs[i]()
</pre>
<p>
If the complex code needs many registers, a compiler for single-threaded programs
could discard <code>i</code> without saving a copy and then reload
<code>i = *p</code> just before
<code>funcs[i]()</code>.
A Go compiler must not, because the value of <code>*p</code> may have changed.
(Instead, the compiler could spill <code>i</code> to the stack.)
</p>
<p>
Not allowing a single write to write multiple values also means not using
the memory where a local variable will be written as temporary storage before the write.
For example, a compiler must not use <code>*p</code> as temporary storage in this program:
</p>
<pre>
*p = i + *p/2
</pre>
<p>
That is, it must not rewrite the program into this one:
</p>
<pre>
*p /= 2
*p += i
</pre>
<p>
If <code>i</code> and <code>*p</code> start equal to 2,
the original code does <code>*p = 3</code>,
so a racing thread can read only 2 or 3 from <code>*p</code>.
The rewritten code does <code>*p = 1</code> and then <code>*p = 3</code>,
allowing a racing thread to read 1 as well.
</p>
<p>
Note that all these optimizations are permitted in C/C++ compilers:
a Go compiler sharing a back end with a C/C++ compiler must take care
to disable optimizations that are invalid for Go.
</p>
<p>
Note that the prohibition on introducing data races
does not apply if the compiler can prove that the races
do not affect correct execution on the target platform.
For example, on essentially all CPUs, it is valid to rewrite
</p>
<pre>
n := 0
for i := 0; i &lt; m; i++ {
	n += *shared
}
</pre>
into:
<pre>
n := 0
local := *shared
for i := 0; i &lt; m; i++ {
	n += local
}
</pre>
<p>
provided it can be proved that <code>*shared</code> will not fault on access,
because the potential added read will not affect any existing concurrent reads or writes.
On the other hand, the rewrite would not be valid in a source-to-source translator.
</p>
<h2 id="conclusion">Conclusion</h2>
<p>
Go programmers writing data-race-free programs can rely on
sequentially consistent execution of those programs,
just as in essentially all other modern programming languages.
</p>
<p>
When it comes to programs with races,
both programmers and compilers should remember the advice:
don't be clever.
</p>

View File

@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
"Subtitle": "Version of May 12, 2022",
"Subtitle": "Version of June 14, 2022",
"Path": "/ref/spec"
}-->
@ -502,8 +502,9 @@ After a backslash, certain single-character escapes represent special values:
</pre>
<p>
All other sequences starting with a backslash are illegal inside rune literals.
An unrecognized character following a backslash in a rune literal is illegal.
</p>
<pre class="ebnf">
rune_lit = "'" ( unicode_value | byte_value ) "'" .
unicode_value = unicode_char | little_u_value | big_u_value | escaped_char .
@ -530,6 +531,7 @@ escaped_char = `\` ( "a" | "b" | "f" | "n" | "r" | "t" | "v" | `\` | "'" | `
'\U00101234'
'\'' // rune literal containing single quote character
'aa' // illegal: too many characters
'\k' // illegal: k is not recognized after a backslash
'\xa' // illegal: too few hexadecimal digits
'\0' // illegal: too few octal digits
'\400' // illegal: octal value over 255
@ -1025,7 +1027,7 @@ be <a href="#Uniqueness_of_identifiers">unique</a>.
<pre class="ebnf">
StructType = "struct" "{" { FieldDecl ";" } "}" .
FieldDecl = (IdentifierList Type | EmbeddedField) [ Tag ] .
EmbeddedField = [ "*" ] TypeName .
EmbeddedField = [ "*" ] TypeName [ TypeArgs ] .
Tag = string_lit .
</pre>
@ -3029,7 +3031,7 @@ Each element may optionally be preceded by a corresponding key.
<pre class="ebnf">
CompositeLit = LiteralType LiteralValue .
LiteralType = StructType | ArrayType | "[" "..." "]" ElementType |
SliceType | MapType | TypeName .
SliceType | MapType | TypeName [ TypeArgs ] .
LiteralValue = "{" [ ElementList [ "," ] ] "}" .
ElementList = KeyedElement { "," KeyedElement } .
KeyedElement = [ Key ":" ] Element .
@ -5245,7 +5247,7 @@ float32(0.49999999) // 0.5 of type float32
float64(-1e-1000) // 0.0 of type float64
string('x') // "x" of type string
string(0x266c) // "♬" of type string
MyString("foo" + "bar") // "foobar" of type MyString
myString("foo" + "bar") // "foobar" of type myString
string([]byte{'a'}) // not a constant: []byte{'a'} is not a constant
(*int)(nil) // not a constant: nil is not a constant, *int is not a boolean, numeric, or string type
int(1.2) // illegal: 1.2 cannot be represented as an int
@ -5428,8 +5430,9 @@ the range of valid Unicode code points are converted to <code>"\uFFFD"</code>.
string('a') // "a"
string(-1) // "\ufffd" == "\xef\xbf\xbd"
string(0xf8) // "\u00f8" == "ø" == "\xc3\xb8"
type MyString string
MyString(0x65e5) // "\u65e5" == "日" == "\xe6\x97\xa5"
type myString string
myString(0x65e5) // "\u65e5" == "日" == "\xe6\x97\xa5"
</pre>
</li>
@ -5442,8 +5445,12 @@ string([]byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}) // "hellø"
string([]byte{}) // ""
string([]byte(nil)) // ""
type MyBytes []byte
string(MyBytes{'h', 'e', 'l', 'l', '\xc3', '\xb8'}) // "hellø"
type bytes []byte
string(bytes{'h', 'e', 'l', 'l', '\xc3', '\xb8'}) // "hellø"
type myByte byte
string([]myByte{'w', 'o', 'r', 'l', 'd', '!'}) // "world!"
myString([]myByte{'\xf0', '\x9f', '\x8c', '\x8d'}) // "🌍"
</pre>
</li>
@ -5457,8 +5464,12 @@ string([]rune{0x767d, 0x9d6c, 0x7fd4}) // "\u767d\u9d6c\u7fd4" == "白鵬翔"
string([]rune{}) // ""
string([]rune(nil)) // ""
type MyRunes []rune
string(MyRunes{0x767d, 0x9d6c, 0x7fd4}) // "\u767d\u9d6c\u7fd4" == "白鵬翔"
type runes []rune
string(runes{0x767d, 0x9d6c, 0x7fd4}) // "\u767d\u9d6c\u7fd4" == "白鵬翔"
type myRune rune
string([]myRune{0x266b, 0x266c}) // "\u266b\u266c" == "♫♬"
myString([]myRune{0x1F30E}) // "\U0001f30e" == "🌎"
</pre>
</li>
@ -5467,10 +5478,13 @@ Converting a value of a string type to a slice of bytes type
yields a slice whose successive elements are the bytes of the string.
<pre>
[]byte("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
[]byte("") // []byte{}
[]byte("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
[]byte("") // []byte{}
MyBytes("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
bytes("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
[]myByte("world!") // []myByte{'w', 'o', 'r', 'l', 'd', '!'}
[]myByte(myString("🌏")) // []myByte{'\xf0', '\x9f', '\x8c', '\x8f'}
</pre>
</li>
@ -5479,10 +5493,13 @@ Converting a value of a string type to a slice of runes type
yields a slice containing the individual Unicode code points of the string.
<pre>
[]rune(MyString("白鵬翔")) // []rune{0x767d, 0x9d6c, 0x7fd4}
[]rune("") // []rune{}
[]rune(myString("白鵬翔")) // []rune{0x767d, 0x9d6c, 0x7fd4}
[]rune("") // []rune{}
MyRunes("白鵬翔") // []rune{0x767d, 0x9d6c, 0x7fd4}
runes("白鵬翔") // []rune{0x767d, 0x9d6c, 0x7fd4}
[]myRune("♫♬") // []myRune{0x266b, 0x266c}
[]myRune(myString("🌐")) // []myRune{0x1f310}
</pre>
</li>
</ol>
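<p>
Illustrative only (not part of the change): a complete program combining the new
byte-type and rune-type conversion examples.
</p>
<pre>
package main

import "fmt"

type myString string
type myByte byte
type myRune rune

func main() {
	b := []myByte(myString("hi"))       // string type to slice of byte type
	r := []myRune("♫♬")                 // string to slice of rune type
	fmt.Println(len(b), len(r))         // 2 2
	fmt.Println(myString(b), string(r)) // hi ♫♬
}
</pre>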
@ -7141,7 +7158,7 @@ A constant size argument must be non-negative and <a href="#Representability">re
by a value of type <code>int</code>; if it is an untyped constant it is given type <code>int</code>.
If both <code>n</code> and <code>m</code> are provided and are constant, then
<code>n</code> must be no larger than <code>m</code>.
If <code>n</code> is negative or larger than <code>m</code> at run time,
For slices and channels, if <code>n</code> is negative or larger than <code>m</code> at run time,
a <a href="#Run_time_panics">run-time panic</a> occurs.
</p>
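<p>
Illustrative only (not part of the change): how the size arguments behave for each kind.
</p>
<pre>
s := make([]int, 3, 5)         // len(s) == 3, cap(s) == 5
c := make(chan int, 10)        // buffered channel with capacity 10
m := make(map[string]int, 100) // size is a hint only and may be ignored

n := -1
_ = make([]int, n)             // run-time panic: n is negative
</pre>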

View File

@ -53,6 +53,11 @@ func main() {
log.Fatal(err)
}
// <= go 1.7 doesn't embed the contentID or actionID, so no slash is present
if !strings.Contains(id, "/") {
log.Fatalf("%s: build ID is a legacy format...binary too old for this tool", file)
}
newID := id[:strings.LastIndex(id, "/")] + "/" + buildid.HashToString(hash)
if len(newID) != len(id) {
log.Fatalf("%s: build ID length mismatch %q vs %q", file, id, newID)

View File

@ -1831,6 +1831,23 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6
bo := f.ByteOrder
symtab, err := f.Symbols()
if err == nil {
// Check for use of -fsanitize=hwaddress (issue 53285).
removeTag := func(v uint64) uint64 { return v }
if goarch == "arm64" {
for i := range symtab {
if symtab[i].Name == "__hwasan_init" {
// -fsanitize=hwaddress on ARM
// uses the upper byte of a
// memory address as a hardware
// tag. Remove it so that
// we can find the associated
// data.
removeTag = func(v uint64) uint64 { return v &^ (0xff << (64 - 8)) }
break
}
}
}
for i := range symtab {
s := &symtab[i]
switch {
@ -1838,9 +1855,10 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
val := removeTag(s.Value)
if sect.Addr <= val && val < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
data := sdat[val-sect.Addr:]
ints = make([]int64, len(data)/8)
for i := range ints {
ints[i] = int64(bo.Uint64(data[i*8:]))
@ -1852,9 +1870,10 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
val := removeTag(s.Value)
if sect.Addr <= val && val < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
data := sdat[val-sect.Addr:]
floats = make([]float64, len(data)/8)
for i := range floats {
floats[i] = math.Float64frombits(bo.Uint64(data[i*8:]))
@ -1867,9 +1886,10 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
val := removeTag(s.Value)
if sect.Addr <= val && val < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
data := sdat[val-sect.Addr:]
strdata[n] = string(data)
}
}
@ -1880,9 +1900,10 @@ func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int6
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
val := removeTag(s.Value)
if sect.Addr <= val && val < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
data := sdat[val-sect.Addr:]
strlen := bo.Uint64(data[:8])
if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt?
fatalf("string literal too big")
@ -2242,6 +2263,8 @@ var dwarfToName = map[string]string{
"long long unsigned int": "ulonglong",
"signed char": "schar",
"unsigned char": "uchar",
"unsigned long": "ulong", // Used by Clang 14; issue 53013.
"unsigned long long": "ulonglong", // Used by Clang 14; issue 53013.
}
const signedDelta = 64
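The hardware-tag handling added earlier in this file amounts to clearing the top
byte of a 64-bit address, since -fsanitize=hwaddress on arm64 stores its tag there.
A minimal standalone sketch of the same masking (the function name is hypothetical,
not part of the change):

func stripHWASanTag(addr uint64) uint64 {
	// The top 8 bits of a tagged pointer hold the HWASan tag; clear them so the
	// address can be compared against untagged section bounds.
	return addr &^ (0xff << 56)
}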

View File

@ -1330,10 +1330,10 @@ func (g *genInst) dictPass(info *instInfo) {
m = convertUsingDictionary(info, info.dictParam, m.Pos(), mce.X, m, m.Type())
}
case ir.ODOTTYPE, ir.ODOTTYPE2:
if !m.Type().HasShape() {
dt := m.(*ir.TypeAssertExpr)
if !dt.Type().HasShape() && !dt.X.Type().HasShape() {
break
}
dt := m.(*ir.TypeAssertExpr)
var rtype, itab ir.Node
if dt.Type().IsInterface() || dt.X.Type().IsEmptyInterface() {
// TODO(mdempsky): Investigate executing this block unconditionally.
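For orientation (illustrative sketch, not the exact failing case from the issue):
type assertions of this general form inside generic functions are what dictPass
rewrites, supplying the run-time type or itab from the dictionary.

func pick[T any](x any) (T, bool) {
	v, ok := x.(T) // asserted type is a type parameter; needs dictionary info
	return v, ok
}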

View File

@ -412,7 +412,7 @@ func dimportpath(p *types.Pkg) {
}
s := base.Ctxt.Lookup("type..importpath." + p.Prefix + ".")
ot := dnameData(s, 0, p.Path, "", nil, false)
ot := dnameData(s, 0, p.Path, "", nil, false, false)
objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
s.Set(obj.AttrContentAddressable, true)
p.Pathsym = s
@ -461,12 +461,12 @@ func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
base.Fatalf("package mismatch for %v", ft.Sym)
}
nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name), ft.Embedded != 0)
return objw.SymPtr(lsym, ot, nsym, 0)
}
// dnameData writes the contents of a reflect.name into s at offset ot.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported, embedded bool) int {
if len(name) >= 1<<29 {
base.Fatalf("name too long: %d %s...", len(name), name[:1024])
}
@ -491,6 +491,9 @@ func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported b
if pkg != nil {
bits |= 1 << 2
}
if embedded {
bits |= 1 << 3
}
b := make([]byte, l)
b[0] = bits
copy(b[1:], nameLen[:nameLenLen])
@ -513,7 +516,7 @@ func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported b
var dnameCount int
// dname creates a reflect.name for a struct field or method.
func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
func dname(name, tag string, pkg *types.Pkg, exported, embedded bool) *obj.LSym {
// Write out data as "type.." to signal two things to the
// linker, first that when dynamically linking, the symbol
// should be moved to a relro section, and second that the
@ -538,11 +541,14 @@ func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
dnameCount++
}
if embedded {
sname += ".embedded"
}
s := base.Ctxt.Lookup(sname)
if len(s.P) > 0 {
return s
}
ot := dnameData(s, 0, name, tag, pkg, exported)
ot := dnameData(s, 0, name, tag, pkg, exported, embedded)
objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
s.Set(obj.AttrContentAddressable, true)
return s
@ -610,7 +616,7 @@ func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
if !exported && a.name.Pkg != typePkg(t) {
pkg = a.name.Pkg
}
nsym := dname(a.name.Name, "", pkg, exported)
nsym := dname(a.name.Name, "", pkg, exported, false)
ot = objw.SymPtrOff(lsym, ot, nsym)
ot = dmethodptrOff(lsym, ot, writeType(a.mtype))
@ -775,7 +781,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
}
ot = objw.SymPtr(lsym, ot, gcsym, 0) // gcdata
nsym := dname(p, "", nil, exported)
nsym := dname(p, "", nil, exported, false)
ot = objw.SymPtrOff(lsym, ot, nsym) // str
// ptrToThis
if sptr == nil {
@ -1074,7 +1080,7 @@ func writeType(t *types.Type) *obj.LSym {
if !exported && a.name.Pkg != tpkg {
pkg = a.name.Pkg
}
nsym := dname(a.name.Name, "", pkg, exported)
nsym := dname(a.name.Name, "", pkg, exported, false)
ot = objw.SymPtrOff(lsym, ot, nsym)
ot = objw.SymPtrOff(lsym, ot, writeType(a.type_))
@ -1180,14 +1186,7 @@ func writeType(t *types.Type) *obj.LSym {
// ../../../../runtime/type.go:/structField
ot = dnameField(lsym, ot, spkg, f)
ot = objw.SymPtr(lsym, ot, writeType(f.Type), 0)
offsetAnon := uint64(f.Offset) << 1
if offsetAnon>>1 != uint64(f.Offset) {
base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
}
if f.Embedded != 0 {
offsetAnon |= 1
}
ot = objw.Uintptr(lsym, ot, offsetAnon)
ot = objw.Uintptr(lsym, ot, uint64(f.Offset))
}
}
@ -1356,7 +1355,7 @@ func WriteTabs() {
// name nameOff
// typ typeOff // pointer to symbol
// }
nsym := dname(p.Sym().Name, "", nil, true)
nsym := dname(p.Sym().Name, "", nil, true, false)
t := p.Type()
if p.Class != ir.PFUNC {
t = types.NewPtr(t)
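The change in this file moves the struct field "embedded" flag out of the offset
word and into the reflect name bits; behavior visible through the reflect API is
intended to be unchanged. A small illustrative program (not part of the change):

package main

import (
	"fmt"
	"reflect"
)

type Base struct{ ID int }

type Wrapper struct {
	Base        // embedded field
	Name string
}

func main() {
	t := reflect.TypeOf(Wrapper{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		// Anonymous reports the embedded bit; Offset is now stored without it.
		fmt.Println(f.Name, f.Anonymous, f.Offset)
	}
}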

View File

@ -735,9 +735,9 @@
(NEGW (MOVDconst [x])) => (MOVDconst [int64(int32(-x))])
// Shift of a constant.
(SLLI [x] (MOVDconst [y])) && is32Bit(y << x) => (MOVDconst [y << x])
(SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> x)])
(SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> x])
(SLLI [x] (MOVDconst [y])) && is32Bit(y << uint32(x)) => (MOVDconst [y << uint32(x)])
(SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> uint32(x))])
(SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> uint32(x)])
// SLTI/SLTIU with constants.
(SLTI [x] (MOVDconst [y])) => (MOVDconst [b2i(int64(y) < int64(x))])

View File

@ -4843,19 +4843,19 @@ func rewriteValueRISCV64_OpRISCV64SLL(v *Value) bool {
func rewriteValueRISCV64_OpRISCV64SLLI(v *Value) bool {
v_0 := v.Args[0]
// match: (SLLI [x] (MOVDconst [y]))
// cond: is32Bit(y << x)
// result: (MOVDconst [y << x])
// cond: is32Bit(y << uint32(x))
// result: (MOVDconst [y << uint32(x)])
for {
x := auxIntToInt64(v.AuxInt)
if v_0.Op != OpRISCV64MOVDconst {
break
}
y := auxIntToInt64(v_0.AuxInt)
if !(is32Bit(y << x)) {
if !(is32Bit(y << uint32(x))) {
break
}
v.reset(OpRISCV64MOVDconst)
v.AuxInt = int64ToAuxInt(y << x)
v.AuxInt = int64ToAuxInt(y << uint32(x))
return true
}
return false
@ -4913,7 +4913,7 @@ func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool {
func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool {
v_0 := v.Args[0]
// match: (SRAI [x] (MOVDconst [y]))
// result: (MOVDconst [int64(y) >> x])
// result: (MOVDconst [int64(y) >> uint32(x)])
for {
x := auxIntToInt64(v.AuxInt)
if v_0.Op != OpRISCV64MOVDconst {
@ -4921,7 +4921,7 @@ func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool {
}
y := auxIntToInt64(v_0.AuxInt)
v.reset(OpRISCV64MOVDconst)
v.AuxInt = int64ToAuxInt(int64(y) >> x)
v.AuxInt = int64ToAuxInt(int64(y) >> uint32(x))
return true
}
return false
@ -4947,7 +4947,7 @@ func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool {
func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool {
v_0 := v.Args[0]
// match: (SRLI [x] (MOVDconst [y]))
// result: (MOVDconst [int64(uint64(y) >> x)])
// result: (MOVDconst [int64(uint64(y) >> uint32(x))])
for {
x := auxIntToInt64(v.AuxInt)
if v_0.Op != OpRISCV64MOVDconst {
@ -4955,7 +4955,7 @@ func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool {
}
y := auxIntToInt64(v_0.AuxInt)
v.reset(OpRISCV64MOVDconst)
v.AuxInt = int64ToAuxInt(int64(uint64(y) >> x))
v.AuxInt = int64ToAuxInt(int64(uint64(y) >> uint32(x)))
return true
}
return false

View File

@ -38,7 +38,7 @@ func pkgFor(path, source string, info *Info) (*Package, error) {
return conf.Check(f.PkgName.Value, []*syntax.File{f}, info)
}
func mustTypecheck(t *testing.T, path, source string, info *Info) string {
func mustTypecheck(t testing.TB, path, source string, info *Info) string {
pkg, err := pkgFor(path, source, info)
if err != nil {
name := path

View File

@ -72,13 +72,13 @@ func (check *Checker) instantiateSignature(pos syntax.Pos, typ *Signature, targs
}()
}
inst := check.instance(pos, typ, targs, check.bestContext(nil)).(*Signature)
inst := check.instance(pos, typ, targs, nil, check.context()).(*Signature)
assert(len(xlist) <= len(targs))
// verify instantiation lazily (was issue #50450)
check.later(func() {
tparams := typ.TypeParams().list()
if i, err := check.verify(pos, tparams, targs); err != nil {
if i, err := check.verify(pos, tparams, targs, check.context()); err != nil {
// best position for error reporting
pos := pos
if i < len(xlist) {
@ -395,7 +395,7 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T
// need to compute it from the adjusted list; otherwise we can
// simply use the result signature's parameter list.
if adjusted {
sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.TypeParams().list(), targs), nil).(*Tuple)
sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.TypeParams().list(), targs), nil, check.context()).(*Tuple)
} else {
sigParams = rsig.params
}

View File

@ -98,7 +98,7 @@ type Checker struct {
nextID uint64 // unique Id for type parameters (first valid Id is 1)
objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
infoMap map[*Named]typeInfo // maps named types to their associated type info (for cycle detection)
valids instanceLookup // valid *Named (incl. instantiated) types per the validType check
// pkgPathMap maps package names to the set of distinct import paths we've
// seen for that name, anywhere in the import graph. It is used for
@ -241,7 +241,6 @@ func NewChecker(conf *Config, pkg *Package, info *Info) *Checker {
version: version,
objMap: make(map[Object]*declInfo),
impMap: make(map[importKey]*Package),
infoMap: make(map[*Named]typeInfo),
}
}

View File

@ -508,7 +508,7 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named
}
// type definition or generic type declaration
named := check.newNamed(obj, nil, nil, nil)
named := check.newNamed(obj, nil, nil)
def.setUnderlying(named)
if tdecl.TParamList != nil {
@ -522,8 +522,8 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named
assert(rhs != nil)
named.fromRHS = rhs
// If the underlying was not set while type-checking the right-hand side, it
// is invalid and an error should have been reported elsewhere.
// If the underlying type was not set while type-checking the right-hand
// side, it is invalid and an error should have been reported elsewhere.
if named.underlying == nil {
named.underlying = Typ[Invalid]
}
@ -635,7 +635,7 @@ func (check *Checker) collectMethods(obj *TypeName) {
// and field names must be distinct."
base, _ := obj.typ.(*Named) // shouldn't fail but be conservative
if base != nil {
assert(base.targs.Len() == 0) // collectMethods should not be called on an instantiated type
assert(base.TypeArgs().Len() == 0) // collectMethods should not be called on an instantiated type
// See issue #52529: we must delay the expansion of underlying here, as
// base may not be fully set-up.
@ -646,8 +646,8 @@ func (check *Checker) collectMethods(obj *TypeName) {
// Checker.Files may be called multiple times; additional package files
// may add methods to already type-checked types. Add pre-existing methods
// so that we can detect redeclarations.
for i := 0; i < base.methods.Len(); i++ {
m := base.methods.At(i, nil)
for i := 0; i < base.NumMethods(); i++ {
m := base.Method(i)
assert(m.name != "_")
assert(mset.insert(m) == nil)
}
@ -679,8 +679,8 @@ func (check *Checker) collectMethods(obj *TypeName) {
func (check *Checker) checkFieldUniqueness(base *Named) {
if t, _ := base.under().(*Struct); t != nil {
var mset objset
for i := 0; i < base.methods.Len(); i++ {
m := base.methods.At(i, nil)
for i := 0; i < base.NumMethods(); i++ {
m := base.Method(i)
assert(m.name != "_")
assert(mset.insert(m) == nil)
}

View File

@ -110,11 +110,11 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
renameMap := makeRenameMap(tparams, tparams2)
for i, tparam := range tparams {
tparams2[i].bound = check.subst(pos, tparam.bound, renameMap, nil)
tparams2[i].bound = check.subst(pos, tparam.bound, renameMap, nil, check.context())
}
tparams = tparams2
params = check.subst(pos, params, renameMap, nil).(*Tuple)
params = check.subst(pos, params, renameMap, nil, check.context()).(*Tuple)
}
}
@ -188,7 +188,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
// but that doesn't impact the isParameterized check for now).
if params.Len() > 0 {
smap := makeSubstMap(tparams, targs)
params = check.subst(nopos, params, smap, nil).(*Tuple)
params = check.subst(nopos, params, smap, nil, check.context()).(*Tuple)
}
// Unify parameter and argument types for generic parameters with typed arguments
@ -224,7 +224,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
}
}
smap := makeSubstMap(tparams, targs)
inferred := check.subst(arg.Pos(), tpar, smap, nil)
inferred := check.subst(arg.Pos(), tpar, smap, nil, check.context())
if inferred != tpar {
check.errorf(arg, "%s %s of %s does not match inferred type %s for %s", kind, targ, arg.expr, inferred, tpar)
} else {
@ -434,7 +434,7 @@ func (w *tpWalker) isParameterized(typ Type) (res bool) {
return w.isParameterized(t.elem)
case *Named:
return w.isParameterizedTypeList(t.targs.list())
return w.isParameterizedTypeList(t.TypeArgs().list())
case *TypeParam:
// t must be one of w.tparams
@ -626,7 +626,7 @@ func (check *Checker) inferB(pos syntax.Pos, tparams []*TypeParam, targs []Type)
n := 0
for _, index := range dirty {
t0 := types[index]
if t1 := check.subst(nopos, t0, smap, nil); t1 != t0 {
if t1 := check.subst(nopos, t0, smap, nil, check.context()); t1 != t0 {
types[index] = t1
dirty[n] = index
n++

View File

@ -40,6 +40,9 @@ import (
// count is incorrect; for *Named types, a panic may occur later inside the
// *Named API.
func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error) {
if ctxt == nil {
ctxt = NewContext()
}
if validate {
var tparams []*TypeParam
switch t := orig.(type) {
@ -51,40 +54,72 @@ func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, e
if len(targs) != len(tparams) {
return nil, fmt.Errorf("got %d type arguments but %s has %d type parameters", len(targs), orig, len(tparams))
}
if i, err := (*Checker)(nil).verify(nopos, tparams, targs); err != nil {
if i, err := (*Checker)(nil).verify(nopos, tparams, targs, ctxt); err != nil {
return nil, &ArgumentError{i, err}
}
}
inst := (*Checker)(nil).instance(nopos, orig, targs, ctxt)
inst := (*Checker)(nil).instance(nopos, orig, targs, nil, ctxt)
return inst, nil
}
// instance creates a type or function instance using the given original type
// typ and arguments targs. For Named types the resulting instance will be
// unexpanded. check may be nil.
func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, ctxt *Context) (res Type) {
var h string
// instance instantiates the given original (generic) function or type with the
// provided type arguments and returns the resulting instance. If an identical
// instance exists already in the given contexts, it returns that instance,
// otherwise it creates a new one.
//
// If expanding is non-nil, it is the Named instance type currently being
// expanded. If ctxt is non-nil, it is the context associated with the current
// type-checking pass or call to Instantiate. At least one of expanding or ctxt
// must be non-nil.
//
// For Named types the resulting instance may be unexpanded.
func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, expanding *Named, ctxt *Context) (res Type) {
// The order of the contexts below matters: we always prefer instances in the
// expanding instance context in order to preserve reference cycles.
//
// Invariant: if expanding != nil, the returned instance will be the instance
// recorded in expanding.inst.ctxt.
var ctxts []*Context
if expanding != nil {
ctxts = append(ctxts, expanding.inst.ctxt)
}
if ctxt != nil {
h = ctxt.instanceHash(orig, targs)
// typ may already have been instantiated with identical type arguments. In
// that case, re-use the existing instance.
if inst := ctxt.lookup(h, orig, targs); inst != nil {
return inst
ctxts = append(ctxts, ctxt)
}
assert(len(ctxts) > 0)
// Compute all hashes; hashes may differ across contexts due to different
// unique IDs for Named types within the hasher.
hashes := make([]string, len(ctxts))
for i, ctxt := range ctxts {
hashes[i] = ctxt.instanceHash(orig, targs)
}
// updateContexts records res in all contexts and returns the recorded type,
// preferring the instance in the expanding context (the first in ctxts), if any.
updateContexts := func(res Type) Type {
for i := len(ctxts) - 1; i >= 0; i-- {
res = ctxts[i].update(hashes[i], orig, targs, res)
}
return res
}
// typ may already have been instantiated with identical type arguments. In
// that case, re-use the existing instance.
for i, ctxt := range ctxts {
if inst := ctxt.lookup(hashes[i], orig, targs); inst != nil {
return updateContexts(inst)
}
}
switch orig := orig.(type) {
case *Named:
tname := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil)
named := check.newNamed(tname, orig, nil, nil) // underlying, tparams, and methods are set when named is resolved
named.targs = newTypeList(targs)
named.resolver = func(ctxt *Context, n *Named) (*TypeParamList, Type, *methodList) {
return expandNamed(ctxt, n, pos)
}
res = named
res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily
case *Signature:
assert(expanding == nil) // function instances cannot be reached from Named types
tparams := orig.TypeParams()
if !check.validateTArgLen(pos, tparams.Len(), len(targs)) {
return Typ[Invalid]
@ -92,7 +127,7 @@ func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, ctxt *Co
if tparams.Len() == 0 {
return orig // nothing to do (minor optimization)
}
sig := check.subst(pos, orig, makeSubstMap(tparams.list(), targs), ctxt).(*Signature)
sig := check.subst(pos, orig, makeSubstMap(tparams.list(), targs), nil, ctxt).(*Signature)
// If the signature doesn't use its type parameters, subst
// will not make a copy. In that case, make a copy now (so
// we can set tparams to nil w/o causing side-effects).
@ -110,13 +145,8 @@ func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, ctxt *Co
panic(fmt.Sprintf("%v: cannot instantiate %v", pos, orig))
}
if ctxt != nil {
// It's possible that we've lost a race to add named to the context.
// In this case, use whichever instance is recorded in the context.
res = ctxt.update(h, orig, targs, res)
}
return res
// Update all contexts; it's possible that we've lost a race.
return updateContexts(res)
}
// validateTArgLen verifies that the length of targs and tparams matches,
@ -134,7 +164,7 @@ func (check *Checker) validateTArgLen(pos syntax.Pos, ntparams, ntargs int) bool
return true
}
func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type) (int, error) {
func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type, ctxt *Context) (int, error) {
smap := makeSubstMap(tparams, targs)
for i, tpar := range tparams {
// Ensure that we have a (possibly implicit) interface as type bound (issue #51048).
@ -143,7 +173,7 @@ func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type)
// as the instantiated type; before we can use it for bounds checking we
// need to instantiate it with the type arguments with which we instantiated
// the parameterized type.
bound := check.subst(pos, tpar.bound, smap, nil)
bound := check.subst(pos, tpar.bound, smap, nil, ctxt)
if err := check.implements(targs[i], bound); err != nil {
return i, err
}
@ -248,7 +278,7 @@ func (check *Checker) implements(V, T Type) error {
if alt != nil {
return errorf("%s does not implement %s (possibly missing ~ for %s in constraint %s)", V, T, alt, T)
} else {
return errorf("%s does not implement %s", V, T)
return errorf("%s does not implement %s (%s missing in %s)", V, T, V, Ti.typeSet().terms)
}
}
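For orientation, the exported go/types API mirrors this types2 code; in particular,
passing a nil Context to Instantiate is permitted and a fresh context is created,
matching the check added at the top of Instantiate above. A minimal usage sketch
(illustrative, not part of the change):

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

type Pair[K comparable, V any] struct {
	Key K
	Val V
}`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	pair := pkg.Scope().Lookup("Pair").Type()
	// A nil *types.Context is fine; one is created internally.
	inst, err := types.Instantiate(nil, pair, []types.Type{types.Typ[types.String], types.Typ[types.Int]}, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(inst) // the instantiated Pair type with string and int arguments
}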

View File

@ -1,79 +0,0 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types2
import "sync"
// methodList holds a list of methods that may be lazily resolved by a provided
// resolution method.
type methodList struct {
methods []*Func
// guards synchronizes the instantiation of lazy methods. For lazy method
// lists, guards is non-nil and of the length passed to newLazyMethodList.
// For non-lazy method lists, guards is nil.
guards *[]sync.Once
}
// newMethodList creates a non-lazy method list holding the given methods.
func newMethodList(methods []*Func) *methodList {
return &methodList{methods: methods}
}
// newLazyMethodList creates a lazy method list of the given length. Methods
// may be resolved lazily for a given index by providing a resolver function.
func newLazyMethodList(length int) *methodList {
guards := make([]sync.Once, length)
return &methodList{
methods: make([]*Func, length),
guards: &guards,
}
}
// isLazy reports whether the receiver is a lazy method list.
func (l *methodList) isLazy() bool {
return l != nil && l.guards != nil
}
// Add appends a method to the method list if not already present. Add
// panics if the receiver is lazy.
func (l *methodList) Add(m *Func) {
assert(!l.isLazy())
if i, _ := lookupMethod(l.methods, m.pkg, m.name, false); i < 0 {
l.methods = append(l.methods, m)
}
}
// Lookup looks up the method identified by pkg and name in the receiver.
// Lookup panics if the receiver is lazy. If foldCase is true, method names
// are considered equal if they are equal with case folding.
func (l *methodList) Lookup(pkg *Package, name string, foldCase bool) (int, *Func) {
assert(!l.isLazy())
if l == nil {
return -1, nil
}
return lookupMethod(l.methods, pkg, name, foldCase)
}
// Len returns the length of the method list.
func (l *methodList) Len() int {
if l == nil {
return 0
}
return len(l.methods)
}
// At returns the i'th method of the method list. At panics if i is out of
// bounds, or if the receiver is lazy and resolve is nil.
func (l *methodList) At(i int, resolve func() *Func) *Func {
if !l.isLazy() {
return l.methods[i]
}
assert(resolve != nil)
(*l.guards)[i].Do(func() {
l.methods[i] = resolve()
})
return l.methods[i]
}

View File

@ -1,40 +0,0 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types2
import (
"testing"
)
func TestLazyMethodList(t *testing.T) {
l := newLazyMethodList(2)
if got := l.Len(); got != 2 {
t.Fatalf("Len() = %d, want 2", got)
}
f0 := NewFunc(nopos, nil, "f0", nil)
f1 := NewFunc(nopos, nil, "f1", nil)
// Verify that methodList.At is idempotent, by calling it repeatedly with a
// resolve func that returns different pointer values (f0 or f1).
steps := []struct {
index int
resolve *Func // the *Func returned by the resolver
want *Func // the actual *Func returned by methodList.At
}{
{0, f0, f0},
{0, f1, f0},
{1, f1, f1},
{1, f0, f1},
}
for i, step := range steps {
got := l.At(step.index, func() *Func { return step.resolve })
if got != step.want {
t.Errorf("step %d: At(%d, ...) = %s, want %s", i, step.index, got.Name(), step.want.Name())
}
}
}

View File

@ -7,30 +7,136 @@ package types2
import (
"cmd/compile/internal/syntax"
"sync"
"sync/atomic"
)
// Type-checking Named types is subtle, because they may be recursively
// defined, and because their full details may be spread across multiple
// declarations (via methods). For this reason they are type-checked lazily,
// to avoid information being accessed before it is complete.
//
// Conceptually, it is helpful to think of named types as having two distinct
// sets of information:
// - "LHS" information, defining their identity: Obj() and TypeArgs()
// - "RHS" information, defining their details: TypeParams(), Underlying(),
// and methods.
//
// In this taxonomy, LHS information is available immediately, but RHS
// information is lazy. Specifically, a named type N may be constructed in any
// of the following ways:
// 1. type-checked from the source
// 2. loaded eagerly from export data
// 3. loaded lazily from export data (when using unified IR)
// 4. instantiated from a generic type
//
// In cases 1, 3, and 4, it is possible that the underlying type or methods of
// N may not be immediately available.
// - During type-checking, we allocate N before type-checking its underlying
// type or methods, so that we may resolve recursive references.
// - When loading from export data, we may load its methods and underlying
// type lazily using a provided load function.
// - After instantiating, we lazily expand the underlying type and methods
// (note that instances may be created while still in the process of
// type-checking the original type declaration).
//
// In cases 3 and 4 this lazy construction may also occur concurrently, due to
// concurrent use of the type checker API (after type checking or importing has
// finished). It is critical that we keep track of state, so that Named types
// are constructed exactly once and so that we do not access their details too
// soon.
//
// We achieve this by tracking state with an atomic state variable, and
// guarding potentially concurrent calculations with a mutex. At any point in
// time this state variable determines which data on N may be accessed. As
// state monotonically progresses, any data available at state M may be
// accessed without acquiring the mutex at state N, provided N >= M.
//
// GLOSSARY: Here are a few terms used in this file to describe Named types:
// - We say that a Named type is "instantiated" if it has been constructed by
// instantiating a generic named type with type arguments.
// - We say that a Named type is "declared" if it corresponds to a type
// declaration in the source. Instantiated named types correspond to a type
// instantiation in the source, not a declaration. But their Origin type is
// a declared type.
// - We say that a Named type is "resolved" if its RHS information has been
// loaded or fully type-checked. For Named types constructed from export
// data, this may involve invoking a loader function to extract information
// from export data. For instantiated named types this involves reading
// information from their origin.
// - We say that a Named type is "expanded" if it is an instantiated type and
// type parameters in its underlying type and methods have been substituted
// with the type arguments from the instantiation. A type may be partially
// expanded if some but not all of these details have been substituted.
// Similarly, we refer to these individual details (underlying type or
// method) as being "expanded".
// - When all information is known for a named type, we say it is "complete".
//
// Some invariants to keep in mind: each declared Named type has a single
// corresponding object, and that object's type is the (possibly generic) Named
// type. Declared Named types are identical if and only if their pointers are
// identical. On the other hand, multiple instantiated Named types may be
// identical even though their pointers are not identical. One has to use
// Identical to compare them. For instantiated named types, their obj is a
// synthetic placeholder that records the position of the corresponding
// instantiation in the source (if they were constructed during type checking).
//
// To prevent infinite expansion of named instances that are created outside of
// type-checking, instances share a Context with other instances created during
// their expansion. Via the pigeonhole principle, this guarantees that in the
// presence of a cycle of named types, expansion will eventually find an
// existing instance in the Context and short-circuit the expansion.
//
// Once an instance is complete, we can nil out this shared Context to unpin
// memory, though this Context may still be held by other incomplete instances
// in its "lineage".
// A Named represents a named (defined) type.
type Named struct {
check *Checker
obj *TypeName // corresponding declared object for declared types; placeholder for instantiated types
orig *Named // original, uninstantiated type
fromRHS Type // type (on RHS of declaration) this *Named type is derived from (for cycle reporting)
check *Checker // non-nil during type-checking; nil otherwise
obj *TypeName // corresponding declared object for declared types; see above for instantiated types
// fromRHS holds the type (on RHS of declaration) this *Named type is derived
// from (for cycle reporting). Only used by validType, and therefore does not
// require synchronization.
fromRHS Type
// information for instantiated types; nil otherwise
inst *instance
mu sync.Mutex // guards all fields below
state_ uint32 // the current state of this type; must only be accessed atomically
underlying Type // possibly a *Named during setup; never a *Named once set up completely
tparams *TypeParamList // type parameters, or nil
targs *TypeList // type arguments (after instantiation), or nil
// methods declared for this type (not the method set of this type).
// methods declared for this type (not the method set of this type)
// Signatures are type-checked lazily.
// For non-instantiated types, this is a fully populated list of methods. For
// instantiated types, this is a 'lazy' list, and methods are instantiated
// when they are first accessed.
methods *methodList
// instantiated types, methods are individually expanded when they are first
// accessed.
methods []*Func
// resolver may be provided to lazily resolve type parameters, underlying, and methods.
resolver func(*Context, *Named) (tparams *TypeParamList, underlying Type, methods *methodList)
once sync.Once // ensures that tparams, underlying, and methods are resolved before accessing
// loader may be provided to lazily load type parameters, underlying type, and methods.
loader func(*Named) (tparams []*TypeParam, underlying Type, methods []*Func)
}
// instance holds information that is only necessary for instantiated named
// types.
type instance struct {
orig *Named // original, uninstantiated type
targs *TypeList // type arguments
expandedMethods int // number of expanded methods; expandedMethods <= len(orig.methods)
ctxt *Context // local Context; set to nil after full expansion
}
// namedState represents the possible states that a named type may assume.
type namedState uint32
const (
unresolved namedState = iota // tparams, underlying type and methods might be unavailable
resolved // resolve has run; methods might be incomplete (for instances)
complete // all data is known
)
// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
// If the given type name obj doesn't have a type yet, its type is set to the returned named type.
// The underlying type must not be a *Named.
@ -38,39 +144,122 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
if _, ok := underlying.(*Named); ok {
panic("underlying type must not be *Named")
}
return (*Checker)(nil).newNamed(obj, nil, underlying, newMethodList(methods))
return (*Checker)(nil).newNamed(obj, underlying, methods)
}
func (t *Named) resolve(ctxt *Context) *Named {
if t.resolver == nil {
return t
// resolve resolves the type parameters, methods, and underlying type of n.
// This information may be loaded from a provided loader function, or computed
// from an origin type (in the case of instances).
//
// After resolution, the type parameters, methods, and underlying type of n are
// accessible; but if n is an instantiated type, its methods may still be
// unexpanded.
func (n *Named) resolve() *Named {
if n.state() >= resolved { // avoid locking below
return n
}
t.once.Do(func() {
// TODO(mdempsky): Since we're passing t to the resolver anyway
// (necessary because types2 expects the receiver type for methods
// on defined interface types to be the Named rather than the
// underlying Interface), maybe it should just handle calling
// SetTypeParams, SetUnderlying, and AddMethod instead? Those
// methods would need to support reentrant calls though. It would
// also make the API more future-proof towards further extensions
// (like SetTypeParams).
t.tparams, t.underlying, t.methods = t.resolver(ctxt, t)
t.fromRHS = t.underlying // for cycle detection
})
return t
// TODO(rfindley): if n.check is non-nil we can avoid locking here, since
// type-checking is not concurrent. Evaluate if this is worth doing.
n.mu.Lock()
defer n.mu.Unlock()
if n.state() >= resolved {
return n
}
if n.inst != nil {
assert(n.underlying == nil) // n is an unresolved instance
assert(n.loader == nil) // instances are created by instantiation, in which case n.loader is nil
orig := n.inst.orig
orig.resolve()
underlying := n.expandUnderlying()
n.tparams = orig.tparams
n.underlying = underlying
n.fromRHS = orig.fromRHS // for cycle detection
if len(orig.methods) == 0 {
n.setState(complete) // nothing further to do
n.inst.ctxt = nil
} else {
n.setState(resolved)
}
return n
}
// TODO(mdempsky): Since we're passing n to the loader anyway
// (necessary because types2 expects the receiver type for methods
// on defined interface types to be the Named rather than the
// underlying Interface), maybe it should just handle calling
// SetTypeParams, SetUnderlying, and AddMethod instead? Those
// methods would need to support reentrant calls though. It would
// also make the API more future-proof towards further extensions.
if n.loader != nil {
assert(n.underlying == nil)
assert(n.TypeArgs().Len() == 0) // instances are created by instantiation, in which case n.loader is nil
tparams, underlying, methods := n.loader(n)
n.tparams = bindTParams(tparams)
n.underlying = underlying
n.fromRHS = underlying // for cycle detection
n.methods = methods
n.loader = nil
}
n.setState(complete)
return n
}
// state atomically accesses the current state of the receiver.
func (n *Named) state() namedState {
return namedState(atomic.LoadUint32(&n.state_))
}
// setState atomically stores the given state for n.
// Must only be called while holding n.mu.
func (n *Named) setState(state namedState) {
atomic.StoreUint32(&n.state_, uint32(state))
}
// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, methods *methodList) *Named {
typ := &Named{check: check, obj: obj, orig: orig, fromRHS: underlying, underlying: underlying, methods: methods}
if typ.orig == nil {
typ.orig = typ
}
func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
typ := &Named{check: check, obj: obj, fromRHS: underlying, underlying: underlying, methods: methods}
if obj.typ == nil {
obj.typ = typ
}
// Ensure that typ is always expanded and sanity-checked.
// Ensure that typ is always sanity-checked.
if check != nil {
check.needsCleanup(typ)
}
return typ
}
// newNamedInstance creates a new named instance for the given origin and type
// arguments, recording pos as the position of its synthetic object (for error
// reporting).
//
// If set, expanding is the named type instance currently being expanded, that
// led to the creation of this instance.
func (check *Checker) newNamedInstance(pos syntax.Pos, orig *Named, targs []Type, expanding *Named) *Named {
assert(len(targs) > 0)
obj := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil)
inst := &instance{orig: orig, targs: newTypeList(targs)}
// Only pass the expanding context to the new instance if their packages
// match. Since type reference cycles are only possible within a single
// package, this is sufficient for the purposes of short-circuiting cycles.
// Avoiding passing the context in other cases prevents unnecessary coupling
// of types across packages.
if expanding != nil && expanding.Obj().pkg == obj.pkg {
inst.ctxt = expanding.inst.ctxt
}
typ := &Named{check: check, obj: obj, inst: inst}
obj.typ = typ
// Ensure that typ is always sanity-checked.
if check != nil {
check.needsCleanup(typ)
}
@ -78,18 +267,18 @@ func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, meth
}
func (t *Named) cleanup() {
assert(t.orig.orig == t.orig)
assert(t.inst == nil || t.inst.orig.inst == nil)
// Ensure that every defined type created in the course of type-checking has
// either non-*Named underlying, or is unresolved.
// either non-*Named underlying type, or is unexpanded.
//
// This guarantees that we don't leak any types whose underlying is *Named,
// because any unresolved instances will lazily compute their underlying by
// substituting in the underlying of their origin. The origin must have
// either been imported or type-checked and expanded here, and in either case
// its underlying will be fully expanded.
// This guarantees that we don't leak any types whose underlying type is
// *Named, because any unexpanded instances will lazily compute their
// underlying type by substituting in the underlying type of their origin.
// The origin must have either been imported or type-checked and expanded
// here, and in either case its underlying type will be fully expanded.
switch t.underlying.(type) {
case nil:
if t.resolver == nil {
if t.TypeArgs().Len() == 0 {
panic("nil underlying")
}
case *Named:
@ -100,51 +289,91 @@ func (t *Named) cleanup() {
// Obj returns the type name for the declaration defining the named type t. For
// instantiated types, this is the same as the type name of the origin type.
func (t *Named) Obj() *TypeName { return t.orig.obj } // for non-instances this is the same as t.obj
func (t *Named) Obj() *TypeName {
if t.inst == nil {
return t.obj
}
return t.inst.orig.obj
}
// Origin returns the generic type from which the named type t is
// instantiated. If t is not an instantiated type, the result is t.
func (t *Named) Origin() *Named { return t.orig }
// TODO(gri) Come up with a better representation and API to distinguish
// between parameterized instantiated and non-instantiated types.
func (t *Named) Origin() *Named {
if t.inst == nil {
return t
}
return t.inst.orig
}
// TypeParams returns the type parameters of the named type t, or nil.
// The result is non-nil for an (originally) generic type even if it is instantiated.
func (t *Named) TypeParams() *TypeParamList { return t.resolve(nil).tparams }
func (t *Named) TypeParams() *TypeParamList { return t.resolve().tparams }
// SetTypeParams sets the type parameters of the named type t.
// t must not have type arguments.
func (t *Named) SetTypeParams(tparams []*TypeParam) {
assert(t.targs.Len() == 0)
t.resolve(nil).tparams = bindTParams(tparams)
assert(t.inst == nil)
t.resolve().tparams = bindTParams(tparams)
}
// TypeArgs returns the type arguments used to instantiate the named type t.
func (t *Named) TypeArgs() *TypeList { return t.targs }
// NumMethods returns the number of explicit methods defined for t.
//
// For an ordinary or instantiated type t, the receiver base type of these
// methods will be the named type t. For an uninstantiated generic type t, each
// method receiver will be instantiated with its receiver type parameters.
func (t *Named) NumMethods() int { return t.resolve(nil).methods.Len() }
// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
func (t *Named) Method(i int) *Func {
t.resolve(nil)
return t.methods.At(i, func() *Func {
return t.instantiateMethod(i)
})
func (t *Named) TypeArgs() *TypeList {
if t.inst == nil {
return nil
}
return t.inst.targs
}
// instantiateMethod instantiates the i'th method for an instantiated receiver.
func (t *Named) instantiateMethod(i int) *Func {
assert(t.TypeArgs().Len() > 0) // t must be an instance
// NumMethods returns the number of explicit methods defined for t.
func (t *Named) NumMethods() int {
return len(t.Origin().resolve().methods)
}
// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
//
// For an ordinary or instantiated type t, the receiver base type of this
// method is the named type t. For an uninstantiated generic type t, each
// method receiver is instantiated with its receiver type parameters.
func (t *Named) Method(i int) *Func {
t.resolve()
if t.state() >= complete {
return t.methods[i]
}
assert(t.inst != nil) // only instances should have incomplete methods
orig := t.inst.orig
t.mu.Lock()
defer t.mu.Unlock()
if len(t.methods) != len(orig.methods) {
assert(len(t.methods) == 0)
t.methods = make([]*Func, len(orig.methods))
}
if t.methods[i] == nil {
assert(t.inst.ctxt != nil) // we should still have a context remaining from the resolution phase
t.methods[i] = t.expandMethod(i)
t.inst.expandedMethods++
// Check if we've created all methods at this point. If we have, mark the
// type as fully expanded.
if t.inst.expandedMethods == len(orig.methods) {
t.setState(complete)
t.inst.ctxt = nil // no need for a context anymore
}
}
return t.methods[i]
}
// expandMethod substitutes type arguments in the i'th method for an
// instantiated receiver.
func (t *Named) expandMethod(i int) *Func {
// t.orig.methods is not lazy. origm is the method instantiated with its
// receiver type parameters (the "origin" method).
origm := t.orig.Method(i)
origm := t.inst.orig.Method(i)
assert(origm != nil)
check := t.check
@ -171,10 +400,13 @@ func (t *Named) instantiateMethod(i int) *Func {
// We can only substitute if we have a correspondence between type arguments
// and type parameters. This check is necessary in the presence of invalid
// code.
if origSig.RecvTypeParams().Len() == t.targs.Len() {
ctxt := check.bestContext(nil)
smap := makeSubstMap(origSig.RecvTypeParams().list(), t.targs.list())
sig = check.subst(origm.pos, origSig, smap, ctxt).(*Signature)
if origSig.RecvTypeParams().Len() == t.inst.targs.Len() {
smap := makeSubstMap(origSig.RecvTypeParams().list(), t.inst.targs.list())
var ctxt *Context
if check != nil {
ctxt = check.context()
}
sig = check.subst(origm.pos, origSig, smap, t, ctxt).(*Signature)
}
if sig == origSig {
@ -198,14 +430,14 @@ func (t *Named) instantiateMethod(i int) *Func {
// SetUnderlying sets the underlying type and marks t as complete.
// t must not have type arguments.
func (t *Named) SetUnderlying(underlying Type) {
assert(t.targs.Len() == 0)
assert(t.inst == nil)
if underlying == nil {
panic("underlying type must not be nil")
}
if _, ok := underlying.(*Named); ok {
panic("underlying type must not be *Named")
}
t.resolve(nil).underlying = underlying
t.resolve().underlying = underlying
if t.fromRHS == nil {
t.fromRHS = underlying // for cycle detection
}
@ -214,19 +446,21 @@ func (t *Named) SetUnderlying(underlying Type) {
// AddMethod adds method m unless it is already in the method list.
// t must not have type arguments.
func (t *Named) AddMethod(m *Func) {
assert(t.targs.Len() == 0)
t.resolve(nil)
if t.methods == nil {
t.methods = newMethodList(nil)
assert(t.inst == nil)
t.resolve()
if i, _ := lookupMethod(t.methods, m.pkg, m.name, false); i < 0 {
t.methods = append(t.methods, m)
}
t.methods.Add(m)
}
func (t *Named) Underlying() Type { return t.resolve(nil).underlying }
func (t *Named) Underlying() Type { return t.resolve().underlying }
func (t *Named) String() string { return TypeString(t, nil) }
// ----------------------------------------------------------------------------
// Implementation
//
// TODO(rfindley): reorganize the loading and expansion methods under this
// heading.
// under returns the expanded underlying type of n0; possibly by following
// forward chains of named types. If an underlying type is found, resolve
@ -275,7 +509,7 @@ func (n0 *Named) under() Type {
check := n0.check
n := n0
seen := make(map[*Named]int) // types that need their underlying resolved
seen := make(map[*Named]int) // types that need their underlying type resolved
var path []Object // objects encountered, for cycle reporting
loop:
@ -323,11 +557,11 @@ func (n *Named) setUnderlying(typ Type) {
}
func (n *Named) lookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) {
n.resolve(nil)
n.resolve()
// If n is an instance, we may not have yet instantiated all of its methods.
// Look up the method index in orig, and only instantiate method at the
// matching index (if any).
i, _ := n.orig.methods.Lookup(pkg, name, foldCase)
i, _ := lookupMethod(n.Origin().methods, pkg, name, foldCase)
if i < 0 {
return -1, nil
}
@ -335,83 +569,83 @@ func (n *Named) lookupMethod(pkg *Package, name string, foldCase bool) (int, *Fu
return i, n.Method(i)
}
// bestContext returns the best available context. In order of preference:
// - the given ctxt, if non-nil
// - check.ctxt, if check is non-nil
// - a new Context
func (check *Checker) bestContext(ctxt *Context) *Context {
if ctxt != nil {
return ctxt
// context returns the type-checker context.
func (check *Checker) context() *Context {
if check.ctxt == nil {
check.ctxt = NewContext()
}
if check != nil {
if check.ctxt == nil {
check.ctxt = NewContext()
}
return check.ctxt
}
return NewContext()
return check.ctxt
}
// expandNamed ensures that the underlying type of n is instantiated.
// The underlying type will be Typ[Invalid] if there was an error.
func expandNamed(ctxt *Context, n *Named, instPos syntax.Pos) (tparams *TypeParamList, underlying Type, methods *methodList) {
// expandUnderlying substitutes type arguments in the underlying type n.orig,
// returning the result. Returns Typ[Invalid] if there was an error.
func (n *Named) expandUnderlying() Type {
check := n.check
if check != nil && check.conf.Trace {
check.trace(instPos, "-- expandNamed %s", n)
check.trace(n.obj.pos, "-- Named.expandUnderlying %s", n)
check.indent++
defer func() {
check.indent--
check.trace(instPos, "=> %s (tparams = %s, under = %s)", n, tparams.list(), underlying)
check.trace(n.obj.pos, "=> %s (tparams = %s, under = %s)", n, n.tparams.list(), n.underlying)
}()
}
n.orig.resolve(ctxt)
assert(n.orig.underlying != nil)
assert(n.inst.orig.underlying != nil)
if n.inst.ctxt == nil {
n.inst.ctxt = NewContext()
}
if _, unexpanded := n.orig.underlying.(*Named); unexpanded {
// We should only get an unexpanded underlying here during type checking
orig := n.inst.orig
targs := n.inst.targs
if _, unexpanded := orig.underlying.(*Named); unexpanded {
// We should only get a Named underlying type here during type checking
// (for example, in recursive type declarations).
assert(check != nil)
}
// Mismatching arg and tparam length may be checked elsewhere.
if n.orig.tparams.Len() == n.targs.Len() {
// We must always have a context, to avoid infinite recursion.
ctxt = check.bestContext(ctxt)
h := ctxt.instanceHash(n.orig, n.targs.list())
// ensure that an instance is recorded for h to avoid infinite recursion.
ctxt.update(h, n.orig, n.TypeArgs().list(), n)
smap := makeSubstMap(n.orig.tparams.list(), n.targs.list())
underlying = n.check.subst(instPos, n.orig.underlying, smap, ctxt)
// If the underlying of n is an interface, we need to set the receiver of
// its methods accurately -- we set the receiver of interface methods on
// the RHS of a type declaration to the defined type.
if iface, _ := underlying.(*Interface); iface != nil {
if methods, copied := replaceRecvType(iface.methods, n.orig, n); copied {
// If the underlying doesn't actually use type parameters, it's possible
// that it wasn't substituted. In this case we need to create a new
// *Interface before modifying receivers.
if iface == n.orig.underlying {
old := iface
iface = check.newInterface()
iface.embeddeds = old.embeddeds
iface.complete = old.complete
iface.implicit = old.implicit // should be false but be conservative
underlying = iface
}
iface.methods = methods
}
}
} else {
underlying = Typ[Invalid]
if orig.tparams.Len() != targs.Len() {
// Mismatching arg and tparam length may be checked elsewhere.
return Typ[Invalid]
}
return n.orig.tparams, underlying, newLazyMethodList(n.orig.methods.Len())
// Ensure that an instance is recorded before substituting, so that we
// resolve n for any recursive references.
h := n.inst.ctxt.instanceHash(orig, targs.list())
n2 := n.inst.ctxt.update(h, orig, n.TypeArgs().list(), n)
assert(n == n2)
smap := makeSubstMap(orig.tparams.list(), targs.list())
var ctxt *Context
if check != nil {
ctxt = check.context()
}
underlying := n.check.subst(n.obj.pos, orig.underlying, smap, n, ctxt)
// If the underlying type of n is an interface, we need to set the receiver of
// its methods accurately -- we set the receiver of interface methods on
// the RHS of a type declaration to the defined type.
if iface, _ := underlying.(*Interface); iface != nil {
if methods, copied := replaceRecvType(iface.methods, orig, n); copied {
// If the underlying type doesn't actually use type parameters, it's
// possible that it wasn't substituted. In this case we need to create
// a new *Interface before modifying receivers.
if iface == orig.underlying {
old := iface
iface = check.newInterface()
iface.embeddeds = old.embeddeds
iface.complete = old.complete
iface.implicit = old.implicit // should be false but be conservative
underlying = iface
}
iface.methods = methods
}
}
return underlying
}
// safeUnderlying returns the underlying of typ without expanding instances, to
// avoid infinite recursion.
// safeUnderlying returns the underlying type of typ without expanding
// instances, to avoid infinite recursion.
//
// TODO(rfindley): eliminate this function or give it a better name.
func safeUnderlying(typ Type) Type {

View File

@ -0,0 +1,120 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types2_test
import (
"testing"
"cmd/compile/internal/syntax"
. "cmd/compile/internal/types2"
)
func BenchmarkNamed(b *testing.B) {
const src = `
package p
type T struct {
P int
}
func (T) M(int) {}
func (T) N() (i int) { return }
type G[P any] struct {
F P
}
func (G[P]) M(P) {}
func (G[P]) N() (p P) { return }
type Inst = G[int]
`
pkg, err := pkgFor("p", src, nil)
if err != nil {
b.Fatal(err)
}
var (
T = pkg.Scope().Lookup("T").Type()
G = pkg.Scope().Lookup("G").Type()
SrcInst = pkg.Scope().Lookup("Inst").Type()
UserInst = mustInstantiate(b, G, Typ[Int])
)
tests := []struct {
name string
typ Type
}{
{"nongeneric", T},
{"generic", G},
{"src instance", SrcInst},
{"user instance", UserInst},
}
b.Run("Underlying", func(b *testing.B) {
for _, test := range tests {
b.Run(test.name, func(b *testing.B) {
// Access underlying once, to trigger any lazy calculation.
_ = test.typ.Underlying()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = test.typ.Underlying()
}
})
}
})
}
func mustInstantiate(tb testing.TB, orig Type, targs ...Type) Type {
inst, err := Instantiate(nil, orig, targs, true)
if err != nil {
tb.Fatal(err)
}
return inst
}
// Test that types do not expand infinitely, as in golang/go#52715.
func TestFiniteTypeExpansion(t *testing.T) {
const src = `
package p
type Tree[T any] struct {
*Node[T]
}
func (*Tree[R]) N(r R) R { return r }
type Node[T any] struct {
*Tree[T]
}
func (Node[Q]) M(Q) {}
type Inst = *Tree[int]
`
f, err := parseSrc("foo.go", src)
if err != nil {
t.Fatal(err)
}
pkg := NewPackage("p", f.PkgName.Value)
if err := NewChecker(nil, pkg, nil).Files([]*syntax.File{f}); err != nil {
t.Fatal(err)
}
firstFieldType := func(n *Named) *Named {
return n.Underlying().(*Struct).Field(0).Type().(*Pointer).Elem().(*Named)
}
Inst := pkg.Scope().Lookup("Inst").Type().(*Pointer).Elem().(*Named)
Node := firstFieldType(Inst)
Tree := firstFieldType(Node)
if !Identical(Inst, Tree) {
t.Fatalf("Not a cycle: got %v, want %v", Tree, Inst)
}
if Inst != Tree {
t.Errorf("Duplicate instances in cycle: %s (%p) -> %s (%p) -> %s (%p)", Inst, Inst, Node, Node, Tree, Tree)
}
}

View File

@ -279,19 +279,7 @@ func NewTypeName(pos syntax.Pos, pkg *Package, name string, typ Type) *TypeName
// lazily calls resolve to finish constructing the Named object.
func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, load func(named *Named) (tparams []*TypeParam, underlying Type, methods []*Func)) *TypeName {
obj := NewTypeName(pos, pkg, name, nil)
resolve := func(_ *Context, t *Named) (*TypeParamList, Type, *methodList) {
tparams, underlying, methods := load(t)
switch underlying.(type) {
case nil, *Named:
panic(fmt.Sprintf("invalid underlying type %T", t.underlying))
}
return bindTParams(tparams), underlying, newMethodList(methods)
}
NewNamed(obj, nil, nil).resolver = resolve
NewNamed(obj, nil, nil).loader = load
return obj
}

View File

@ -102,7 +102,7 @@ func isTypeParam(t Type) bool {
func isGeneric(t Type) bool {
// A parameterized type is only generic if it doesn't have an instantiation already.
named, _ := t.(*Named)
return named != nil && named.obj != nil && named.targs == nil && named.TypeParams() != nil
return named != nil && named.obj != nil && named.inst == nil && named.TypeParams().Len() > 0
}
// Comparable reports whether values of type T are comparable.
@ -283,18 +283,19 @@ func identical(x, y Type, cmpTags bool, p *ifacePair) bool {
}
smap := makeSubstMap(ytparams, targs)
var check *Checker // ok to call subst on a nil *Checker
ctxt := NewContext() // need a non-nil Context for the substitution below
// Constraints must be pair-wise identical, after substitution.
for i, xtparam := range xtparams {
ybound := check.subst(nopos, ytparams[i].bound, smap, nil)
ybound := check.subst(nopos, ytparams[i].bound, smap, nil, ctxt)
if !identical(xtparam.bound, ybound, cmpTags, p) {
return false
}
}
yparams = check.subst(nopos, y.params, smap, nil).(*Tuple)
yresults = check.subst(nopos, y.results, smap, nil).(*Tuple)
yparams = check.subst(nopos, y.params, smap, nil, ctxt).(*Tuple)
yresults = check.subst(nopos, y.results, smap, nil, ctxt).(*Tuple)
}
return x.variadic == y.variadic &&
@ -401,7 +402,7 @@ func identical(x, y Type, cmpTags bool, p *ifacePair) bool {
if len(xargs) > 0 {
// Instances are identical if their original type and type arguments
// are identical.
if !Identical(x.orig, y.orig) {
if !Identical(x.Origin(), y.Origin()) {
return false
}
for i, xa := range xargs {

View File

@ -143,7 +143,7 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []
// recvTPar.bound is (possibly) parameterized in the context of the
// receiver type declaration. Substitute parameters for the current
// context.
tpar.bound = check.subst(tpar.obj.pos, recvTPar.bound, smap, nil)
tpar.bound = check.subst(tpar.obj.pos, recvTPar.bound, smap, nil, check.context())
}
} else if len(tparams) < len(recvTParams) {
// Reporting an error here is a stop-gap measure to avoid crashes in the

View File

@ -31,7 +31,7 @@ func TestSizeof(t *testing.T) {
{Interface{}, 40, 80},
{Map{}, 16, 32},
{Chan{}, 12, 24},
{Named{}, 56, 104},
{Named{}, 60, 112},
{TypeParam{}, 28, 48},
{term{}, 12, 24},

View File

@ -48,8 +48,11 @@ func (m substMap) lookup(tpar *TypeParam) Type {
// incoming type. If a substitution took place, the result type is different
// from the incoming type.
//
// If the given context is non-nil, it is used in lieu of check.Config.Context.
func (check *Checker) subst(pos syntax.Pos, typ Type, smap substMap, ctxt *Context) Type {
// If expanding is non-nil, it is the instance type currently being expanded.
// One of expanding or ctxt must be non-nil.
func (check *Checker) subst(pos syntax.Pos, typ Type, smap substMap, expanding *Named, ctxt *Context) Type {
assert(expanding != nil || ctxt != nil)
if smap.empty() {
return typ
}
@ -64,19 +67,21 @@ func (check *Checker) subst(pos syntax.Pos, typ Type, smap substMap, ctxt *Conte
// general case
subst := subster{
pos: pos,
smap: smap,
check: check,
ctxt: check.bestContext(ctxt),
pos: pos,
smap: smap,
check: check,
expanding: expanding,
ctxt: ctxt,
}
return subst.typ(typ)
}
type subster struct {
pos syntax.Pos
smap substMap
check *Checker // nil if called via Instantiate
ctxt *Context
pos syntax.Pos
smap substMap
check *Checker // nil if called via Instantiate
expanding *Named // if non-nil, the instance that is being expanded
ctxt *Context
}
func (subst *subster) typ(typ Type) Type {
@ -176,7 +181,7 @@ func (subst *subster) typ(typ Type) Type {
// In this case the interface will not be substituted here, because its
// method signatures do not depend on the type parameter P, but we still
// need to create new interface methods to hold the instantiated
// receiver. This is handled by expandNamed.
// receiver. This is handled by Named.expandUnderlying.
iface.methods, _ = replaceRecvType(methods, t, iface)
return iface
}
@ -207,19 +212,20 @@ func (subst *subster) typ(typ Type) Type {
}
}
// subst is called by expandNamed, so in this function we need to be
// subst is called during expansion, so in this function we need to be
// careful not to call any methods that would cause t to be expanded: doing
// so would result in deadlock.
//
// So we call t.orig.TypeParams() rather than t.TypeParams() here and
// below.
if t.orig.TypeParams().Len() == 0 {
// So we call t.Origin().TypeParams() rather than t.TypeParams().
orig := t.Origin()
n := orig.TypeParams().Len()
if n == 0 {
dump(">>> %s is not parameterized", t)
return t // type is not parameterized
}
var newTArgs []Type
if t.targs.Len() != t.orig.TypeParams().Len() {
if t.TypeArgs().Len() != n {
return Typ[Invalid] // error reported elsewhere
}
@ -228,14 +234,14 @@ func (subst *subster) typ(typ Type) Type {
// For each (existing) type argument targ, determine if it needs
// to be substituted; i.e., if it is or contains a type parameter
// that has a type argument for it.
for i, targ := range t.targs.list() {
for i, targ := range t.TypeArgs().list() {
dump(">>> %d targ = %s", i, targ)
new_targ := subst.typ(targ)
if new_targ != targ {
dump(">>> substituted %d targ %s => %s", i, targ, new_targ)
if newTArgs == nil {
newTArgs = make([]Type, t.orig.TypeParams().Len())
copy(newTArgs, t.targs.list())
newTArgs = make([]Type, n)
copy(newTArgs, t.TypeArgs().list())
}
newTArgs[i] = new_targ
}
@ -246,25 +252,11 @@ func (subst *subster) typ(typ Type) Type {
return t // nothing to substitute
}
// before creating a new named type, check if we have this one already
h := subst.ctxt.instanceHash(t.orig, newTArgs)
dump(">>> new type hash: %s", h)
if named := subst.ctxt.lookup(h, t.orig, newTArgs); named != nil {
dump(">>> found %s", named)
return named
}
// Create a new instance and populate the context to avoid endless
// recursion. The position used here is irrelevant because validation only
// occurs on t (we don't call validType on named), but we use subst.pos to
// help with debugging.
return subst.check.instance(subst.pos, t.orig, newTArgs, subst.ctxt)
// Note that if we were to expose substitution more generally (not just in
// the context of a declaration), we'd have to substitute in
// named.underlying as well.
//
// But this is unnecessary for now.
return subst.check.instance(subst.pos, orig, newTArgs, subst.expanding, subst.ctxt)
case *TypeParam:
return subst.smap.lookup(t)

View File

@ -25,7 +25,7 @@ func (xl termlist) String() string {
var buf bytes.Buffer
for i, x := range xl {
if i > 0 {
buf.WriteString(" ")
buf.WriteString(" | ")
}
buf.WriteString(x.String())
}

View File

@ -12,7 +12,7 @@ import (
// maketl makes a term list from its string representation.
func maketl(s string) termlist {
s = strings.ReplaceAll(s, " ", "")
names := strings.Split(s, "")
names := strings.Split(s, "|")
r := make(termlist, len(names))
for i, n := range names {
r[i] = testTerm(n)
@ -33,10 +33,10 @@ func TestTermlistString(t *testing.T) {
"int",
"~int",
"myInt",
"∅ ∅",
"𝓤 𝓤",
"∅ 𝓤 int",
"∅ 𝓤 int myInt",
"∅ | ∅",
"𝓤 | 𝓤",
"∅ | 𝓤 | int",
"∅ | 𝓤 | int | myInt",
} {
if got := maketl(want).String(); got != want {
t.Errorf("(%v).String() == %v", want, got)
@ -47,12 +47,12 @@ func TestTermlistString(t *testing.T) {
func TestTermlistIsEmpty(t *testing.T) {
for test, want := range map[string]bool{
"∅": true,
"∅ ∅": true,
"∅ 𝓤": false,
"∅ myInt": false,
"∅ | ∅": true,
"∅ | ∅ | 𝓤": false,
"∅ | ∅ | myInt": false,
"𝓤": false,
"𝓤 int": false,
"𝓤 myInt ∅": false,
"𝓤 | int": false,
"𝓤 | myInt | ∅": false,
} {
xl := maketl(test)
got := xl.isEmpty()
@ -65,13 +65,13 @@ func TestTermlistIsEmpty(t *testing.T) {
func TestTermlistIsAll(t *testing.T) {
for test, want := range map[string]bool{
"∅": false,
"∅ ∅": false,
"int ~string": false,
"~int myInt": false,
"∅ 𝓤": true,
"∅ | ∅": false,
"int | ~string": false,
"~int | myInt": false,
"∅ | ∅ | 𝓤": true,
"𝓤": true,
"𝓤 int": true,
"myInt 𝓤": true,
"𝓤 | int": true,
"myInt | 𝓤": true,
} {
xl := maketl(test)
got := xl.isAll()
@ -86,17 +86,17 @@ func TestTermlistNorm(t *testing.T) {
xl, want string
}{
{"∅", "∅"},
{"∅ ∅", "∅"},
{"∅ int", "int"},
{"∅ myInt", "myInt"},
{"𝓤 int", "𝓤"},
{"𝓤 myInt", "𝓤"},
{"int myInt", "int myInt"},
{"~int int", "~int"},
{"~int myInt", "~int"},
{"int ~string int", "int ~string"},
{"~int string 𝓤 ~string int", "𝓤"},
{"~int string myInt ~string int", "~int ~string"},
{"∅ | ∅", "∅"},
{"∅ | int", "int"},
{"∅ | myInt", "myInt"},
{"𝓤 | int", "𝓤"},
{"𝓤 | myInt", "𝓤"},
{"int | myInt", "int | myInt"},
{"~int | int", "~int"},
{"~int | myInt", "~int"},
{"int | ~string | int", "int | ~string"},
{"~int | string | 𝓤 | ~string | int", "𝓤"},
{"~int | string | myInt | ~string | int", "~int | ~string"},
} {
xl := maketl(test.xl)
got := maketl(test.xl).norm()
@ -116,15 +116,15 @@ func TestTermlistUnion(t *testing.T) {
{"∅", "int", "int"},
{"𝓤", "~int", "𝓤"},
{"int", "~int", "~int"},
{"int", "string", "int string"},
{"int", "myInt", "int myInt"},
{"int", "string", "int | string"},
{"int", "myInt", "int | myInt"},
{"~int", "myInt", "~int"},
{"int string", "~string", "int ~string"},
{"~int string", "~string int", "~int ~string"},
{"~int string ∅", "~string int", "~int ~string"},
{"~int myInt ∅", "~string int", "~int ~string"},
{"~int string 𝓤", "~string int", "𝓤"},
{"~int string myInt", "~string int", "~int ~string"},
{"int | string", "~string", "int | ~string"},
{"~int | string", "~string | int", "~int | ~string"},
{"~int | string | ∅", "~string | int", "~int | ~string"},
{"~int | myInt | ∅", "~string | int", "~int | ~string"},
{"~int | string | 𝓤", "~string | int", "𝓤"},
{"~int | string | myInt", "~string | int", "~int | ~string"},
} {
xl := maketl(test.xl)
yl := maketl(test.yl)
@ -150,12 +150,12 @@ func TestTermlistIntersect(t *testing.T) {
{"int", "string", "∅"},
{"int", "myInt", "∅"},
{"~int", "myInt", "myInt"},
{"int string", "~string", "string"},
{"~int string", "~string int", "int string"},
{"~int string ∅", "~string int", "int string"},
{"~int myInt ∅", "~string int", "int"},
{"~int string 𝓤", "~string int", "int ~string"},
{"~int string myInt", "~string int", "int string"},
{"int | string", "~string", "string"},
{"~int | string", "~string | int", "int | string"},
{"~int | string | ∅", "~string | int", "int | string"},
{"~int | myInt | ∅", "~string | int", "int"},
{"~int | string | 𝓤", "~string | int", "int | ~string"},
{"~int | string | myInt", "~string | int", "int | string"},
} {
xl := maketl(test.xl)
yl := maketl(test.yl)
@ -174,12 +174,12 @@ func TestTermlistEqual(t *testing.T) {
{"∅", "∅", true},
{"∅", "𝓤", false},
{"𝓤", "𝓤", true},
{"𝓤 int", "𝓤", true},
{"𝓤 int", "string 𝓤", true},
{"𝓤 myInt", "string 𝓤", true},
{"int ~string", "string int", false},
{"~int string", "string myInt", false},
{"int ~string ∅", "string int ~string", true},
{"𝓤 | int", "𝓤", true},
{"𝓤 | int", "string | 𝓤", true},
{"𝓤 | myInt", "string | 𝓤", true},
{"int | ~string", "string | int", false},
{"~int | string", "string | myInt", false},
{"int | ~string | ∅", "string | int | ~string", true},
} {
xl := maketl(test.xl)
yl := maketl(test.yl)
@ -201,11 +201,11 @@ func TestTermlistIncludes(t *testing.T) {
{"int", "string", false},
{"~int", "string", false},
{"~int", "myInt", true},
{"int string", "string", true},
{"~int string", "int", true},
{"~int string", "myInt", true},
{"~int myInt ∅", "myInt", true},
{"myInt 𝓤", "int", true},
{"int | string", "string", true},
{"~int | string", "int", true},
{"~int | string", "myInt", true},
{"~int | myInt | ∅", "myInt", true},
{"myInt | ∅ | 𝓤", "int", true},
} {
xl := maketl(test.xl)
yl := testTerm(test.typ).typ
@ -236,12 +236,12 @@ func TestTermlistSupersetOf(t *testing.T) {
{"myInt", "~int", false},
{"int", "string", false},
{"~int", "string", false},
{"int string", "string", true},
{"int string", "~string", false},
{"~int string", "int", true},
{"~int string", "myInt", true},
{"~int string ∅", "string", true},
{"~string 𝓤", "myInt", true},
{"int | string", "string", true},
{"int | string", "~string", false},
{"~int | string", "int", true},
{"~int | string", "myInt", true},
{"~int | string | ∅", "string", true},
{"~string | ∅ | 𝓤", "myInt", true},
} {
xl := maketl(test.xl)
y := testTerm(test.typ)
@ -261,18 +261,18 @@ func TestTermlistSubsetOf(t *testing.T) {
{"∅", "𝓤", true},
{"𝓤", "∅", false},
{"𝓤", "𝓤", true},
{"int", "int string", true},
{"~int", "int string", false},
{"~int", "myInt string", false},
{"myInt", "~int string", true},
{"~int", "string string int ~int", true},
{"myInt", "string string ~int", true},
{"int string", "string", false},
{"int string", "string int", true},
{"int ~string", "string int", false},
{"myInt ~string", "string int 𝓤", true},
{"int ~string", "string int string", false},
{"int myInt", "string ~int string", true},
{"int", "int | string", true},
{"~int", "int | string", false},
{"~int", "myInt | string", false},
{"myInt", "~int | string", true},
{"~int", "string | string | int | ~int", true},
{"myInt", "string | string | ~int", true},
{"int | string", "string", false},
{"int | string", "string | int", true},
{"int | ~string", "string | int", false},
{"myInt | ~string", "string | int | 𝓤", true},
{"int | ~string", "string | int | ∅ | string", false},
{"int | myInt", "string | ~int | ∅ | string", true},
} {
xl := maketl(test.xl)
yl := maketl(test.yl)

View File

@ -0,0 +1,16 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package p
type number interface {
~float64 | ~int | ~int32
float64 | ~int32
}
func f[T number]() {}
func _() {
_ = f[int /* ERROR int does not implement number \(int missing in float64 | ~int32\)*/]
}

View File

@ -0,0 +1,16 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package p
type (
T1 interface{ comparable }
T2 interface{ int }
)
var (
_ comparable // ERROR cannot use type comparable outside a type constraint: interface is \(or embeds\) comparable
_ T1 // ERROR cannot use type T1 outside a type constraint: interface is \(or embeds\) comparable
_ T2 // ERROR cannot use type T2 outside a type constraint: interface contains type constraints
)

View File

@ -0,0 +1,19 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package p
// version 1
var x1 T1[B1]
type T1[_ any] struct{}
type A1 T1[B1]
type B1 = T1[A1]
// version 2
type T2[_ any] struct{}
type A2 T2[B2]
type B2 = T2[A2]
var x2 T2[B2]

View File

@ -0,0 +1,62 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package p
// correctness check: ensure that cycles through generic instantiations are detected
type T[P any] struct {
_ P
}
type S /* ERROR illegal cycle */ struct {
_ T[S]
}
// simplified test 1
var _ A1[A1[string]]
type A1[P any] struct {
_ B1[P]
}
type B1[P any] struct {
_ P
}
// simplified test 2
var _ B2[A2]
type A2 struct {
_ B2[string]
}
type B2[P any] struct {
_ C2[P]
}
type C2[P any] struct {
_ P
}
// test case from issue
type T23 interface {
~struct {
Field0 T13[T15]
}
}
type T1[P1 interface {
}] struct {
Field2 P1
}
type T13[P2 interface {
}] struct {
Field2 T1[P2]
}
type T15 struct {
Field0 T13[string]
}

View File

@ -21,13 +21,13 @@ func TestTypeSetString(t *testing.T) {
"{}": "𝓤",
"{int}": "{int}",
"{~int}": "{~int}",
"{int|string}": "{int string}",
"{int|string}": "{int | string}",
"{int; string}": "∅",
"{comparable}": "{comparable}",
"{comparable; int}": "{int}",
"{~int; comparable}": "{~int}",
"{int|string; comparable}": "{int string}",
"{int|string; comparable}": "{int | string}",
"{comparable; int; string}": "∅",
"{m()}": "{func (p.T).m()}",
@ -37,7 +37,7 @@ func TestTypeSetString(t *testing.T) {
"{m1(); comparable; m2() int }": "{comparable; func (p.T).m1(); func (p.T).m2() int}",
"{comparable; error}": "{comparable; func (error).Error() string}",
"{m(); comparable; int|float32|string}": "{func (p.T).m(); int float32 string}",
"{m(); comparable; int|float32|string}": "{func (p.T).m(); int | float32 | string}",
"{m1(); int; m2(); comparable }": "{func (p.T).m1(); func (p.T).m2(); int}",
"{E}; type E interface{}": "𝓤",

View File

@ -284,9 +284,9 @@ func (w *typeWriter) typ(typ Type) {
w.string(strconv.Itoa(w.ctxt.getID(t)))
}
w.typeName(t.obj) // when hashing written for readability of the hash only
if t.targs != nil {
if t.inst != nil {
// instantiated type
w.typeList(t.targs.list())
w.typeList(t.inst.targs.list())
} else if w.ctxt == nil && t.TypeParams().Len() != 0 { // For type hashing, don't need to format the TypeParams
// parameterized type
w.tParamList(t.TypeParams().list())

View File

@ -167,9 +167,9 @@ func (check *Checker) validVarType(e syntax.Expr, typ Type) {
tset := computeInterfaceTypeSet(check, pos, t) // TODO(gri) is this the correct position?
if !tset.IsMethodSet() {
if tset.comparable {
check.softErrorf(pos, "interface is (or embeds) comparable")
check.softErrorf(pos, "cannot use type %s outside a type constraint: interface is (or embeds) comparable", typ)
} else {
check.softErrorf(pos, "interface contains type constraints")
check.softErrorf(pos, "cannot use type %s outside a type constraint: interface contains type constraints", typ)
}
}
}
@ -432,57 +432,19 @@ func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *
return Typ[Invalid]
}
// enableTypeTypeInference controls whether to infer missing type arguments
// using constraint type inference. See issue #51527.
const enableTypeTypeInference = false
// create the instance
ctxt := check.bestContext(nil)
h := ctxt.instanceHash(orig, targs)
// targs may be incomplete, and require inference. In any case we should de-duplicate.
inst, _ := ctxt.lookup(h, orig, targs).(*Named)
// If inst is non-nil, we can't just return here. Inst may have been
// constructed via recursive substitution, in which case we wouldn't do the
// validation below. Ensure that the validation (and resulting errors) runs
// for each instantiated type in the source.
if inst == nil {
// x may be a selector for an imported type; use its start pos rather than x.Pos().
tname := NewTypeName(syntax.StartPos(x), orig.obj.pkg, orig.obj.name, nil)
inst = check.newNamed(tname, orig, nil, nil) // underlying, methods and tparams are set when named is resolved
inst.targs = newTypeList(targs)
inst = ctxt.update(h, orig, targs, inst).(*Named)
}
inst := check.instance(x.Pos(), orig, targs, nil, check.context()).(*Named)
def.setUnderlying(inst)
inst.resolver = func(ctxt *Context, n *Named) (*TypeParamList, Type, *methodList) {
tparams := n.orig.TypeParams().list()
targs := n.targs.list()
if enableTypeTypeInference && len(targs) < len(tparams) {
// If inference fails, len(inferred) will be 0, and inst.underlying will
// be set to Typ[Invalid] in expandNamed.
inferred := check.infer(x.Pos(), tparams, targs, nil, nil)
if len(inferred) > len(targs) {
n.targs = newTypeList(inferred)
}
}
return expandNamed(ctxt, n, x.Pos())
}
// orig.tparams may not be set up, so we need to do expansion later.
check.later(func() {
// This is an instance from the source, not from recursive substitution,
// and so it must be resolved during type-checking so that we can report
// errors.
inst.resolve(ctxt)
// Since check is non-nil, we can still mutate inst. Unpinning the resolver
// frees some memory.
inst.resolver = nil
check.recordInstance(x, inst.TypeArgs().list(), inst)
if check.validateTArgLen(x.Pos(), inst.tparams.Len(), inst.targs.Len()) {
if i, err := check.verify(x.Pos(), inst.tparams.list(), inst.targs.list()); err != nil {
if check.validateTArgLen(x.Pos(), inst.TypeParams().Len(), inst.TypeArgs().Len()) {
if i, err := check.verify(x.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), check.context()); err != nil {
// best position for error reporting
pos := x.Pos()
if i < len(xlist) {
@ -490,10 +452,13 @@ func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *
}
check.softErrorf(pos, "%s", err)
} else {
check.mono.recordInstance(check.pkg, x.Pos(), inst.tparams.list(), inst.targs.list(), xlist)
check.mono.recordInstance(check.pkg, x.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), xlist)
}
}
// TODO(rfindley): remove this call: we don't need to call validType here,
// as cycles can only occur for types used inside a Named type declaration,
// and so it suffices to call validType from declared types.
check.validType(inst)
}).describef(x, "resolve instance %s", inst)

View File

@ -546,8 +546,8 @@ func (u *unifier) nify(x, y Type, p *ifacePair) (result bool) {
case *Named:
// TODO(gri) This code differs now from the parallel code in Checker.identical. Investigate.
if y, ok := y.(*Named); ok {
xargs := x.targs.list()
yargs := y.targs.list()
xargs := x.TypeArgs().list()
yargs := y.TypeArgs().list()
if len(xargs) != len(yargs) {
return false

View File

@ -5,29 +5,24 @@
package types2
// validType verifies that the given type does not "expand" indefinitely
// producing a cycle in the type graph. Cycles are detected by marking
// defined types.
// producing a cycle in the type graph.
// (Cycles involving alias types, as in "type A = [10]A" are detected
// earlier, via the objDecl cycle detection mechanism.)
func (check *Checker) validType(typ *Named) {
check.validType0(typ, nil, nil)
}
type typeInfo uint
// validType0 checks if the given type is valid. If typ is a type parameter
// its value is looked up in the provided environment. The environment is
// nil if typ is not part of (the RHS of) an instantiated type, in that case
// any type parameter encountered must be from an enclosing function and can
// be ignored. The path is the list of type names that lead to the current typ.
func (check *Checker) validType0(typ Type, env *tparamEnv, path []Object) typeInfo {
const (
unknown typeInfo = iota
marked
valid
invalid
)
// its value is looked up in the type argument list of the instantiated
// (enclosing) type, if it exists. Otherwise the type parameter must be from
// an enclosing function and can be ignored.
// The nest list describes the stack (the "nest in memory") of types which
// contain (or embed in the case of interfaces) other types. For instance, a
// struct named S which contains a field of named type F contains (the memory
// of) F in S, leading to the nest S->F. If a type appears in its own nest
// (say S->F->S) we have an invalid recursive type. The path list is the full
// path of named types in a cycle; it is only needed for error reporting.
func (check *Checker) validType0(typ Type, nest, path []*Named) bool {
switch t := typ.(type) {
case nil:
// We should never see a nil type but be conservative and panic
@ -37,112 +32,204 @@ func (check *Checker) validType0(typ Type, env *tparamEnv, path []Object) typeIn
}
case *Array:
return check.validType0(t.elem, env, path)
return check.validType0(t.elem, nest, path)
case *Struct:
for _, f := range t.fields {
if check.validType0(f.typ, env, path) == invalid {
return invalid
if !check.validType0(f.typ, nest, path) {
return false
}
}
case *Union:
for _, t := range t.terms {
if check.validType0(t.typ, env, path) == invalid {
return invalid
if !check.validType0(t.typ, nest, path) {
return false
}
}
case *Interface:
for _, etyp := range t.embeddeds {
if check.validType0(etyp, env, path) == invalid {
return invalid
if !check.validType0(etyp, nest, path) {
return false
}
}
case *Named:
// Exit early if we already know t is valid.
// This is purely an optimization but it prevents excessive computation
// times in pathological cases such as testdata/fixedbugs/issue6977.go.
// (Note: The valids map could also be allocated locally, once for each
// validType call.)
if check.valids.lookup(t) != nil {
break
}
// Don't report a 2nd error if we already know the type is invalid
// (e.g., if a cycle was detected earlier, via under).
// Note: ensure that t.orig is fully resolved by calling Underlying().
if t.Underlying() == Typ[Invalid] {
check.infoMap[t] = invalid
return invalid
return false
}
switch check.infoMap[t] {
case unknown:
check.infoMap[t] = marked
check.infoMap[t] = check.validType0(t.orig.fromRHS, env.push(t), append(path, t.obj))
case marked:
// We have seen type t before and thus must have a cycle.
check.infoMap[t] = invalid
// t cannot be in an imported package otherwise that package
// would have reported a type cycle and couldn't have been
// imported in the first place.
assert(t.obj.pkg == check.pkg)
t.underlying = Typ[Invalid] // t is in the current package (no race possibility)
// Find the starting point of the cycle and report it.
for i, tn := range path {
if tn == t.obj {
check.cycleError(path[i:])
return invalid
// If the current type t is also found in nest, (the memory of) t is
// embedded in itself, indicating an invalid recursive type.
for _, e := range nest {
if Identical(e, t) {
// t cannot be in an imported package otherwise that package
// would have reported a type cycle and couldn't have been
// imported in the first place.
assert(t.obj.pkg == check.pkg)
t.underlying = Typ[Invalid] // t is in the current package (no race possibility)
// Find the starting point of the cycle and report it.
// Because each type in nest must also appear in path (see invariant below),
// type t must be in path since it was found in nest. But not every type in path
// is in nest. Specifically t may appear in path with an earlier index than the
// index of t in nest. Search again.
for start, p := range path {
if Identical(p, t) {
check.cycleError(makeObjList(path[start:]))
return false
}
}
panic("cycle start not found")
}
panic("cycle start not found")
}
return check.infoMap[t]
// No cycle was found. Check the RHS of t.
// Every type added to nest is also added to path; thus every type that is in nest
// must also be in path (invariant). But not every type in path is in nest, since
// nest may be pruned (see below, *TypeParam case).
if !check.validType0(t.Origin().fromRHS, append(nest, t), append(path, t)) {
return false
}
check.valids.add(t) // t is valid
case *TypeParam:
// A type parameter stands for the type (argument) it was instantiated with.
// Check the corresponding type argument for validity if we have one.
if env != nil {
if targ := env.tmap[t]; targ != nil {
// Type arguments found in targ must be looked
// up in the enclosing environment env.link.
return check.validType0(targ, env.link, path)
// Check the corresponding type argument for validity if we are in an
// instantiated type.
if len(nest) > 0 {
inst := nest[len(nest)-1] // the type instance
// Find the corresponding type argument for the type parameter
// and proceed with checking that type argument.
for i, tparam := range inst.TypeParams().list() {
// The type parameter and type argument lists should
// match in length but be careful in case of errors.
if t == tparam && i < inst.TypeArgs().Len() {
targ := inst.TypeArgs().At(i)
// The type argument must be valid in the enclosing
// type (where inst was instantiated), hence we must
// check targ's validity in the type nest excluding
// the current (instantiated) type (see the example
// at the end of this file).
// For error reporting we keep the full path.
return check.validType0(targ, nest[:len(nest)-1], path)
}
}
}
}
return valid
return true
}
// A tparamEnv provides the environment for looking up the type arguments
// with which type parameters for a given instance were instantiated.
// If we don't have an instance, the corresponding tparamEnv is nil.
type tparamEnv struct {
tmap substMap
link *tparamEnv
// makeObjList returns the list of type name objects for the given
// list of named types.
func makeObjList(tlist []*Named) []Object {
olist := make([]Object, len(tlist))
for i, t := range tlist {
olist[i] = t.obj
}
return olist
}
func (env *tparamEnv) push(typ *Named) *tparamEnv {
// If typ is not an instantiated type there are no typ-specific
// type parameters to look up and we don't need an environment.
targs := typ.TypeArgs()
if targs == nil {
return nil // no instance => nil environment
}
// Populate tmap: remember the type argument for each type parameter.
// We cannot use makeSubstMap because the number of type parameters
// and arguments may not match due to errors in the source (too many
// or too few type arguments). Populate tmap "manually".
tparams := typ.TypeParams()
n, m := targs.Len(), tparams.Len()
if n > m {
n = m // too many targs
}
tmap := make(substMap, n)
for i := 0; i < n; i++ {
tmap[tparams.At(i)] = targs.At(i)
}
return &tparamEnv{tmap: tmap, link: env}
}
// TODO(gri) Alternative implementation:
// We may not need to build a stack of environments to
// look up the type arguments for type parameters. The
// same information should be available via the path:
// We should be able to just walk the path backwards
// and find the type arguments in the instance objects.
// Here is an example illustrating why we need to exclude the
// instantiated type from nest when evaluating the validity of
// a type parameter. Given the declarations
//
// var _ A[A[string]]
//
// type A[P any] struct { _ B[P] }
// type B[P any] struct { _ P }
//
// we want to determine if the type A[A[string]] is valid.
// We start evaluating A[A[string]] outside any type nest:
//
// A[A[string]]
// nest =
// path =
//
// The RHS of A is now evaluated in the A[A[string]] nest:
//
// struct{_ B[P₁]}
// nest = A[A[string]]
// path = A[A[string]]
//
// The struct has a single field of type B[P₁] with which
// we continue:
//
// B[P₁]
// nest = A[A[string]]
// path = A[A[string]]
//
// struct{_ P₂}
// nest = A[A[string]]->B[P]
// path = A[A[string]]->B[P]
//
// Eventually we reach the type parameter P of type B (P₂):
//
// P₂
// nest = A[A[string]]->B[P]
// path = A[A[string]]->B[P]
//
// The type argument for P of B is the type parameter P of A (P₁).
// It must be evaluated in the type nest that existed when B was
// instantiated:
//
// P₁
// nest = A[A[string]] <== type nest at B's instantiation time
// path = A[A[string]]->B[P]
//
// If we'd use the current nest it would correspond to the path
// which will be wrong as we will see shortly. P's type argument
// is A[string], which again must be evaluated in the type nest
// that existed when A was instantiated with A[string]. That type
// nest is empty:
//
// A[string]
// nest = <== type nest at A's instantiation time
// path = A[A[string]]->B[P]
//
// Evaluation then proceeds as before for A[string]:
//
// struct{_ B[P₁]}
// nest = A[string]
// path = A[A[string]]->B[P]->A[string]
//
// Now we reach B[P] again. If we had not adjusted nest, it would
// correspond to path, and we would find B[P] in nest, indicating
// a cycle, which would clearly be wrong since there's no cycle in
// A[string]:
//
// B[P₁]
// nest = A[string]
// path = A[A[string]]->B[P]->A[string] <== path contains B[P]!
//
// But because we use the correct type nest, evaluation proceeds without
// errors and we get the evaluation sequence:
//
// struct{_ P₂}
// nest = A[string]->B[P]
// path = A[A[string]]->B[P]->A[string]->B[P]
// P₂
// nest = A[string]->B[P]
// path = A[A[string]]->B[P]->A[string]->B[P]
// P₁
// nest = A[string]
// path = A[A[string]]->B[P]->A[string]->B[P]
// string
// nest =
// path = A[A[string]]->B[P]->A[string]->B[P]
//
// At this point we're done and A[A[string]] is valid.

View File

@ -290,7 +290,7 @@ func (t *tester) maybeLogMetadata() error {
//
// TODO(prattmic): If we split dist bootstrap and dist test then this
// could be simplified to directly use internal/sysinfo here.
return t.dirCmd(filepath.Join(goroot, "src/cmd/internal/metadata"), "go", []string{"run", "."}).Run()
return t.dirCmd(filepath.Join(goroot, "src/cmd/internal/metadata"), "go", []string{"run", "main.go"}).Run()
}
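// The metadata program is built with a //go:build ignore tag (see the
// cmd/internal/metadata change further below), so it cannot be selected as a
// package; running its main.go by file name keeps "go run" working here.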
// short returns a -short flag value to use with 'go test'

View File

@ -22,6 +22,7 @@ import (
"cmd/go/internal/lockedfile"
"cmd/go/internal/modfetch"
"cmd/go/internal/modload"
"cmd/go/internal/str"
"cmd/go/internal/work"
)
@ -141,7 +142,7 @@ func runClean(ctx context.Context, cmd *base.Command, args []string) {
// The top cache directory may have been created with special permissions
// and not something that we want to remove. Also, we'd like to preserve
// the access log for future analysis, even if the cache is cleared.
subdirs, _ := filepath.Glob(filepath.Join(dir, "[0-9a-f][0-9a-f]"))
subdirs, _ := filepath.Glob(filepath.Join(str.QuoteGlob(dir), "[0-9a-f][0-9a-f]"))
printedErrors := false
if len(subdirs) > 0 {
if cfg.BuildN || cfg.BuildX {

View File

@ -14,7 +14,6 @@ import (
"go/build"
"go/scanner"
"go/token"
"internal/goroot"
"io/fs"
"os"
"os/exec"
@ -872,7 +871,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo
if !cfg.ModulesEnabled {
buildMode = build.ImportComment
}
if modroot := modload.PackageModRoot(ctx, r.dir); modroot != "" {
if modroot := modload.PackageModRoot(ctx, r.path); modroot != "" {
if mi, err := modindex.Get(modroot); err == nil {
data.p, data.err = mi.Import(cfg.BuildContext, mi.RelPath(r.dir), buildMode)
goto Happy
@ -2064,7 +2063,7 @@ func resolveEmbed(pkgdir string, patterns []string) (files []string, pmap map[st
}
// Glob to find matches.
match, err := fsys.Glob(pkgdir + string(filepath.Separator) + filepath.FromSlash(glob))
match, err := fsys.Glob(str.QuoteGlob(pkgdir) + string(filepath.Separator) + filepath.FromSlash(glob))
if err != nil {
return nil, nil, err
}
@ -3072,7 +3071,7 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args
return nil, fmt.Errorf("%s: argument must be a package path, not a meta-package", arg)
case path.Clean(p) != p:
return nil, fmt.Errorf("%s: argument must be a clean package path", arg)
case !strings.Contains(p, "...") && search.IsStandardImportPath(p) && goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, p):
case !strings.Contains(p, "...") && search.IsStandardImportPath(p) && modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, p):
return nil, fmt.Errorf("%s: argument must not be a package in the standard library", arg)
default:
patterns[i] = p

View File

@ -26,6 +26,7 @@ import (
"cmd/go/internal/lockedfile"
"cmd/go/internal/par"
"cmd/go/internal/robustio"
"cmd/go/internal/str"
"cmd/go/internal/trace"
"golang.org/x/mod/module"
@ -102,7 +103,7 @@ func download(ctx context.Context, mod module.Version) (dir string, err error) {
// active.
parentDir := filepath.Dir(dir)
tmpPrefix := filepath.Base(dir) + ".tmp-"
if old, err := filepath.Glob(filepath.Join(parentDir, tmpPrefix+"*")); err == nil {
if old, err := filepath.Glob(filepath.Join(str.QuoteGlob(parentDir), str.QuoteGlob(tmpPrefix)+"*")); err == nil {
for _, path := range old {
RemoveAll(path) // best effort
}
@ -224,7 +225,7 @@ func downloadZip(ctx context.Context, mod module.Version, zipfile string) (err e
// This is only safe to do because the lock file ensures that their
// writers are no longer active.
tmpPattern := filepath.Base(zipfile) + "*.tmp"
if old, err := filepath.Glob(filepath.Join(filepath.Dir(zipfile), tmpPattern)); err == nil {
if old, err := filepath.Glob(filepath.Join(str.QuoteGlob(filepath.Dir(zipfile)), tmpPattern)); err == nil {
for _, path := range old {
os.Remove(path) // best effort
}

View File

@ -177,6 +177,37 @@ func hasSubdir(root, dir string) (rel string, ok bool) {
return filepath.ToSlash(dir[len(root):]), true
}
// gopath returns the list of Go path directories.
func (ctxt *Context) gopath() []string {
var all []string
for _, p := range ctxt.splitPathList(ctxt.GOPATH) {
if p == "" || p == ctxt.GOROOT {
// Empty paths are uninteresting.
// If the path is the GOROOT, ignore it.
// People sometimes set GOPATH=$GOROOT.
// Do not get confused by this common mistake.
continue
}
if strings.HasPrefix(p, "~") {
// Path segments starting with ~ on Unix are almost always
// users who have incorrectly quoted ~ while setting GOPATH,
// preventing it from expanding to $HOME.
// The situation is made more confusing by the fact that
// bash allows quoted ~ in $PATH (most shells do not).
// Do not get confused by this, and do not try to use the path.
// It does not exist, and printing errors about it confuses
// those users even more, because they think "sure ~ exists!".
// The go command diagnoses this situation and prints a
// useful error.
// On Windows, ~ is used in short names, such as c:\progra~1
// for c:\program files.
continue
}
all = append(all, p)
}
return all
}
var defaultToolTags, defaultReleaseTags []string
// A Package describes the Go package found in a directory.
@ -887,8 +918,8 @@ func (ctxt *Context) eval(x constraint.Expr, allTags map[string]bool) bool {
// $GOARCH
// boringcrypto
// ctxt.Compiler
// linux (if GOOS = android)
// solaris (if GOOS = illumos)
// linux (if GOOS == android)
// solaris (if GOOS == illumos)
// tag (if tag is listed in ctxt.BuildTags or ctxt.ReleaseTags)
//
// It records all consulted tags in allTags.

View File

@ -1,7 +1,31 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"go/build"
"go/build/constraint"
"go/token"
"internal/goroot"
"internal/unsafeheader"
"io/fs"
"math"
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"sort"
"strings"
"sync"
"unsafe"
"cmd/go/internal/base"
"cmd/go/internal/cache"
"cmd/go/internal/cfg"
@ -9,30 +33,21 @@ import (
"cmd/go/internal/imports"
"cmd/go/internal/par"
"cmd/go/internal/str"
"encoding/binary"
"errors"
"fmt"
"go/build"
"go/build/constraint"
"go/token"
"internal/unsafeheader"
"io/fs"
"math"
"os"
"path/filepath"
"runtime/debug"
"sort"
"strconv"
"strings"
"sync"
"unsafe"
)
// enabled is used to flag off the behavior of the module index on tip.
// It will be removed before the release.
// TODO(matloob): Remove enabled once we have more confidence on the
// module index.
var enabled, _ = strconv.ParseBool(os.Getenv("GOINDEX"))
var enabled = func() bool {
debug := strings.Split(os.Getenv("GODEBUG"), ",")
for _, f := range debug {
if f == "goindex=0" {
return false
}
}
return true
}()
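// Illustrative usage implied by the check above: the module index can be
// turned off for a single command by setting the GODEBUG flag it looks for,
// e.g.
//
//	GODEBUG=goindex=0 go list ./...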
// ModuleIndex represents an encoded module index file. It is used to
// do the equivalent of build.Import of packages in the module and answer other
@ -47,38 +62,56 @@ type ModuleIndex struct {
var fcache par.Cache
func moduleHash(modroot string, ismodcache bool) (cache.ActionID, error) {
h := cache.NewHash("moduleIndex")
fmt.Fprintf(h, "module index %s %v", indexVersion, modroot)
if ismodcache {
return h.Sum(), nil
// We expect modules stored within the module cache to be checksummed and
// immutable, and we expect released modules within GOROOT to change only infrequently
// (when the Go version changes).
if !ismodcache && !str.HasFilePathPrefix(modroot, cfg.GOROOT) {
return cache.ActionID{}, ErrNotIndexed
}
// walkdir happens in deterministic order.
err := fsys.Walk(modroot, func(path string, info fs.FileInfo, err error) error {
if modroot == path {
// Check for go.mod in root directory, and return ErrNotIndexed
// if it doesn't exist. Outside the module cache, it's not a module
// if it doesn't have a go.mod file.
}
if err := moduleWalkErr(modroot, path, info, err); err != nil {
return err
}
if info.IsDir() {
return nil
}
fmt.Fprintf(h, "file %v %v\n", info.Name(), info.ModTime())
if info.Mode()&fs.ModeSymlink != 0 {
targ, err := fsys.Stat(path)
if err != nil {
h := cache.NewHash("moduleIndex")
fmt.Fprintf(h, "module index %s %s %v\n", runtime.Version(), indexVersion, modroot)
if strings.HasPrefix(runtime.Version(), "devel ") {
// This copy of the standard library is a development version, not a
// release. It could be based on a Git commit (like "devel go1.19-2a78e8afc0
// Wed Jun 15 00:06:24 2022 +0000") with or without changes on top of that
// commit, or it could be completely artificial due to lacking a `git` binary
// (like "devel gomote.XXXXX", as synthesized by "gomote push" as of
// 2022-06-15). Compute an inexpensive hash of its files using mtimes so
// that during development we can continue to exercise the logic for cached
// GOROOT indexes.
//
// mtimes may be granular, imprecise, and loosely updated (see
// https://apenwarr.ca/log/20181113), but we don't expect Go contributors to
// be mucking around with the import graphs in GOROOT often enough for mtime
// collisions to matter essentially ever.
//
// Note that fsys.Walk walks paths in deterministic order, so this hash
// should be completely deterministic if the files are unchanged.
err := fsys.Walk(modroot, func(path string, info fs.FileInfo, err error) error {
if err := moduleWalkErr(modroot, path, info, err); err != nil {
return err
}
fmt.Fprintf(h, "target %v %v\n", targ.Name(), targ.ModTime())
if info.IsDir() {
return nil
}
fmt.Fprintf(h, "file %v %v\n", info.Name(), info.ModTime())
if info.Mode()&fs.ModeSymlink != 0 {
targ, err := fsys.Stat(path)
if err != nil {
return err
}
fmt.Fprintf(h, "target %v %v\n", targ.Name(), targ.ModTime())
}
return nil
})
if err != nil {
return cache.ActionID{}, err
}
return nil
})
if err != nil {
return cache.ActionID{}, err
}
return h.Sum(), nil
}
@ -97,10 +130,7 @@ func Get(modroot string) (*ModuleIndex, error) {
if modroot == "" {
panic("modindex.Get called with empty modroot")
}
if str.HasFilePathPrefix(modroot, cfg.GOROOT) {
// TODO(matloob): add a case for stdlib here.
return nil, ErrNotIndexed
}
modroot = filepath.Clean(modroot)
isModCache := str.HasFilePathPrefix(modroot, cfg.GOMODCACHE)
return openIndex(modroot, isModCache)
}
@ -121,7 +151,7 @@ func openIndex(modroot string, ismodcache bool) (*ModuleIndex, error) {
data, _, err := cache.Default().GetMmap(id)
if err != nil {
// Couldn't read from modindex. Assume we couldn't read from
// the index because the module has't been indexed yet.
// the index because the module hasn't been indexed yet.
data, err = indexModule(modroot)
if err != nil {
return result{nil, err}
@ -206,7 +236,7 @@ func (mi *ModuleIndex) Packages() []string {
// RelPath returns the path relative to the module's root.
func (mi *ModuleIndex) RelPath(path string) string {
return filepath.Clean(str.TrimFilePathPrefix(path, mi.modroot))
return str.TrimFilePathPrefix(filepath.Clean(path), mi.modroot) // mi.modroot is already clean
}
// ImportPackage is the equivalent of build.Import given the information in ModuleIndex.
@ -225,9 +255,6 @@ func (mi *ModuleIndex) Import(bctxt build.Context, relpath string, mode build.Im
p.ImportPath = "."
p.Dir = filepath.Join(mi.modroot, rp.dir)
if rp.error != "" {
return p, errors.New(rp.error)
}
var pkgerr error
switch ctxt.Compiler {
@ -241,6 +268,78 @@ func (mi *ModuleIndex) Import(bctxt build.Context, relpath string, mode build.Im
return p, fmt.Errorf("import %q: import of unknown directory", p.Dir)
}
// goroot and gopath
inTestdata := func(sub string) bool {
return strings.Contains(sub, "/testdata/") || strings.HasSuffix(sub, "/testdata") || str.HasPathPrefix(sub, "testdata")
}
if !inTestdata(relpath) {
// In build.go, p.Root should only be set in the non-local-import case, or in
// GOROOT or GOPATH. Since module mode only calls Import with path set to "."
// and the module index doesn't apply outside modules, the GOROOT case is
// the only case where GOROOT needs to be set.
// But: p.Root is actually set in the local-import case outside GOROOT, if
// the directory is contained in GOPATH/src
// TODO(#37015): fix that behavior in go/build and remove the gopath case
// below.
if ctxt.GOROOT != "" && str.HasFilePathPrefix(p.Dir, cfg.GOROOTsrc) && p.Dir != cfg.GOROOTsrc {
p.Root = ctxt.GOROOT
p.Goroot = true
modprefix := str.TrimFilePathPrefix(mi.modroot, cfg.GOROOTsrc)
p.ImportPath = relpath
if modprefix != "" {
p.ImportPath = filepath.Join(modprefix, p.ImportPath)
}
}
for _, root := range ctxt.gopath() {
// TODO(matloob): do we need to reimplement the conflictdir logic?
// TODO(matloob): ctxt.hasSubdir evaluates symlinks, so it
// can be slower than we'd like. Find out if we can drop this
// logic before the release.
if sub, ok := ctxt.hasSubdir(filepath.Join(root, "src"), p.Dir); ok {
p.ImportPath = sub
p.Root = root
}
}
}
if p.Root != "" {
// Set GOROOT-specific fields (sometimes for modules in a GOPATH directory).
// The fields set below (SrcRoot, PkgRoot, BinDir, PkgTargetRoot, and PkgObj)
// are only set in build.Import if p.Root != "". As noted in the comment
// on setting p.Root above, p.Root should only be set in the GOROOT case for the
// set of packages we care about, but is also set for modules in a GOPATH src
// directory.
var pkgtargetroot string
var pkga string
suffix := ""
if ctxt.InstallSuffix != "" {
suffix = "_" + ctxt.InstallSuffix
}
switch ctxt.Compiler {
case "gccgo":
pkgtargetroot = "pkg/gccgo_" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
dir, elem := path.Split(p.ImportPath)
pkga = pkgtargetroot + "/" + dir + "lib" + elem + ".a"
case "gc":
pkgtargetroot = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
pkga = pkgtargetroot + "/" + p.ImportPath + ".a"
}
p.SrcRoot = ctxt.joinPath(p.Root, "src")
p.PkgRoot = ctxt.joinPath(p.Root, "pkg")
p.BinDir = ctxt.joinPath(p.Root, "bin")
if pkga != "" {
p.PkgTargetRoot = ctxt.joinPath(p.Root, pkgtargetroot)
p.PkgObj = ctxt.joinPath(p.Root, pkga)
}
}
if rp.error != nil {
if errors.Is(rp.error, errCannotFindPackage) && ctxt.Compiler == "gccgo" && p.Goroot {
return p, nil
}
return p, rp.error
}
if mode&build.FindOnly != 0 {
return p, pkgerr
}
@ -444,8 +543,31 @@ func (mi *ModuleIndex) Import(bctxt build.Context, relpath string, mode build.Im
return p, pkgerr
}
// IsDirWithGoFiles is the equivalent of fsys.IsDirWithGoFiles using the information in the
// RawPackage.
// IsStandardPackage reports whether path is a standard package
// for the goroot and compiler using the module index if possible,
// and otherwise falling back to internal/goroot.IsStandardPackage
func IsStandardPackage(goroot_, compiler, path string) bool {
if !enabled || compiler != "gc" {
return goroot.IsStandardPackage(goroot_, compiler, path)
}
reldir := filepath.FromSlash(path) // relative dir path in module index for package
modroot := filepath.Join(goroot_, "src")
if str.HasFilePathPrefix(reldir, "cmd") {
reldir = str.TrimFilePathPrefix(reldir, "cmd")
modroot = filepath.Join(modroot, "cmd")
}
mod, err := Get(modroot)
if err != nil {
return goroot.IsStandardPackage(goroot_, compiler, path)
}
pkgs := mod.Packages()
i := sort.SearchStrings(pkgs, reldir)
return i != len(pkgs) && pkgs[i] == reldir
}
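// Illustrative calls, assuming an ordinary Go installation (cfg.GOROOT set as
// usual and the GOROOT indexes available):
//
//	IsStandardPackage(cfg.GOROOT, "gc", "net/http")        // true: found in the GOROOT/src index
//	IsStandardPackage(cfg.GOROOT, "gc", "cmd/compile")      // true: the "cmd" prefix is looked up in GOROOT/src/cmd
//	IsStandardPackage(cfg.GOROOT, "gc", "example.com/foo")  // false: not a standard package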
// IsDirWithGoFiles is the equivalent of fsys.IsDirWithGoFiles using the information in the index.
func (mi *ModuleIndex) IsDirWithGoFiles(relpath string) (_ bool, err error) {
rp := mi.indexPackage(relpath)
@ -462,7 +584,7 @@ func (mi *ModuleIndex) IsDirWithGoFiles(relpath string) (_ bool, err error) {
return false, nil
}
// ScanDir implements imports.ScanDir using the information in the RawPackage.
// ScanDir implements imports.ScanDir using the information in the index.
func (mi *ModuleIndex) ScanDir(path string, tags map[string]bool) (sortedImports []string, sortedTestImports []string, err error) {
rp := mi.indexPackage(path)
@ -556,13 +678,15 @@ func shouldBuild(sf *sourceFile, tags map[string]bool) bool {
// indexPackage holds the information needed to access the index's data
// about a package.
type indexPackage struct {
error string
error error
dir string // directory of the package relative to the modroot
// Source files
sourceFiles []*sourceFile
}
var errCannotFindPackage = errors.New("cannot find package")
// indexPackage returns an indexPackage constructed using the information in the ModuleIndex.
func (mi *ModuleIndex) indexPackage(path string) *indexPackage {
defer func() {
@ -572,13 +696,15 @@ func (mi *ModuleIndex) indexPackage(path string) *indexPackage {
}()
offset, ok := mi.packages[path]
if !ok {
return &indexPackage{error: fmt.Sprintf("cannot find package %q in:\n\t%s", path, filepath.Join(mi.modroot, path))}
return &indexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(mi.modroot, path))}
}
// TODO(matloob): do we want to lock on the module index?
d := mi.od.decoderAt(offset)
rp := new(indexPackage)
rp.error = d.string()
if errstr := d.string(); errstr != "" {
rp.error = errors.New(errstr)
}
rp.dir = d.string()
numSourceFiles := d.uint32()
rp.sourceFiles = make([]*sourceFile, numSourceFiles)

View File

@ -4,6 +4,7 @@ import (
"cmd/go/internal/base"
"cmd/go/internal/fsys"
"cmd/go/internal/par"
"cmd/go/internal/str"
"encoding/json"
"errors"
"fmt"
@ -54,10 +55,10 @@ func indexModule(modroot string) ([]byte, error) {
if !info.IsDir() {
return nil
}
rel, err := filepath.Rel(modroot, path)
if err != nil {
panic(err)
if !str.HasFilePathPrefix(path, modroot) {
panic(fmt.Errorf("path %v in walk doesn't have modroot %v as prefix", path, modroot))
}
rel := str.TrimFilePathPrefix(path, modroot)
packages = append(packages, importRaw(modroot, rel))
return nil
})

View File

@ -9,7 +9,6 @@ import (
"encoding/hex"
"errors"
"fmt"
"internal/goroot"
"io/fs"
"os"
"path/filepath"
@ -18,6 +17,7 @@ import (
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/modfetch"
"cmd/go/internal/modindex"
"cmd/go/internal/modinfo"
"cmd/go/internal/search"
@ -39,7 +39,7 @@ func findStandardImportPath(path string) string {
panic("findStandardImportPath called with empty path")
}
if search.IsStandardImportPath(path) {
if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
if modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
return filepath.Join(cfg.GOROOT, "src", path)
}
}

View File

@ -9,7 +9,6 @@ import (
"errors"
"fmt"
"go/build"
"internal/goroot"
"io/fs"
"os"
pathpkg "path"
@ -281,7 +280,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M
// Is the package in the standard library?
pathIsStd := search.IsStandardImportPath(path)
if pathIsStd && goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
if pathIsStd && modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
for _, mainModule := range MainModules.Versions() {
if MainModules.InGorootSrc(mainModule) {
if dir, ok, err := dirInModule(path, MainModules.PathPrefix(mainModule), MainModules.ModRoot(mainModule), true); err != nil {

View File

@ -6,15 +6,22 @@ package modload
import (
"context"
"errors"
"fmt"
"io/fs"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strings"
"sync"
"cmd/go/internal/cfg"
"cmd/go/internal/fsys"
"cmd/go/internal/imports"
"cmd/go/internal/modindex"
"cmd/go/internal/par"
"cmd/go/internal/search"
"golang.org/x/mod/module"
@ -40,9 +47,15 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f
treeCanMatch = search.TreeCanMatchPattern(m.Pattern())
}
var mu sync.Mutex
have := map[string]bool{
"builtin": true, // ignore pseudo-package that exists only for documentation
}
addPkg := func(p string) {
mu.Lock()
m.Pkgs = append(m.Pkgs, p)
mu.Unlock()
}
if !cfg.BuildContext.CgoEnabled {
have["runtime/cgo"] = true // ignore during walk
}
@ -53,6 +66,8 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f
pruneGoMod
)
q := par.NewQueue(runtime.GOMAXPROCS(0))
walkPkgs := func(root, importPathRoot string, prune pruning) {
root = filepath.Clean(root)
err := fsys.Walk(root, func(path string, fi fs.FileInfo, err error) error {
@ -107,9 +122,11 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f
if !have[name] {
have[name] = true
if isMatch(name) {
if _, _, err := scanDir(root, path, tags); err != imports.ErrNoGo {
m.Pkgs = append(m.Pkgs, name)
}
q.Add(func() {
if _, _, err := scanDir(root, path, tags); err != imports.ErrNoGo {
addPkg(name)
}
})
}
}
@ -123,6 +140,12 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f
}
}
// Wait for all in-flight operations to complete before returning.
defer func() {
<-q.Idle()
sort.Strings(m.Pkgs) // sort everything we added for determinism
}()
if filter == includeStd {
walkPkgs(cfg.GOROOTsrc, "", pruneGoMod)
if treeCanMatch("cmd") {
@ -165,6 +188,12 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f
}
modPrefix = mod.Path
}
if mi, err := modindex.Get(root); err == nil {
walkFromIndex(mi, modPrefix, isMatch, treeCanMatch, tags, have, addPkg)
continue
} else if !errors.Is(err, modindex.ErrNotIndexed) {
m.AddError(err)
}
prune := pruneVendor
if isLocal {
@ -176,6 +205,54 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f
return
}
// walkFromIndex matches packages in a module using the module index. modroot
// is the module's root directory on disk, index is the ModuleIndex for the
// module, and importPathRoot is the module's path prefix.
func walkFromIndex(index *modindex.ModuleIndex, importPathRoot string, isMatch, treeCanMatch func(string) bool, tags, have map[string]bool, addPkg func(string)) {
loopPackages:
for _, reldir := range index.Packages() {
// Avoid .foo, _foo, and testdata subdirectory trees.
p := reldir
for {
elem, rest, found := strings.Cut(p, string(filepath.Separator))
if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
continue loopPackages
}
if found && elem == "vendor" {
// Ignore this path if it contains the element "vendor" anywhere
// except for the last element (packages named vendor are allowed
// for historical reasons). Note that found is true when this
// isn't the last path element.
continue loopPackages
}
if !found {
// Didn't find the separator, so we're considering the last element.
break
}
p = rest
}
// Don't use GOROOT/src.
if reldir == "" && importPathRoot == "" {
continue
}
name := path.Join(importPathRoot, filepath.ToSlash(reldir))
if !treeCanMatch(name) {
continue
}
if !have[name] {
have[name] = true
if isMatch(name) {
if _, _, err := index.ScanDir(reldir, tags); err != imports.ErrNoGo {
addPkg(name)
}
}
}
}
}
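// Illustrative effect of the pruning loop above, assuming slash-separated
// relative directories and importPathRoot "example.com/m":
//
//	"a/b"          -> considered as "example.com/m/a/b"
//	".git/hooks"   -> skipped (element starts with ".")
//	"_tools/gen"   -> skipped (element starts with "_")
//	"a/testdata/b" -> skipped ("testdata" element)
//	"a/vendor/b"   -> skipped ("vendor" in a non-final element)
//	"vendor"       -> kept (a trailing "vendor" element is allowed)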
// MatchInModule identifies the packages matching the given pattern within the
// given module version, which does not need to be in the build list or module
// requirement graph.

View File

@ -66,3 +66,21 @@ func TrimFilePathPrefix(s, prefix string) string {
}
return trimmed[1:]
}
// QuoteGlob returns s with all Glob metacharacters quoted.
// We don't try to handle backslash here, as that can appear in a
// file path on Windows.
func QuoteGlob(s string) string {
if !strings.ContainsAny(s, `*?[]`) {
return s
}
var sb strings.Builder
for _, c := range s {
switch c {
case '*', '?', '[', ']':
sb.WriteByte('\\')
}
sb.WriteRune(c)
}
return sb.String()
}
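// Illustrative results, assuming Unix-style paths:
//
//	QuoteGlob("/tmp/[pkg]/dir") == `/tmp/\[pkg\]/dir`
//	QuoteGlob("/tmp/plain")     == "/tmp/plain" // no metacharacters, returned unchanged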

View File

@ -170,7 +170,7 @@ func (ts *testScript) setup() {
"GOCACHE=" + testGOCACHE,
"GODEBUG=" + os.Getenv("GODEBUG"),
"GOEXE=" + cfg.ExeSuffix,
"GOINDEX=true",
"GOEXPERIMENT=" + os.Getenv("GOEXPERIMENT"),
"GOOS=" + runtime.GOOS,
"GOPATH=" + filepath.Join(ts.workdir, "gopath"),
"GOPROXY=" + proxyURL,

View File

@ -0,0 +1,18 @@
# issue 53314
[windows] skip
cd [pkg]
go build
-- [pkg]/go.mod --
module m
go 1.19
-- [pkg]/x.go --
package p
import _ "embed"
//go:embed t.txt
var S string
-- [pkg]/t.txt --

View File

@ -0,0 +1,38 @@
# Test a replacement with an absolute path (so the path isn't
# cleaned by having filepath.Abs called on it). This checks
# whether the modindex logic cleans the modroot path before using
# it.
[!windows] skip
[short] skip
go run print_go_mod.go # use this program to write a go.mod with an absolute path
cp stdout go.mod
go list -modfile=go.mod all
-- print_go_mod.go --
//go:build ignore
package main
import (
"fmt"
"os"
)
func main() {
work := os.Getenv("WORK")
fmt.Printf(`module example.com/mod
require b.com v0.0.0
replace b.com => %s\gopath\src/modb
`, work)
}
-- a.go --
package a
import _ "b.com/b"
-- modb/go.mod --
module b.com
-- modb/b/b.go --
package b

View File

@ -5,6 +5,12 @@
// Metadata prints basic system metadata to include in test logs. This is
// separate from cmd/dist so it does not need to build with the bootstrap
// toolchain.
// This program is only used by cmd/dist. Add an "ignore" build tag so it
// is not installed. cmd/dist does "go run main.go" directly.
//go:build ignore
package main
import (
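As a small illustration of the pattern described in the new comment (a hypothetical file, not part of this diff): an "ignore"-tagged file is excluded from go build and go install, but files named explicitly on the command line bypass build constraints, so go run still works.

//go:build ignore

// Hypothetical example: never built or installed as part of a package,
// but "go run example.go" executes it because explicitly listed files
// ignore build constraints.
package main

import "fmt"

func main() {
	fmt.Println("run explicitly, never installed")
}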

View File

@ -1,41 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package notsha256_test
import (
"crypto/sha256"
"fmt"
"io"
"log"
"os"
)
func ExampleSum256() {
sum := sha256.Sum256([]byte("hello world\n"))
fmt.Printf("%x", sum)
// Output: a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447
}
func ExampleNew() {
h := sha256.New()
h.Write([]byte("hello world\n"))
fmt.Printf("%x", h.Sum(nil))
// Output: a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447
}
func ExampleNew_file() {
f, err := os.Open("file.txt")
if err != nil {
log.Fatal(err)
}
defer f.Close()
h := sha256.New()
if _, err := io.Copy(h, f); err != nil {
log.Fatal(err)
}
fmt.Printf("%x", h.Sum(nil))
}

View File

@ -1854,6 +1854,9 @@ func (state *dodataState) allocateDataSections(ctxt *Link) {
}
for _, symn := range sym.ReadOnly {
symnStartValue := state.datsize
if len(state.data[symn]) != 0 {
symnStartValue = aligndatsize(state, symnStartValue, state.data[symn][0])
}
state.assignToSection(sect, symn, sym.SRODATA)
setCarrierSize(symn, state.datsize-symnStartValue)
if ctxt.HeadType == objabi.Haix {
@ -1935,6 +1938,9 @@ func (state *dodataState) allocateDataSections(ctxt *Link) {
symn := sym.RelROMap[symnro]
symnStartValue := state.datsize
if len(state.data[symn]) != 0 {
symnStartValue = aligndatsize(state, symnStartValue, state.data[symn][0])
}
for _, s := range state.data[symn] {
outer := ldr.OuterSym(s)

View File

@ -132,6 +132,15 @@ func decodetypeName(ldr *loader.Loader, symIdx loader.Sym, relocs *loader.Relocs
return string(data[1+nameLenLen : 1+nameLenLen+int(nameLen)])
}
func decodetypeNameEmbedded(ldr *loader.Loader, symIdx loader.Sym, relocs *loader.Relocs, off int) bool {
r := decodeRelocSym(ldr, symIdx, relocs, int32(off))
if r == 0 {
return false
}
data := ldr.Data(r)
return data[0]&(1<<3) != 0
}
func decodetypeFuncInType(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, relocs *loader.Relocs, i int) loader.Sym {
uadd := commonsize(arch) + 4
if arch.PtrSize == 8 {
@ -204,12 +213,18 @@ func decodetypeStructFieldType(ldr *loader.Loader, arch *sys.Arch, symIdx loader
return decodeRelocSym(ldr, symIdx, &relocs, int32(off+arch.PtrSize))
}
func decodetypeStructFieldOffsAnon(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) int64 {
func decodetypeStructFieldOffset(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) int64 {
off := decodetypeStructFieldArrayOff(ldr, arch, symIdx, i)
data := ldr.Data(symIdx)
return int64(decodeInuxi(arch, data[off+2*arch.PtrSize:], arch.PtrSize))
}
func decodetypeStructFieldEmbedded(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) bool {
off := decodetypeStructFieldArrayOff(ldr, arch, symIdx, i)
relocs := ldr.Relocs(symIdx)
return decodetypeNameEmbedded(ldr, symIdx, &relocs, off)
}
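For orientation (a standalone sketch, not part of the diff): after this change the embedded flag lives in bit 3 of a name's first, flags byte rather than in the low bit of the field offset. The sample bytes below are made up.

package main

import "fmt"

// isEmbedded mirrors the data[0]&(1<<3) test in decodetypeNameEmbedded above.
func isEmbedded(nameData []byte) bool {
	return len(nameData) > 0 && nameData[0]&(1<<3) != 0
}

func main() {
	fmt.Println(isEmbedded([]byte{1 << 3})) // true: flags byte has the embedded bit set
	fmt.Println(isEmbedded([]byte{0}))      // false
}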
// decodetypeStr returns the contents of an rtype's str field (a nameOff).
func decodetypeStr(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) string {
relocs := ldr.Relocs(symIdx)

View File

@ -682,9 +682,9 @@ func (d *dwctxt) newtype(gotype loader.Sym) *dwarf.DWDie {
}
fld := d.newdie(die, dwarf.DW_ABRV_STRUCTFIELD, f)
d.newrefattr(fld, dwarf.DW_AT_type, d.defgotype(s))
offsetAnon := decodetypeStructFieldOffsAnon(d.ldr, d.arch, gotype, i)
newmemberoffsetattr(fld, int32(offsetAnon>>1))
if offsetAnon&1 != 0 { // is embedded field
offset := decodetypeStructFieldOffset(d.ldr, d.arch, gotype, i)
newmemberoffsetattr(fld, int32(offset))
if decodetypeStructFieldEmbedded(d.ldr, d.arch, gotype, i) {
newattr(fld, dwarf.DW_AT_go_embedded_field, dwarf.DW_CLS_FLAG, 1, 0)
}
}

View File

@ -1463,12 +1463,12 @@ func (ctxt *Link) hostlink() {
// We force all symbol resolution to be done at program startup
// because lazy PLT resolution can use large amounts of stack at
// times we cannot allow it to do so.
argv = append(argv, "-Wl,-znow")
argv = append(argv, "-Wl,-z,now")
// Do not let the host linker generate COPY relocations. These
// can move symbols out of sections that rely on stable offsets
// from the beginning of the section (like sym.STYPE).
argv = append(argv, "-Wl,-znocopyreloc")
argv = append(argv, "-Wl,-z,nocopyreloc")
if buildcfg.GOOS == "android" {
// Use lld to avoid errors from default linker (issue #38838)

View File

@ -475,16 +475,19 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind {
s = ldr.CreateSymForUpdate("type.*", 0)
s.SetType(sym.STYPE)
s.SetSize(0)
s.SetAlign(int32(ctxt.Arch.PtrSize))
symtype = s.Sym()
s = ldr.CreateSymForUpdate("typerel.*", 0)
s.SetType(sym.STYPERELRO)
s.SetSize(0)
s.SetAlign(int32(ctxt.Arch.PtrSize))
symtyperel = s.Sym()
} else {
s = ldr.CreateSymForUpdate("type.*", 0)
s.SetType(sym.STYPE)
s.SetSize(0)
s.SetAlign(int32(ctxt.Arch.PtrSize))
symtype = s.Sym()
symtyperel = s.Sym()
}
@ -496,6 +499,7 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind {
s := ldr.CreateSymForUpdate(name, 0)
s.SetType(t)
s.SetSize(0)
s.SetAlign(int32(ctxt.Arch.PtrSize))
s.SetLocal(true)
setCarrierSym(t, s.Sym())
return s.Sym()

View File

@ -551,7 +551,13 @@ func (state *peLoaderState) readpesym(pesym *pe.COFFSymbol) (*loader.SymbolBuild
name = strings.TrimPrefix(name, "__imp_") // __imp_Name => Name
}
}
if state.arch.Family == sys.I386 && name[0] == '_' {
// A note on the "_main" exclusion below: the main routine
// defined by the Go runtime is named "_main", not "main", so
// when reading references to _main from a host object we want
// to avoid rewriting "_main" to "main" in this specific
// instance. See #issuecomment-1143698749 on #35006 for more
// details on this problem.
if state.arch.Family == sys.I386 && name[0] == '_' && name != "_main" {
name = name[1:] // _Name => Name
}
}

View File

@ -6,12 +6,11 @@ package aes
import (
"crypto/cipher"
"crypto/internal/boring"
"crypto/internal/subtle"
"strconv"
)
import "crypto/internal/boring"
// The AES block size in bytes.
const BlockSize = 16

View File

@ -8,13 +8,12 @@ package aes
import (
"crypto/cipher"
"crypto/internal/boring"
"crypto/internal/subtle"
"internal/cpu"
"internal/goarch"
)
import "crypto/internal/boring"
// defined in asm_*.s
//go:noescape

View File

@ -24,6 +24,7 @@ import (
"crypto/aes"
"crypto/cipher"
"crypto/elliptic"
"crypto/internal/boring"
"crypto/internal/boring/bbig"
"crypto/internal/randutil"
"crypto/sha512"
@ -31,8 +32,6 @@ import (
"io"
"math/big"
"crypto/internal/boring"
"golang.org/x/crypto/cryptobyte"
"golang.org/x/crypto/cryptobyte/asn1"
)

View File

@ -22,12 +22,11 @@ timing side-channels:
package hmac
import (
"crypto/internal/boring"
"crypto/subtle"
"hash"
)
import "crypto/internal/boring"
// FIPS 198-1:
// https://csrc.nist.gov/publications/fips/fips198-1/FIPS-198-1_final.pdf

View File

@ -72,9 +72,6 @@ type extraModes interface {
NewCBCDecrypter(iv []byte) cipher.BlockMode
NewCTR(iv []byte) cipher.Stream
NewGCM(nonceSize, tagSize int) (cipher.AEAD, error)
// Invented for BoringCrypto.
NewGCMTLS() (cipher.AEAD, error)
}
var _ extraModes = (*aesCipher)(nil)
@ -235,8 +232,8 @@ func (c *aesCipher) NewGCM(nonceSize, tagSize int) (cipher.AEAD, error) {
return c.newGCM(false)
}
func (c *aesCipher) NewGCMTLS() (cipher.AEAD, error) {
return c.newGCM(true)
func NewGCMTLS(c cipher.Block) (cipher.AEAD, error) {
return c.(*aesCipher).newGCM(true)
}
func (c *aesCipher) newGCM(tls bool) (cipher.AEAD, error) {

View File

@ -50,6 +50,7 @@ func SHA512([]byte) [64]byte { panic("boringcrypto: not available") }
func NewHMAC(h func() hash.Hash, key []byte) hash.Hash { panic("boringcrypto: not available") }
func NewAESCipher(key []byte) (cipher.Block, error) { panic("boringcrypto: not available") }
func NewGCMTLS(cipher.Block) (cipher.AEAD, error) { panic("boringcrypto: not available") }
type PublicKeyECDSA struct{ _ int }
type PrivateKeyECDSA struct{ _ int }

View File

@ -10,6 +10,7 @@
package rand
import (
"crypto/internal/boring"
"errors"
"io"
"os"
@ -19,8 +20,6 @@ import (
"time"
)
import "crypto/internal/boring"
const urandomDevice = "/dev/urandom"
func init() {

View File

@ -6,16 +6,14 @@ package rsa
import (
"crypto"
"crypto/internal/boring"
"crypto/internal/randutil"
"crypto/subtle"
"errors"
"io"
"math/big"
"crypto/internal/randutil"
)
import "crypto/internal/boring"
// This file implements encryption and decryption using PKCS #1 v1.5 padding.
// PKCS1v15DecrypterOpts is for passing options to PKCS #1 v1.5 decryption using
@ -32,7 +30,7 @@ type PKCS1v15DecryptOptions struct {
// scheme from PKCS #1 v1.5. The message must be no longer than the
// length of the public modulus minus 11 bytes.
//
// The rand parameter is used as a source of entropy to ensure that
// The random parameter is used as a source of entropy to ensure that
// encrypting the same message twice doesn't result in the same
// ciphertext.
//
@ -84,14 +82,14 @@ func EncryptPKCS1v15(random io.Reader, pub *PublicKey, msg []byte) ([]byte, erro
}
// DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS #1 v1.5.
// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
// If random != nil, it uses RSA blinding to avoid timing side-channel attacks.
//
// Note that whether this function returns an error or not discloses secret
// information. If an attacker can cause this function to run repeatedly and
// learn whether each instance returned an error then they can decrypt and
// forge signatures as if they had the private key. See
// DecryptPKCS1v15SessionKey for a way of solving this problem.
func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error) {
func DecryptPKCS1v15(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error) {
if err := checkPub(&priv.PublicKey); err != nil {
return nil, err
}
@ -108,7 +106,7 @@ func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) ([]byt
return out, nil
}
valid, out, index, err := decryptPKCS1v15(rand, priv, ciphertext)
valid, out, index, err := decryptPKCS1v15(random, priv, ciphertext)
if err != nil {
return nil, err
}
@ -119,7 +117,7 @@ func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) ([]byt
}
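As a usage sketch of the renamed parameter (not part of this diff), the entropy source is the first argument to both EncryptPKCS1v15 and DecryptPKCS1v15; the 2048-bit key below is a throwaway generated only for illustration.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048) // throwaway key
	if err != nil {
		panic(err)
	}
	msg := []byte("session key")
	// random (here rand.Reader) supplies the entropy for the PKCS #1 v1.5 padding.
	ct, err := rsa.EncryptPKCS1v15(rand.Reader, &priv.PublicKey, msg)
	if err != nil {
		panic(err)
	}
	// A non-nil reader enables RSA blinding during decryption, as documented above.
	pt, err := rsa.DecryptPKCS1v15(rand.Reader, priv, ct)
	fmt.Println(string(pt), err)
}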
// DecryptPKCS1v15SessionKey decrypts a session key using RSA and the padding scheme from PKCS #1 v1.5.
// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
// If random != nil, it uses RSA blinding to avoid timing side-channel attacks.
// It returns an error if the ciphertext is the wrong length or if the
// ciphertext is greater than the public modulus. Otherwise, no error is
// returned. If the padding is valid, the resulting plaintext message is copied
@ -137,7 +135,7 @@ func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) ([]byt
// a random value was used (because it'll be different for the same ciphertext)
// and thus whether the padding was correct. This defeats the point of this
// function. Using at least a 16-byte key will protect against this attack.
func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error {
func DecryptPKCS1v15SessionKey(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error {
if err := checkPub(&priv.PublicKey); err != nil {
return err
}
@ -146,7 +144,7 @@ func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []by
return ErrDecryption
}
valid, em, index, err := decryptPKCS1v15(rand, priv, ciphertext)
valid, em, index, err := decryptPKCS1v15(random, priv, ciphertext)
if err != nil {
return err
}
@ -163,12 +161,12 @@ func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []by
}
// decryptPKCS1v15 decrypts ciphertext using priv and blinds the operation if
// rand is not nil. It returns one or zero in valid that indicates whether the
// random is not nil. It returns one or zero in valid that indicates whether the
// plaintext was correctly structured. In either case, the plaintext is
// returned in em so that it may be read independently of whether it was valid
// in order to maintain constant memory access patterns. If the plaintext was
// valid then index contains the index of the original message in em.
func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
func decryptPKCS1v15(random io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
k := priv.Size()
if k < 11 {
err = ErrDecryption
@ -188,7 +186,7 @@ func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid
} else {
c := new(big.Int).SetBytes(ciphertext)
var m *big.Int
m, err = decrypt(rand, priv, c)
m, err = decrypt(random, priv, c)
if err != nil {
return
}
@ -220,15 +218,15 @@ func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid
}
// nonZeroRandomBytes fills the given slice with non-zero random octets.
func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
_, err = io.ReadFull(rand, s)
func nonZeroRandomBytes(s []byte, random io.Reader) (err error) {
_, err = io.ReadFull(random, s)
if err != nil {
return
}
for i := 0; i < len(s); i++ {
for s[i] == 0 {
_, err = io.ReadFull(rand, s[i:i+1])
_, err = io.ReadFull(random, s[i:i+1])
if err != nil {
return
}
@ -268,7 +266,7 @@ var hashPrefixes = map[crypto.Hash][]byte{
// function. If hash is zero, hashed is signed directly. This isn't
// advisable except for interoperability.
//
// If rand is not nil then RSA blinding will be used to avoid timing
// If random is not nil then RSA blinding will be used to avoid timing
// side-channel attacks.
//
// This function is deterministic. Thus, if the set of possible

View File

@ -9,14 +9,13 @@ package rsa
import (
"bytes"
"crypto"
"crypto/internal/boring"
"errors"
"hash"
"io"
"math/big"
)
import "crypto/internal/boring"
// Per RFC 8017, Section 9.1
//
// EM = MGF1 xor DB || H( 8*0x00 || mHash || salt ) || 0xbc
@ -298,6 +297,7 @@ func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte,
}
return boring.SignRSAPSS(bkey, hash, digest, saltLength)
}
boring.UnreachableExceptTests()
salt := make([]byte, saltLength)
if _, err := io.ReadFull(rand, salt); err != nil {

View File

@ -4,14 +4,13 @@
package tls
import "crypto/internal/boring"
import (
"crypto"
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/hmac"
"crypto/internal/boring"
"crypto/rc4"
"crypto/sha1"
"crypto/sha256"
@ -517,12 +516,9 @@ func aeadAESGCM(key, noncePrefix []byte) aead {
if err != nil {
panic(err)
}
type gcmtls interface {
NewGCMTLS() (cipher.AEAD, error)
}
var aead cipher.AEAD
if aesTLS, ok := aes.(gcmtls); ok {
aead, err = aesTLS.NewGCMTLS()
if boring.Enabled {
aead, err = boring.NewGCMTLS(aes)
} else {
boring.Unreachable()
aead, err = cipher.NewGCM(aes)
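For reference, the fallback branch above is the ordinary public-API construction of AES-GCM; here is a standalone sketch using a throwaway all-zero key and nonce, for illustration only.

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

func main() {
	key := make([]byte, 16) // throwaway all-zero AES-128 key, for illustration only
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := cipher.NewGCM(block) // the non-BoringCrypto path in the hunk above
	if err != nil {
		panic(err)
	}
	nonce := make([]byte, aead.NonceSize()) // never reuse a nonce with a real key
	ct := aead.Seal(nil, nonce, []byte("hello"), nil)
	pt, err := aead.Open(nil, nonce, ct, nil)
	fmt.Println(string(pt), err)
}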

View File

@ -192,9 +192,9 @@ type Execer interface {
// DB.Exec will first prepare a query, execute the statement, and then
// close the statement.
//
// ExecerContext may return ErrSkip.
// ExecContext may return ErrSkip.
//
// ExecerContext must honor the context timeout and return when the context is canceled.
// ExecContext must honor the context timeout and return when the context is canceled.
type ExecerContext interface {
ExecContext(ctx context.Context, query string, args []NamedValue) (Result, error)
}
@ -219,9 +219,9 @@ type Queryer interface {
// DB.Query will first prepare a query, execute the statement, and then
// close the statement.
//
// QueryerContext may return ErrSkip.
// QueryContext may return ErrSkip.
//
// QueryerContext must honor the context timeout and return when the context is canceled.
// QueryContext must honor the context timeout and return when the context is canceled.
type QueryerContext interface {
QueryContext(ctx context.Context, query string, args []NamedValue) (Rows, error)
}
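As an illustrative sketch only (the package name mydriver and the conn type are hypothetical), a connection satisfying ExecerContext can honor the context and defer to the generic prepare/exec path by returning driver.ErrSkip, as the documentation above allows.

package mydriver

import (
	"context"
	"database/sql/driver"
)

type conn struct{}

// ExecContext returns promptly when ctx is canceled and returns
// driver.ErrSkip when it would rather have database/sql prepare the
// statement and execute it the generic way.
func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}
	if len(args) > 0 {
		return nil, driver.ErrSkip // fall back to Prepare + Stmt.Exec
	}
	// ... execute query directly against the underlying database here ...
	return driver.ResultNoRows, nil
}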

View File

@ -118,7 +118,12 @@ func (v *Map) String() string {
if !first {
fmt.Fprintf(&b, ", ")
}
fmt.Fprintf(&b, "%q: %v", kv.Key, kv.Value)
fmt.Fprintf(&b, "%q: ", kv.Key)
if kv.Value != nil {
fmt.Fprintf(&b, "%v", kv.Value)
} else {
fmt.Fprint(&b, "null")
}
first = false
})
fmt.Fprintf(&b, "}")
@ -224,7 +229,8 @@ func (v *Map) Do(f func(KeyValue)) {
defer v.keysMu.RUnlock()
for _, k := range v.keys {
i, _ := v.m.Load(k)
f(KeyValue{k, i.(Var)})
val, _ := i.(Var)
f(KeyValue{k, val})
}
}
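A minimal standalone sketch of the behavior this change enables (the map name "demo" is arbitrary): setting a nil value no longer breaks Map.String, which now renders that key as null.

package main

import (
	"expvar"
	"fmt"
)

func main() {
	m := expvar.NewMap("demo")
	m.Set("key", nil)       // previously this made String/Do panic on the nil Var
	fmt.Println(m.String()) // with this change: {"key": null}
}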

View File

@ -261,6 +261,29 @@ func TestMapCounter(t *testing.T) {
}
}
func TestMapNil(t *testing.T) {
RemoveAll()
const key = "key"
m := NewMap("issue527719")
m.Set(key, nil)
s := m.String()
var j any
if err := json.Unmarshal([]byte(s), &j); err != nil {
t.Fatalf("m.String() == %q isn't valid JSON: %v", s, err)
}
m2, ok := j.(map[string]any)
if !ok {
t.Fatalf("m.String() produced %T, wanted a map", j)
}
v, ok := m2[key]
if !ok {
t.Fatalf("missing %q in %v", key, m2)
}
if v != nil {
t.Fatalf("m[%q] = %v, want nil", key, v)
}
}
func BenchmarkMapSet(b *testing.B) {
m := new(Map).Init()

View File

@ -1883,11 +1883,13 @@ func (ctxt *Context) eval(x constraint.Expr, allTags map[string]bool) bool {
// cgo (if cgo is enabled)
// $GOOS
// $GOARCH
// boringcrypto
// ctxt.Compiler
// linux (if GOOS = android)
// solaris (if GOOS = illumos)
// tag (if tag is listed in ctxt.BuildTags or ctxt.ReleaseTags)
// darwin (if GOOS = ios)
// unix (if this is a Unix GOOS)
// boringcrypto (if GOEXPERIMENT=boringcrypto is enabled)
// tag (if tag is listed in ctxt.BuildTags, ctxt.ToolTags, or ctxt.ReleaseTags)
//
// It records all consulted tags in allTags.
func (ctxt *Context) matchTag(name string, allTags map[string]bool) bool {
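As a small illustration of the documented tags (a hypothetical file, not part of this diff), a build constraint can combine them:

//go:build unix && boringcrypto

// Hypothetical file: compiled only for Unix-like GOOS values and only when
// GOEXPERIMENT=boringcrypto is enabled, per the satisfied-tag list above.
package p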

View File

@ -543,7 +543,10 @@ var depsRules = `
internal/fuzz, internal/testlog, runtime/pprof, regexp
< testing/internal/testdeps;
OS, flag, testing, internal/cfg
MATH, errors, testing
< internal/testmath;
OS, flag, testing, internal/cfg, internal/testmath
< internal/testenv;
OS, encoding/base64

Some files were not shown because too many files have changed in this diff.