Bump github.com/go-redis/redis/v8 from 8.3.3 to 8.11.5 (#35)

Bumps [github.com/go-redis/redis/v8](https://github.com/go-redis/redis) from 8.3.3 to 8.11.5.
- [Release notes](https://github.com/go-redis/redis/releases)
- [Changelog](https://github.com/go-redis/redis/blob/master/CHANGELOG.md)
- [Commits](https://github.com/go-redis/redis/compare/v8.3.3...v8.11.5)

---
updated-dependencies:
- dependency-name: github.com/go-redis/redis/v8
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
dependabot[bot] authored on 2022-07-27 09:57:23 -07:00; committed by GitHub
commit c0d7bf41de (parent 1bd93308c2)
Signature: GPG key ID 4AEE18F83AFDEB23 (no known key found for this signature in database)
115 changed files with 3352 additions and 9490 deletions

go.mod (5 changes)

@ -10,7 +10,7 @@ require (
github.com/go-fed/httpsig v1.1.0
github.com/go-http-utils/etag v0.0.0-20161124023236-513ea8f21eb1
github.com/go-playground/validator/v10 v10.11.0
github.com/go-redis/redis/v8 v8.3.3
github.com/go-redis/redis/v8 v8.11.5
github.com/google/uuid v1.3.0
github.com/gorilla/handlers v1.5.1
github.com/gorilla/mux v1.8.0
@ -36,7 +36,7 @@ require (
require (
github.com/cactus/go-statsd-client/v5 v5.0.0 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.2 // indirect
@ -75,7 +75,6 @@ require (
github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 // indirect
github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
go.opentelemetry.io/otel v0.13.0 // indirect
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect

go.sum (13 changes)

@ -47,8 +47,9 @@ github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEq
github.com/cactus/go-statsd-client/v5 v5.0.0 h1:KqvIQtc9qt34uq+nu4nd1PwingWfBt/IISgtUQ2nSJk=
github.com/cactus/go-statsd-client/v5 v5.0.0/go.mod h1:COEvJ1E+/E2L4q6QE5CkjWPi4eeDw9maJBMIuMPBZbY=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
@ -121,8 +122,9 @@ github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/j
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2BOGlCyvTqsp/xIw=
github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
github.com/go-redis/redis/v8 v8.3.3 h1:e0CL9fsFDK92pkIJH2XAeS/NwO2VuIOAoJvI6yktZFk=
github.com/go-redis/redis/v8 v8.3.3/go.mod h1:jszGxBCez8QA1HWSmQxJO9Y82kNibbUmeYhKWrBejTU=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.1 h1:UQhStjbkDClarlmv0am7OXXO4/GaPdCGiUiMTvi28sg=
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
@ -310,16 +312,16 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/nicksnyder/go-i18n/v2 v2.2.0 h1:MNXbyPvd141JJqlU6gJKrczThxJy+kdCNivxZpBQFkw=
github.com/nicksnyder/go-i18n/v2 v2.2.0/go.mod h1:4OtLfzqyAxsscyCb//3gfqSvBc81gImX91LrZzczN1o=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU=
@ -416,7 +418,6 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opentelemetry.io/otel v0.13.0 h1:2isEnyzjjJZq6r2EKMsFj4TxiQiexsM04AVhwbR/oBA=
go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=


@ -1,8 +0,0 @@
language: go
go:
- "1.x"
- master
env:
- TAGS=""
- TAGS="-tags purego"
script: go test $TAGS -v ./...


@ -1,7 +1,7 @@
# xxhash
[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
@ -64,4 +64,6 @@ $ go test -benchtime 10s -bench '/xxhash,direct,bytes'
- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)


@ -193,7 +193,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
b, d.v4 = consumeUint64(b)
b, d.total = consumeUint64(b)
copy(d.mem[:], b)
b = b[len(d.mem):]
d.n = int(d.total % uint64(len(d.mem)))
return nil
}


@ -6,7 +6,7 @@
// Register allocation:
// AX h
// CX pointer to advance through b
// SI pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
@ -16,39 +16,39 @@
// R12 tmp
// R13 prime1v
// R14 prime2v
// R15 prime4v
// DI prime4v
// round reads from and advances the buffer pointer in CX.
// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
MOVQ (CX), R12 \
ADDQ $8, CX \
MOVQ (SI), R12 \
ADDQ $8, SI \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
ADDQ R15, acc
ADDQ DI, acc
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
MOVQ ·prime4v(SB), R15
MOVQ ·prime4v(SB), DI
// Load slice.
MOVQ b_base+0(FP), CX
MOVQ b_base+0(FP), SI
MOVQ b_len+8(FP), DX
LEAQ (CX)(DX*1), BX
LEAQ (SI)(DX*1), BX
// The first loop limit will be len(b)-32.
SUBQ $32, BX
@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
XORQ R11, R11
SUBQ R13, R11
// Loop until CX > BX.
// Loop until SI > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ CX, BX
CMPQ SI, BX
JLE blockLoop
MOVQ R8, AX
@ -100,16 +100,16 @@ noBlocks:
afterBlocks:
ADDQ DX, AX
// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
ADDQ $24, BX
CMPQ CX, BX
CMPQ SI, BX
JG fourByte
wordLoop:
// Calculate k1.
MOVQ (CX), R8
ADDQ $8, CX
MOVQ (SI), R8
ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
@ -117,18 +117,18 @@ wordLoop:
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
ADDQ R15, AX
ADDQ DI, AX
CMPQ CX, BX
CMPQ SI, BX
JLE wordLoop
fourByte:
ADDQ $4, BX
CMPQ CX, BX
CMPQ SI, BX
JG singles
MOVL (CX), R8
ADDQ $4, CX
MOVL (SI), R8
ADDQ $4, SI
IMULQ R13, R8
XORQ R8, AX
@ -138,19 +138,19 @@ fourByte:
singles:
ADDQ $4, BX
CMPQ CX, BX
CMPQ SI, BX
JGE finalize
singlesLoop:
MOVBQZX (CX), R12
ADDQ $1, CX
MOVBQZX (SI), R12
ADDQ $1, SI
IMULQ ·prime5v(SB), R12
XORQ R12, AX
ROLQ $11, AX
IMULQ R13, AX
CMPQ CX, BX
CMPQ SI, BX
JL singlesLoop
finalize:
@ -179,9 +179,9 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
MOVQ ·prime2v(SB), R14
// Load slice.
MOVQ b_base+8(FP), CX
MOVQ b_base+8(FP), SI
MOVQ b_len+16(FP), DX
LEAQ (CX)(DX*1), BX
LEAQ (SI)(DX*1), BX
SUBQ $32, BX
// Load vN from d.
@ -199,7 +199,7 @@ blockLoop:
round(R10)
round(R11)
CMPQ CX, BX
CMPQ SI, BX
JLE blockLoop
// Copy vN back to d.
@ -208,8 +208,8 @@ blockLoop:
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
// The number of bytes written is CX minus the old base pointer.
SUBQ b_base+8(FP), CX
MOVQ CX, ret+32(FP)
// The number of bytes written is SI minus the old base pointer.
SUBQ b_base+8(FP), SI
MOVQ SI, ret+32(FP)
RET


@ -6,41 +6,52 @@
package xxhash
import (
"reflect"
"unsafe"
)
// Notes:
//
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
// for some discussion about these unsafe conversions.
//
// In the future it's possible that compiler optimizations will make these
// unsafe operations unnecessary: https://golang.org/issue/2205.
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
//
// Both of these wrapper functions still incur function call overhead since they
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
// for strings to squeeze out a bit more speed. Mid-stack inlining should
// eventually fix this.
// var b []byte
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
// bh.Len = len(s)
// bh.Cap = len(s)
//
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
// weight to this sequence of expressions that any function that uses it will
// not be inlined. Instead, the functions below use a different unsafe
// conversion designed to minimize the inliner weight and allow both to be
// inlined. There is also a test (TestInlining) which verifies that these are
// inlined.
//
// See https://github.com/golang/go/issues/42739 for discussion.
// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
var b []byte
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
bh.Len = len(s)
bh.Cap = len(s)
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
return Sum64(b)
}
// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
var b []byte
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
bh.Len = len(s)
bh.Cap = len(s)
return d.Write(b)
d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
// d.Write always returns len(s), nil.
// Ignoring the return output and returning these fixed values buys a
// savings of 6 in the inliner's cost model.
return len(s), nil
}
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
s string
cap int
}


@ -2,20 +2,3 @@ run:
concurrency: 8
deadline: 5m
tests: false
linters:
enable-all: true
disable:
- funlen
- gochecknoglobals
- gochecknoinits
- gocognit
- goconst
- godox
- gosec
- maligned
- wsl
- gomnd
- goerr113
- exhaustive
- nestif
- nlreturn


@ -1,20 +0,0 @@
dist: xenial
language: go
services:
- redis-server
go:
- 1.14.x
- 1.15.x
- tip
matrix:
allow_failures:
- go: tip
go_import_path: github.com/go-redis/redis
before_install:
- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s --
-b $(go env GOPATH)/bin v1.31.0


@ -1,11 +1,91 @@
# Changelog
## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17)
> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
### Bug Fixes
* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a))
* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c))
* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475))
* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2))
* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32))
* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4))
* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc))
* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f))
* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2))
### Features
* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8))
* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e))
* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7))
* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e))
* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b))
* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417))
* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d))
## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
### Features
* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
## v8.11
- Remove OpenTelemetry metrics.
- Supports more redis commands and options.
## v8.10
- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
decision:
- Traces become smaller and less noisy.
- It may be costly to process those 3 extra spans for each query.
- go-redis no longer depends on OpenTelemetry.
Eventually we hope to replace the information that we no longer collect with OpenTelemetry
Metrics.
## v8.9
- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
`WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
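As a rough, hedged sketch of how those options are used (the channel name, buffer size, and health-check interval below are illustrative, not defaults):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	pubsub := rdb.Subscribe(ctx, "mychannel")
	defer pubsub.Close()

	// Channel accepts options since v8.9; the values here are illustrative.
	ch := pubsub.Channel(
		redis.WithChannelSize(100),
		redis.WithChannelHealthCheckInterval(3*time.Second),
	)
	for msg := range ch {
		fmt.Println(msg.Channel, msg.Payload)
	}
}
```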
## v8.8
- To make updating easier, extra modules now have the same version as go-redis does. That means that
you need to update your imports:
```
github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
```
## v8.5
- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a
struct:
```go
err := rdb.HGetAll(ctx, "hash").Scan(&data)
err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
```
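To make the hash-scanning feature more concrete, here is a hedged, self-contained sketch; the `Model` struct, its `redis` tags, and the hash key are invented for illustration:

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// Model is a hypothetical destination struct; hash fields are matched
// to struct fields via the `redis` tag.
type Model struct {
	Str string `redis:"str"`
	Int int    `redis:"int"`
}

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Populate a hash, then scan it back into the struct.
	if err := rdb.HSet(ctx, "hash", "str", "hello", "int", 123).Err(); err != nil {
		panic(err)
	}

	var data Model
	if err := rdb.HGetAll(ctx, "hash").Scan(&data); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", data)
}
```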
- Please check [redismock](https://github.com/go-redis/redismock) by
[monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client.
## v8
- Documentation at https://redis.uptrace.dev/
- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
using `context.Context` yet, the simplest option is to define global package variable
`var ctx = context.TODO()` and use it when `ctx` is required.
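A minimal sketch of that pattern (the address and key are placeholders):

```go
package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

// Global context, as suggested above for code bases that do not yet
// thread context.Context through their call paths.
var ctx = context.TODO()

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Every command takes ctx as its first argument in v8.
	if err := rdb.Ping(ctx).Err(); err != nil {
		panic(err)
	}
	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		panic(err)
	}
}
```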


@ -1,10 +1,11 @@
all: testdeps
PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
test: testdeps
go test ./...
go test ./... -short -race
go test ./... -run=NONE -bench=. -benchmem
env GOOS=linux GOARCH=386 go test ./...
go vet
golangci-lint run
testdeps: testdata/redis/src/redis-server
@ -15,7 +16,20 @@ bench: testdeps
testdata/redis:
mkdir -p $@
wget -qO- http://download.redis.io/redis-stable.tar.gz | tar xvz --strip-components=1 -C $@
wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
testdata/redis/src/redis-server: testdata/redis
cd $< && make all
fmt:
gofmt -w -s ./
goimports -w -local github.com/go-redis/redis ./
go_mod_tidy:
go get -u && go mod tidy
set -e; for dir in $(PACKAGE_DIRS); do \
echo "go mod tidy in $${dir}"; \
(cd "$${dir}" && \
go get -u && \
go mod tidy); \
done


@ -1,23 +1,32 @@
# Redis client for Golang
# Redis client for Go
[![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg)
[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
Uptrace is an open source and blazingly fast **distributed tracing** backend powered by
OpenTelemetry and ClickHouse. Give it a star as well!
- Join [Discord](https://discord.gg/rWtp5Aj) to ask questions.
## Resources
- [Discussions](https://github.com/go-redis/redis/discussions)
- [Documentation](https://redis.uptrace.dev)
- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
Other projects you may like:
- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
## Ecosystem
- [Distributed Locks](https://github.com/bsm/redislock).
- [Redis Cache](https://github.com/go-redis/cache).
- [Rate limiting](https://github.com/go-redis/redis_rate).
- [Redis Mock](https://github.com/go-redis/redismock)
- [Distributed Locks](https://github.com/bsm/redislock)
- [Redis Cache](https://github.com/go-redis/cache)
- [Rate limiting](https://github.com/go-redis/redis_rate)
## Features
@ -26,16 +35,16 @@
[circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-Pipeline) and
[TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and
[TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline).
- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient--ManualSetup)
- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup)
without using cluster mode and Redis Sentinel.
- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#ex-package--Instrumentation).
- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation).
## Installation
@ -47,7 +56,7 @@ module:
go mod init github.com/my/repo
```
And then install go-redis (note _v8_ in the import; omitting it is a popular mistake):
And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):
```shell
go get github.com/go-redis/redis/v8
@ -59,6 +68,7 @@ go get github.com/go-redis/redis/v8
import (
"context"
"github.com/go-redis/redis/v8"
"fmt"
)
var ctx = context.Background()
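Continuing from the import block above, a minimal client sketch in the spirit of the quickstart (the address, key, and value are illustrative assumptions, not part of this diff):

```go
func ExampleClient() {
	rdb := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379", // illustrative address
		Password: "",               // no password set
		DB:       0,                // use default DB
	})

	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		panic(err)
	}

	val, err := rdb.Get(ctx, "key").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("key =", val)
}
```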
@ -129,9 +139,37 @@ vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello")
res, err := rdb.Do(ctx, "set", "key", "value").Result()
```
## See also
## Run the test
- [Fast and flexible HTTP router](https://github.com/vmihailenco/treemux)
- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
- [Golang msgpack](https://github.com/vmihailenco/msgpack)
- [Golang message task queue](https://github.com/vmihailenco/taskq)
go-redis will start a redis-server and run the test cases.
The paths of redis-server bin file and redis config file are defined in `main_test.go`:
```
var (
redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
)
```
For local testing, you can change the variables to refer to your local files, or create a soft link
to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
```
ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
```
Lastly, run:
```
go test
```
## Contributors
Thanks to all the people who already contributed!
<a href="https://github.com/go-redis/redis/graphs/contributors">
<img src="https://contributors-img.web.app/image?repo=go-redis/redis" />
</a>

vendor/github.com/go-redis/redis/v8/RELEASING.md (generated vendored normal file, 15 changes)

@ -0,0 +1,15 @@
# Releasing
1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub:
```shell
TAG=v1.0.0 ./scripts/release.sh
```
2. Open a pull request and wait for the build to finish.
3. Merge the pull request and run `tag.sh` to create tags for packages:
```shell
TAG=v1.0.0 ./scripts/tag.sh
```


@ -68,6 +68,9 @@ type ClusterOptions struct {
ReadTimeout time.Duration
WriteTimeout time.Duration
// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
// PoolSize applies per cluster node and not for the whole cluster.
PoolSize int
MinIdleConns int
@ -86,12 +89,12 @@ func (opt *ClusterOptions) init() {
opt.MaxRedirects = 3
}
if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil {
if opt.RouteByLatency || opt.RouteRandomly {
opt.ReadOnly = true
}
if opt.PoolSize == 0 {
opt.PoolSize = 5 * runtime.NumCPU()
opt.PoolSize = 5 * runtime.GOMAXPROCS(0)
}
switch opt.ReadTimeout {
@ -146,6 +149,7 @@ func (opt *ClusterOptions) clientOptions() *Options {
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
@ -153,9 +157,13 @@ func (opt *ClusterOptions) clientOptions() *Options {
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: disableIdleCheck,
readOnly: opt.ReadOnly,
TLSConfig: opt.TLSConfig,
// If ClusterSlots is populated, then we probably have an artificial
// cluster whose nodes are not in clustering mode (otherwise there isn't
// much use for ClusterSlots config). This means we cannot execute the
// READONLY command against that node -- setting readOnly to false in such
// situations in the options below will prevent that from happening.
readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
}
}
@ -291,8 +299,9 @@ func (c *clusterNodes) Close() error {
func (c *clusterNodes) Addrs() ([]string, error) {
var addrs []string
c.mu.RLock()
closed := c.closed
closed := c.closed //nolint:ifshort
if !closed {
if len(c.activeAddrs) > 0 {
addrs = c.activeAddrs
@ -343,7 +352,7 @@ func (c *clusterNodes) GC(generation uint32) {
}
}
func (c *clusterNodes) Get(addr string) (*clusterNode, error) {
func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
node, err := c.get(addr)
if err != nil {
return nil, err
@ -407,7 +416,7 @@ func (c *clusterNodes) Random() (*clusterNode, error) {
}
n := rand.Intn(len(addrs))
return c.Get(addrs[n])
return c.GetOrCreate(addrs[n])
}
//------------------------------------------------------------------------------
@ -465,7 +474,7 @@ func newClusterState(
addr = replaceLoopbackHost(addr, originHost)
}
node, err := c.nodes.Get(addr)
node, err := c.nodes.GetOrCreate(addr)
if err != nil {
return nil, err
}
@ -586,8 +595,16 @@ func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
if len(nodes) == 0 {
return c.nodes.Random()
}
n := rand.Intn(len(nodes))
return nodes[n], nil
if len(nodes) == 1 {
return nodes[0], nil
}
randomNodes := rand.Perm(len(nodes))
for _, idx := range randomNodes {
if node := nodes[idx]; !node.Failing() {
return node, nil
}
}
return nodes[randomNodes[0]], nil
}
func (c *clusterState) slotNodes(slot int) []*clusterNode {
@ -628,14 +645,14 @@ func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error)
return state, nil
}
func (c *clusterStateHolder) LazyReload(ctx context.Context) {
func (c *clusterStateHolder) LazyReload() {
if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
return
}
go func() {
defer atomic.StoreUint32(&c.reloading, 0)
_, err := c.Reload(ctx)
_, err := c.Reload(context.Background())
if err != nil {
return
}
@ -645,14 +662,15 @@ func (c *clusterStateHolder) LazyReload(ctx context.Context) {
func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
v := c.state.Load()
if v != nil {
state := v.(*clusterState)
if time.Since(state.createdAt) > 10*time.Second {
c.LazyReload(ctx)
}
return state, nil
if v == nil {
return c.Reload(ctx)
}
return c.Reload(ctx)
state := v.(*clusterState)
if time.Since(state.createdAt) > 10*time.Second {
c.LazyReload()
}
return state, nil
}
func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
@ -728,7 +746,7 @@ func (c *ClusterClient) Options() *ClusterOptions {
// ReloadState reloads cluster state. If available it calls ClusterSlots func
// to get cluster slots information.
func (c *ClusterClient) ReloadState(ctx context.Context) {
c.state.LazyReload(ctx)
c.state.LazyReload()
}
// Close closes the cluster client, releasing any open resources.
@ -789,7 +807,7 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
}
if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
if isReadOnly {
c.state.LazyReload(ctx)
c.state.LazyReload()
}
node = nil
continue
@ -806,8 +824,10 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
var addr string
moved, ask, addr = isMovedError(lastErr)
if moved || ask {
c.state.LazyReload()
var err error
node, err = c.nodes.Get(addr)
node, err = c.nodes.GetOrCreate(addr)
if err != nil {
return err
}
@ -1004,7 +1024,7 @@ func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
for _, idx := range rand.Perm(len(addrs)) {
addr := addrs[idx]
node, err := c.nodes.Get(addr)
node, err := c.nodes.GetOrCreate(addr)
if err != nil {
if firstErr == nil {
firstErr = err
@ -1218,13 +1238,13 @@ func (c *ClusterClient) checkMovedErr(
return false
}
node, err := c.nodes.Get(addr)
node, err := c.nodes.GetOrCreate(addr)
if err != nil {
return false
}
if moved {
c.state.LazyReload(ctx)
c.state.LazyReload()
failedCmds.Add(node, cmd)
return true
}
@ -1252,10 +1272,13 @@ func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) erro
}
func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
return c.hooks.processPipeline(ctx, cmds, c._processTxPipeline)
return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
}
func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
// Trim multi .. exec.
cmds = cmds[1 : len(cmds)-1]
state, err := c.state.Get(ctx)
if err != nil {
setCmdsErr(cmds, err)
@ -1291,6 +1314,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
if err == nil {
return
}
if attempt < c.opt.MaxRedirects {
if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
setCmdsErr(cmds, err)
@ -1400,13 +1424,13 @@ func (c *ClusterClient) cmdsMoved(
addr string,
failedCmds *cmdsMap,
) error {
node, err := c.nodes.Get(addr)
node, err := c.nodes.GetOrCreate(addr)
if err != nil {
return err
}
if moved {
c.state.LazyReload(ctx)
c.state.LazyReload()
for _, cmd := range cmds {
failedCmds.Add(node, cmd)
}
@ -1455,7 +1479,7 @@ func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...s
moved, ask, addr := isMovedError(err)
if moved || ask {
node, err = c.nodes.Get(addr)
node, err = c.nodes.GetOrCreate(addr)
if err != nil {
return err
}
@ -1464,7 +1488,7 @@ func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...s
if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
if isReadOnly {
c.state.LazyReload(ctx)
c.state.LazyReload()
}
node, err = c.slotMasterNode(ctx, slot)
if err != nil {
@ -1567,7 +1591,7 @@ func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo,
for _, idx := range perm {
addr := addrs[idx]
node, err := c.nodes.Get(addr)
node, err := c.nodes.GetOrCreate(addr)
if err != nil {
if firstErr == nil {
firstErr = err
@ -1631,7 +1655,7 @@ func (c *ClusterClient) cmdNode(
return nil, err
}
if (c.opt.RouteByLatency || c.opt.RouteRandomly) && cmdInfo != nil && cmdInfo.ReadOnly {
if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
return c.slotReadOnlyNode(state, slot)
}
return state.slotMasterNode(slot)
@ -1655,6 +1679,35 @@ func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterN
return state.slotMasterNode(slot)
}
// SlaveForKey gets a client for a replica node to run any command on it.
// This is especially useful if we want to run a particular lua script which has
// only read only commands on the replica.
// This is because other redis commands generally have a flag that points that
// they are read only and automatically run on the replica nodes
// if ClusterOptions.ReadOnly flag is set to true.
func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
state, err := c.state.Get(ctx)
if err != nil {
return nil, err
}
slot := hashtag.Slot(key)
node, err := c.slotReadOnlyNode(state, slot)
if err != nil {
return nil, err
}
return node.Client, err
}
// MasterForKey return a client to the master node for a particular key.
func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) {
slot := hashtag.Slot(key)
node, err := c.slotMasterNode(ctx, slot)
if err != nil {
return nil, err
}
return node.Client, err
}
func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
for _, n := range nodes {
if n == node {


@ -2,24 +2,108 @@ package redis
import (
"context"
"sync"
"sync/atomic"
)
func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
cmd := NewIntCmd(ctx, "dbsize")
var size int64
err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
n, err := master.DBSize(ctx).Result()
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
var size int64
err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
n, err := master.DBSize(ctx).Result()
if err != nil {
return err
}
atomic.AddInt64(&size, n)
return nil
})
if err != nil {
return err
cmd.SetErr(err)
} else {
cmd.val = size
}
return nil
})
return cmd
}
func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
cmd := NewStringCmd(ctx, "script", "load", script)
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
mu := &sync.Mutex{}
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
val, err := shard.ScriptLoad(ctx, script).Result()
if err != nil {
return err
}
mu.Lock()
if cmd.Val() == "" {
cmd.val = val
}
mu.Unlock()
return nil
})
if err != nil {
cmd.SetErr(err)
}
return nil
})
return cmd
}
func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
cmd := NewStatusCmd(ctx, "script", "flush")
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
return shard.ScriptFlush(ctx).Err()
})
if err != nil {
cmd.SetErr(err)
}
return nil
})
return cmd
}
func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
args := make([]interface{}, 2+len(hashes))
args[0] = "script"
args[1] = "exists"
for i, hash := range hashes {
args[2+i] = hash
}
cmd := NewBoolSliceCmd(ctx, args...)
result := make([]bool, len(hashes))
for i := range result {
result[i] = true
}
_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
mu := &sync.Mutex{}
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
val, err := shard.ScriptExists(ctx, hashes...).Result()
if err != nil {
return err
}
mu.Lock()
for i, v := range val {
result[i] = result[i] && v
}
mu.Unlock()
return nil
})
if err != nil {
cmd.SetErr(err)
} else {
cmd.val = result
}
atomic.AddInt64(&size, n)
return nil
})
if err != nil {
cmd.SetErr(err)
return cmd
}
cmd.val = size
return cmd
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -10,6 +10,7 @@ import (
"github.com/go-redis/redis/v8/internal/proto"
)
// ErrClosed performs any operation on the closed client will return this error.
var ErrClosed = pool.ErrClosed
type Error interface {
@ -52,6 +53,9 @@ func shouldRetry(err error, retryTimeout bool) bool {
if strings.HasPrefix(s, "CLUSTERDOWN ") {
return true
}
if strings.HasPrefix(s, "TRYAGAIN ") {
return true
}
return false
}
@ -61,15 +65,28 @@ func isRedisError(err error) bool {
return ok
}
func isBadConn(err error, allowTimeout bool) bool {
if err == nil {
func isBadConn(err error, allowTimeout bool, addr string) bool {
switch err {
case nil:
return false
case context.Canceled, context.DeadlineExceeded:
return true
}
if isRedisError(err) {
// Close connections in read only state in case domain addr is used
// and domain resolves to a different Redis Server. See #790.
return isReadOnlyError(err)
switch {
case isReadOnlyError(err):
// Close connections in read only state in case domain addr is used
// and domain resolves to a different Redis Server. See #790.
return true
case isMovedSameConnAddr(err, addr):
// Close connections when we are asked to move to the same addr
// of the connection. Force a DNS resolution when all connections
// of the pool are recycled
return true
default:
return false
}
}
if allowTimeout {
@ -112,6 +129,14 @@ func isReadOnlyError(err error) bool {
return strings.HasPrefix(err.Error(), "READONLY ")
}
func isMovedSameConnAddr(err error, addr string) bool {
redisError := err.Error()
if !strings.HasPrefix(redisError, "MOVED ") {
return false
}
return strings.HasSuffix(redisError, " "+addr)
}
//------------------------------------------------------------------------------
type timeoutError interface {


@ -60,7 +60,7 @@ func RandomSlot() int {
return rand.Intn(slotNumber)
}
// hashSlot returns a consistent slot number between 0 and 16383
// Slot returns a consistent slot number between 0 and 16383
// for any given string key.
func Slot(key string) int {
if key == "" {


@ -0,0 +1,201 @@
package hscan
import (
"errors"
"fmt"
"reflect"
"strconv"
)
// decoderFunc represents decoding functions for default built-in types.
type decoderFunc func(reflect.Value, string) error
var (
// List of built-in decoders indexed by their numeric constant values (eg: reflect.Bool = 1).
decoders = []decoderFunc{
reflect.Bool: decodeBool,
reflect.Int: decodeInt,
reflect.Int8: decodeInt8,
reflect.Int16: decodeInt16,
reflect.Int32: decodeInt32,
reflect.Int64: decodeInt64,
reflect.Uint: decodeUint,
reflect.Uint8: decodeUint8,
reflect.Uint16: decodeUint16,
reflect.Uint32: decodeUint32,
reflect.Uint64: decodeUint64,
reflect.Float32: decodeFloat32,
reflect.Float64: decodeFloat64,
reflect.Complex64: decodeUnsupported,
reflect.Complex128: decodeUnsupported,
reflect.Array: decodeUnsupported,
reflect.Chan: decodeUnsupported,
reflect.Func: decodeUnsupported,
reflect.Interface: decodeUnsupported,
reflect.Map: decodeUnsupported,
reflect.Ptr: decodeUnsupported,
reflect.Slice: decodeSlice,
reflect.String: decodeString,
reflect.Struct: decodeUnsupported,
reflect.UnsafePointer: decodeUnsupported,
}
// Global map of struct field specs that is populated once for every new
// struct type that is scanned. This caches the field types and the corresponding
// decoder functions to avoid iterating through struct fields on subsequent scans.
globalStructMap = newStructMap()
)
func Struct(dst interface{}) (StructValue, error) {
v := reflect.ValueOf(dst)
// The destination to scan into should be a struct pointer.
if v.Kind() != reflect.Ptr || v.IsNil() {
return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst)
}
v = v.Elem()
if v.Kind() != reflect.Struct {
return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst)
}
return StructValue{
spec: globalStructMap.get(v.Type()),
value: v,
}, nil
}
// Scan scans the results from a key-value Redis map result set to a destination struct.
// The Redis keys are matched to the struct's field with the `redis` tag.
func Scan(dst interface{}, keys []interface{}, vals []interface{}) error {
if len(keys) != len(vals) {
return errors.New("args should have the same number of keys and vals")
}
strct, err := Struct(dst)
if err != nil {
return err
}
// Iterate through the (key, value) sequence.
for i := 0; i < len(vals); i++ {
key, ok := keys[i].(string)
if !ok {
continue
}
val, ok := vals[i].(string)
if !ok {
continue
}
if err := strct.Scan(key, val); err != nil {
return err
}
}
return nil
}
func decodeBool(f reflect.Value, s string) error {
b, err := strconv.ParseBool(s)
if err != nil {
return err
}
f.SetBool(b)
return nil
}
func decodeInt8(f reflect.Value, s string) error {
return decodeNumber(f, s, 8)
}
func decodeInt16(f reflect.Value, s string) error {
return decodeNumber(f, s, 16)
}
func decodeInt32(f reflect.Value, s string) error {
return decodeNumber(f, s, 32)
}
func decodeInt64(f reflect.Value, s string) error {
return decodeNumber(f, s, 64)
}
func decodeInt(f reflect.Value, s string) error {
return decodeNumber(f, s, 0)
}
func decodeNumber(f reflect.Value, s string, bitSize int) error {
v, err := strconv.ParseInt(s, 10, bitSize)
if err != nil {
return err
}
f.SetInt(v)
return nil
}
func decodeUint8(f reflect.Value, s string) error {
return decodeUnsignedNumber(f, s, 8)
}
func decodeUint16(f reflect.Value, s string) error {
return decodeUnsignedNumber(f, s, 16)
}
func decodeUint32(f reflect.Value, s string) error {
return decodeUnsignedNumber(f, s, 32)
}
func decodeUint64(f reflect.Value, s string) error {
return decodeUnsignedNumber(f, s, 64)
}
func decodeUint(f reflect.Value, s string) error {
return decodeUnsignedNumber(f, s, 0)
}
func decodeUnsignedNumber(f reflect.Value, s string, bitSize int) error {
v, err := strconv.ParseUint(s, 10, bitSize)
if err != nil {
return err
}
f.SetUint(v)
return nil
}
func decodeFloat32(f reflect.Value, s string) error {
v, err := strconv.ParseFloat(s, 32)
if err != nil {
return err
}
f.SetFloat(v)
return nil
}
// although the default is float64, but we better define it.
func decodeFloat64(f reflect.Value, s string) error {
v, err := strconv.ParseFloat(s, 64)
if err != nil {
return err
}
f.SetFloat(v)
return nil
}
func decodeString(f reflect.Value, s string) error {
f.SetString(s)
return nil
}
func decodeSlice(f reflect.Value, s string) error {
// []byte slice ([]uint8).
if f.Type().Elem().Kind() == reflect.Uint8 {
f.SetBytes([]byte(s))
}
return nil
}
func decodeUnsupported(v reflect.Value, s string) error {
return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
}


@ -0,0 +1,93 @@
package hscan
import (
"fmt"
"reflect"
"strings"
"sync"
)
// structMap contains the map of struct fields for target structs
// indexed by the struct type.
type structMap struct {
m sync.Map
}
func newStructMap() *structMap {
return new(structMap)
}
func (s *structMap) get(t reflect.Type) *structSpec {
if v, ok := s.m.Load(t); ok {
return v.(*structSpec)
}
spec := newStructSpec(t, "redis")
s.m.Store(t, spec)
return spec
}
//------------------------------------------------------------------------------
// structSpec contains the list of all fields in a target struct.
type structSpec struct {
m map[string]*structField
}
func (s *structSpec) set(tag string, sf *structField) {
s.m[tag] = sf
}
func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
numField := t.NumField()
out := &structSpec{
m: make(map[string]*structField, numField),
}
for i := 0; i < numField; i++ {
f := t.Field(i)
tag := f.Tag.Get(fieldTag)
if tag == "" || tag == "-" {
continue
}
tag = strings.Split(tag, ",")[0]
if tag == "" {
continue
}
// Use the built-in decoder.
out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
}
return out
}
//------------------------------------------------------------------------------
// structField represents a single field in a target struct.
type structField struct {
index int
fn decoderFunc
}
//------------------------------------------------------------------------------
type StructValue struct {
spec *structSpec
value reflect.Value
}
func (s StructValue) Scan(key string, value string) error {
field, ok := s.spec.m[key]
if !ok {
return nil
}
if err := field.fn(s.value.Field(field.index), value); err != nil {
t := s.value.Type()
return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
}
return nil
}


@ -1,33 +0,0 @@
package internal
import (
"context"
"go.opentelemetry.io/otel/api/global"
"go.opentelemetry.io/otel/api/metric"
)
var (
// WritesCounter is a count of write commands performed.
WritesCounter metric.Int64Counter
// NewConnectionsCounter is a count of new connections.
NewConnectionsCounter metric.Int64Counter
)
func init() {
defer func() {
if r := recover(); r != nil {
Logger.Printf(context.Background(), "Error creating meter github.com/go-redis/redis for Instruments", r)
}
}()
meter := metric.Must(global.Meter("github.com/go-redis/redis"))
WritesCounter = meter.NewInt64Counter("redis.writes",
metric.WithDescription("the number of writes initiated"),
)
NewConnectionsCounter = meter.NewInt64Counter("redis.new_connections",
metric.WithDescription("the number of connections created"),
)
}


@ -19,6 +19,8 @@ func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
_ = l.log.Output(2, fmt.Sprintf(format, v...))
}
// Logger calls Output to print to the stderr.
// Arguments are handled in the manner of fmt.Print.
var Logger Logging = &logger{
log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
}


@ -7,9 +7,7 @@ import (
"sync/atomic"
"time"
"github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/proto"
"go.opentelemetry.io/otel/api/trace"
)
var noDeadline = time.Time{}
@ -66,41 +64,28 @@ func (cn *Conn) RemoteAddr() net.Addr {
}
func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
return internal.WithSpan(ctx, "with_reader", func(ctx context.Context, span trace.Span) error {
if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
return internal.RecordError(ctx, err)
}
if err := fn(cn.rd); err != nil {
return internal.RecordError(ctx, err)
}
return nil
})
if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
return err
}
return fn(cn.rd)
}
func (cn *Conn) WithWriter(
ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
) error {
return internal.WithSpan(ctx, "with_writer", func(ctx context.Context, span trace.Span) error {
if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
return internal.RecordError(ctx, err)
}
if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
return err
}
if cn.bw.Buffered() > 0 {
cn.bw.Reset(cn.netConn)
}
if cn.bw.Buffered() > 0 {
cn.bw.Reset(cn.netConn)
}
if err := fn(cn.wr); err != nil {
return internal.RecordError(ctx, err)
}
if err := fn(cn.wr); err != nil {
return err
}
if err := cn.bw.Flush(); err != nil {
return internal.RecordError(ctx, err)
}
internal.WritesCounter.Add(ctx, 1)
return nil
})
return cn.bw.Flush()
}
func (cn *Conn) Close() error {


@ -12,7 +12,10 @@ import (
)
var (
ErrClosed = errors.New("redis: client is closed")
// ErrClosed performs any operation on the closed client will return this error.
ErrClosed = errors.New("redis: client is closed")
// ErrPoolTimeout timed out waiting to get a connection from the connection pool.
ErrPoolTimeout = errors.New("redis: connection pool timeout")
)
@ -54,6 +57,7 @@ type Options struct {
Dialer func(context.Context) (net.Conn, error)
OnClose func(*Conn) error
PoolFIFO bool
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
@ -117,9 +121,10 @@ func (p *ConnPool) checkMinIdleConns() {
for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
p.poolSize++
p.idleConnsLen++
go func() {
err := p.addIdleConn()
if err != nil {
if err != nil && err != ErrClosed {
p.connsMu.Lock()
p.poolSize--
p.idleConnsLen--
@ -136,9 +141,16 @@ func (p *ConnPool) addIdleConn() error {
}
p.connsMu.Lock()
defer p.connsMu.Unlock()
// It is not allowed to add new connections to the closed connection pool.
if p.closed() {
_ = cn.Close()
return ErrClosed
}
p.conns = append(p.conns, cn)
p.idleConns = append(p.idleConns, cn)
p.connsMu.Unlock()
return nil
}
@ -153,6 +165,14 @@ func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
}
p.connsMu.Lock()
defer p.connsMu.Unlock()
// It is not allowed to add new connections to the closed connection pool.
if p.closed() {
_ = cn.Close()
return nil, ErrClosed
}
p.conns = append(p.conns, cn)
if pooled {
// If pool is full remove the cn on next Put.
@ -162,7 +182,6 @@ func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
p.poolSize++
}
}
p.connsMu.Unlock()
return cn, nil
}
@ -185,7 +204,6 @@ func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
return nil, err
}
internal.NewConnectionsCounter.Add(ctx, 1)
cn := NewConn(netConn)
cn.pooled = pooled
return cn, nil
@ -228,16 +246,19 @@ func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
return nil, ErrClosed
}
err := p.waitTurn(ctx)
if err != nil {
if err := p.waitTurn(ctx); err != nil {
return nil, err
}
for {
p.connsMu.Lock()
cn := p.popIdle()
cn, err := p.popIdle()
p.connsMu.Unlock()
if err != nil {
return nil, err
}
if cn == nil {
break
}
@ -306,17 +327,28 @@ func (p *ConnPool) freeTurn() {
<-p.queue
}
func (p *ConnPool) popIdle() *Conn {
if len(p.idleConns) == 0 {
return nil
func (p *ConnPool) popIdle() (*Conn, error) {
if p.closed() {
return nil, ErrClosed
}
n := len(p.idleConns)
if n == 0 {
return nil, nil
}
idx := len(p.idleConns) - 1
cn := p.idleConns[idx]
p.idleConns = p.idleConns[:idx]
var cn *Conn
if p.opt.PoolFIFO {
cn = p.idleConns[0]
copy(p.idleConns, p.idleConns[1:])
p.idleConns = p.idleConns[:n-1]
} else {
idx := n - 1
cn = p.idleConns[idx]
p.idleConns = p.idleConns[:idx]
}
p.idleConnsLen--
p.checkMinIdleConns()
return cn
return cn, nil
}
func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
@ -477,6 +509,7 @@ func (p *ConnPool) ReapStaleConns() (int, error) {
p.connsMu.Lock()
cn := p.reapStaleConn()
p.connsMu.Unlock()
p.freeTurn()
if cn != nil {


@ -172,8 +172,7 @@ func (p *StickyConnPool) Reset(ctx context.Context) error {
func (p *StickyConnPool) badConnError() error {
if v := p._badConnError.Load(); v != nil {
err := v.(BadConnError)
if err.wrapped != nil {
if err := v.(BadConnError); err.wrapped != nil {
return err
}
}


@ -8,6 +8,7 @@ import (
"github.com/go-redis/redis/v8/internal/util"
)
// redis resp protocol data type.
const (
ErrorReply = '-'
StatusReply = '+'
@ -18,7 +19,7 @@ const (
//------------------------------------------------------------------------------
const Nil = RedisError("redis: nil")
const Nil = RedisError("redis: nil") // nolint:errname
type RedisError string
@ -83,7 +84,7 @@ func (r *Reader) readLine() ([]byte, error) {
return nil, err
}
full = append(full, b...)
full = append(full, b...) //nolint:makezero
b = full
}
if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {


@ -10,7 +10,7 @@ import (
)
// Scan parses bytes `b` to `v` with appropriate type.
// nolint: gocyclo
//nolint:gocyclo
func Scan(b []byte, v interface{}) error {
switch v := v.(type) {
case nil:
@ -106,6 +106,13 @@ func Scan(b []byte, v interface{}) error {
var err error
*v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
return err
case *time.Duration:
n, err := util.ParseInt(b, 10, 64)
if err != nil {
return err
}
*v = time.Duration(n)
return nil
case encoding.BinaryUnmarshaler:
return v.UnmarshalBinary(b)
default:
@ -131,7 +138,7 @@ func ScanSlice(data []string, slice interface{}) error {
for i, s := range data {
elem := next()
if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err)
err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
return err
}
}


@ -98,6 +98,8 @@ func (w *Writer) WriteArg(v interface{}) error {
case time.Time:
w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
return w.bytes(w.numBuf)
case time.Duration:
return w.int(v.Nanoseconds())
case encoding.BinaryMarshaler:
b, err := v.MarshalBinary()
if err != nil {


@ -43,3 +43,8 @@ func (s *source) Seed(seed int64) {
s.src.Seed(seed)
s.mu.Unlock()
}
// Shuffle pseudo-randomizes the order of elements.
// n is the number of elements.
// swap swaps the elements with indexes i and j.
func Shuffle(n int, swap func(i, j int)) { pseudo.Shuffle(n, swap) }


@ -1,3 +1,4 @@
//go:build appengine
// +build appengine
package internal


@ -1,3 +1,4 @@
//go:build !appengine
// +build !appengine
package internal


@ -4,24 +4,19 @@ import (
"context"
"time"
"github.com/go-redis/redis/v8/internal/proto"
"github.com/go-redis/redis/v8/internal/util"
"go.opentelemetry.io/otel/api/global"
"go.opentelemetry.io/otel/api/trace"
)
func Sleep(ctx context.Context, dur time.Duration) error {
return WithSpan(ctx, "sleep", func(ctx context.Context, span trace.Span) error {
t := time.NewTimer(dur)
defer t.Stop()
t := time.NewTimer(dur)
defer t.Stop()
select {
case <-t.C:
return nil
case <-ctx.Done():
return ctx.Err()
}
})
select {
case <-t.C:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
func ToLower(s string) string {
@ -49,33 +44,3 @@ func isLower(s string) bool {
}
return true
}
func Unwrap(err error) error {
u, ok := err.(interface {
Unwrap() error
})
if !ok {
return nil
}
return u.Unwrap()
}
//------------------------------------------------------------------------------
func WithSpan(ctx context.Context, name string, fn func(context.Context, trace.Span) error) error {
if span := trace.SpanFromContext(ctx); !span.IsRecording() {
return fn(ctx, span)
}
ctx, span := global.Tracer("github.com/go-redis/redis").Start(ctx, name)
defer span.End()
return fn(ctx, span)
}
func RecordError(ctx context.Context, err error) error {
if err != proto.Nil {
trace.SpanFromContext(ctx).RecordError(ctx, err)
}
return err
}


@ -1,3 +1,4 @@
//go:build appengine
// +build appengine
package util


@ -1,3 +1,4 @@
//go:build !appengine
// +build !appengine
package util


@ -8,14 +8,12 @@ import (
"net"
"net/url"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/pool"
"go.opentelemetry.io/otel/api/trace"
"go.opentelemetry.io/otel/label"
)
// Limiter is the interface of a rate limiter or a circuit breaker.
@ -58,7 +56,7 @@ type Options struct {
DB int
// Maximum number of retries before giving up.
// Default is 3 retries.
// Default is 3 retries; -1 (not 0) disables retries.
MaxRetries int
// Minimum backoff between each retry.
// Default is 8 milliseconds; -1 disables backoff.
@ -79,8 +77,12 @@ type Options struct {
// Default is ReadTimeout.
WriteTimeout time.Duration
// Type of connection pool.
// true for FIFO pool, false for LIFO pool.
// Note that fifo has higher overhead compared to lifo.
PoolFIFO bool
// Maximum number of socket connections.
// Default is 10 connections per every CPU as reported by runtime.NumCPU.
// Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
PoolSize int
// Minimum number of idle connections which is useful when establishing
// new connection is slow.
@ -139,7 +141,7 @@ func (opt *Options) init() {
}
}
if opt.PoolSize == 0 {
opt.PoolSize = 10 * runtime.NumCPU()
opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
}
switch opt.ReadTimeout {
case -1:
@ -191,9 +193,32 @@ func (opt *Options) clone() *Options {
// Scheme is required.
// There are two connection types: by tcp socket and by unix socket.
// Tcp connection:
// redis://<user>:<password>@<host>:<port>/<db_number>
// redis://<user>:<password>@<host>:<port>/<db_number>
// Unix connection:
// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
// Most Option fields can be set using query parameters, with the following restrictions:
// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
// - only scalar type fields are supported (bool, int, time.Duration)
// - for time.Duration fields, values must be a valid input for time.ParseDuration();
// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
// - to disable a duration field, use value less than or equal to 0; to use the default
// value, leave the value blank or remove the parameter
// - only the last value is interpreted if a parameter is given multiple times
// - fields "network", "addr", "username" and "password" can only be set using other
// URL attributes (scheme, host, userinfo, respectively); query parameters using these
// names will be treated as unknown parameters
// - unknown parameter names will result in an error
// Examples:
// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
// is equivalent to:
// &Options{
// Network: "tcp",
// Addr: "localhost:6789",
// DB: 1, // path "/3" was overridden by "&db=1"
// DialTimeout: 3 * time.Second, // no time unit = seconds
// ReadTimeout: 6 * time.Second,
// MaxRetries: 2,
// }
func ParseURL(redisURL string) (*Options, error) {
u, err := url.Parse(redisURL)
if err != nil {
@ -215,10 +240,6 @@ func setupTCPConn(u *url.URL) (*Options, error) {
o.Username, o.Password = getUserPassword(u)
if len(u.Query()) > 0 {
return nil, errors.New("redis: no options supported")
}
h, p, err := net.SplitHostPort(u.Host)
if err != nil {
h = u.Host
@ -249,7 +270,7 @@ func setupTCPConn(u *url.URL) (*Options, error) {
o.TLSConfig = &tls.Config{ServerName: h}
}
return o, nil
return setupConnParams(u, o)
}
func setupUnixConn(u *url.URL) (*Options, error) {
@ -261,19 +282,122 @@ func setupUnixConn(u *url.URL) (*Options, error) {
return nil, errors.New("redis: empty unix socket path")
}
o.Addr = u.Path
o.Username, o.Password = getUserPassword(u)
return setupConnParams(u, o)
}
dbStr := u.Query().Get("db")
if dbStr == "" {
return o, nil // if database is not set, connect to 0 db.
type queryOptions struct {
q url.Values
err error
}
func (o *queryOptions) string(name string) string {
vs := o.q[name]
if len(vs) == 0 {
return ""
}
delete(o.q, name) // enable detection of unknown parameters
return vs[len(vs)-1]
}
func (o *queryOptions) int(name string) int {
s := o.string(name)
if s == "" {
return 0
}
i, err := strconv.Atoi(s)
if err == nil {
return i
}
if o.err == nil {
o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
}
return 0
}
func (o *queryOptions) duration(name string) time.Duration {
s := o.string(name)
if s == "" {
return 0
}
// try plain number first
if i, err := strconv.Atoi(s); err == nil {
if i <= 0 {
// disable timeouts
return -1
}
return time.Duration(i) * time.Second
}
dur, err := time.ParseDuration(s)
if err == nil {
return dur
}
if o.err == nil {
o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
}
return 0
}
func (o *queryOptions) bool(name string) bool {
switch s := o.string(name); s {
case "true", "1":
return true
case "false", "0", "":
return false
default:
if o.err == nil {
o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
}
return false
}
}
func (o *queryOptions) remaining() []string {
if len(o.q) == 0 {
return nil
}
keys := make([]string, 0, len(o.q))
for k := range o.q {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
// setupConnParams converts query parameters in u to option value in o.
func setupConnParams(u *url.URL, o *Options) (*Options, error) {
q := queryOptions{q: u.Query()}
// compat: a future major release may use q.int("db")
if tmp := q.string("db"); tmp != "" {
db, err := strconv.Atoi(tmp)
if err != nil {
return nil, fmt.Errorf("redis: invalid database number: %w", err)
}
o.DB = db
}
db, err := strconv.Atoi(dbStr)
if err != nil {
return nil, fmt.Errorf("redis: invalid database number: %s", err)
o.MaxRetries = q.int("max_retries")
o.MinRetryBackoff = q.duration("min_retry_backoff")
o.MaxRetryBackoff = q.duration("max_retry_backoff")
o.DialTimeout = q.duration("dial_timeout")
o.ReadTimeout = q.duration("read_timeout")
o.WriteTimeout = q.duration("write_timeout")
o.PoolFIFO = q.bool("pool_fifo")
o.PoolSize = q.int("pool_size")
o.MinIdleConns = q.int("min_idle_conns")
o.MaxConnAge = q.duration("max_conn_age")
o.PoolTimeout = q.duration("pool_timeout")
o.IdleTimeout = q.duration("idle_timeout")
o.IdleCheckFrequency = q.duration("idle_check_frequency")
if q.err != nil {
return nil, q.err
}
// any parameters left?
if r := q.remaining(); len(r) > 0 {
return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
}
o.DB = db
return o, nil
}
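The reworked URL handling above lets most scalar Options fields be supplied as query parameters instead of only the database number. A minimal usage sketch, assuming the usual imports (the URL, credentials, and values are illustrative):

// Query parameters map to Options fields by snake_case name; unknown
// parameters cause ParseURL to return an error.
opt, err := redis.ParseURL("redis://user:password@localhost:6379/0?dial_timeout=3&read_timeout=6s&max_retries=2&pool_fifo=true")
if err != nil {
	panic(err)
}
rdb := redis.NewClient(opt)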
@ -292,21 +416,9 @@ func getUserPassword(u *url.URL) (string, string) {
func newConnPool(opt *Options) *pool.ConnPool {
return pool.NewConnPool(&pool.Options{
Dialer: func(ctx context.Context) (net.Conn, error) {
var conn net.Conn
err := internal.WithSpan(ctx, "dialer", func(ctx context.Context, span trace.Span) error {
var err error
span.SetAttributes(
label.String("redis.network", opt.Network),
label.String("redis.addr", opt.Addr),
)
conn, err = opt.Dialer(ctx, opt.Network, opt.Addr)
if err != nil {
_ = internal.RecordError(ctx, err)
}
return err
})
return conn, err
return opt.Dialer(ctx, opt.Network, opt.Addr)
},
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,

8
vendor/github.com/go-redis/redis/v8/package.json generated vendored Normal file
View File

@ -0,0 +1,8 @@
{
"name": "redis",
"version": "8.11.5",
"main": "index.js",
"repository": "git@github.com:go-redis/redis.git",
"author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
"license": "BSD-2-clause"
}

View File

@ -24,6 +24,7 @@ type pipelineExecer func(context.Context, []Cmder) error
// depends on your batch size and/or use TxPipeline.
type Pipeliner interface {
StatefulCmdable
Len() int
Do(ctx context.Context, args ...interface{}) *Cmd
Process(ctx context.Context, cmd Cmder) error
Close() error
@ -53,6 +54,15 @@ func (c *Pipeline) init() {
c.statefulCmdable = c.Process
}
// Len returns the number of queued commands.
func (c *Pipeline) Len() int {
c.mu.Lock()
ln := len(c.cmds)
c.mu.Unlock()
return ln
}
// Do queues the custom command for later execution.
func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
_ = c.Process(ctx, cmd)

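Len is new on the Pipeliner interface and on *Pipeline. A short sketch, assuming an existing *redis.Client rdb, a context ctx, and the usual imports (the key name is illustrative):

pipe := rdb.Pipeline()
pipe.Incr(ctx, "counter")
pipe.Expire(ctx, "counter", time.Hour)
// Len reports how many commands are queued before Exec sends them.
if pipe.Len() > 0 { // 2 commands queued here
	if _, err := pipe.Exec(ctx); err != nil {
		log.Println("pipeline failed:", err)
	}
}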
View File

@ -2,7 +2,6 @@ package redis
import (
"context"
"errors"
"fmt"
"strings"
"sync"
@ -13,13 +12,6 @@ import (
"github.com/go-redis/redis/v8/internal/proto"
)
const (
pingTimeout = time.Second
chanSendTimeout = time.Minute
)
var errPingTimeout = errors.New("redis: ping timeout")
// PubSub implements Pub/Sub commands as described in
// http://redis.io/topics/pubsub. Message receiving is NOT safe
// for concurrent use by multiple goroutines.
@ -43,9 +35,12 @@ type PubSub struct {
cmd *Cmd
chOnce sync.Once
msgCh chan *Message
allCh chan interface{}
ping chan struct{}
msgCh *channel
allCh *channel
}
func (c *PubSub) init() {
c.exit = make(chan struct{})
}
func (c *PubSub) String() string {
@ -54,10 +49,6 @@ func (c *PubSub) String() string {
return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
}
func (c *PubSub) init() {
c.exit = make(chan struct{})
}
func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
c.mu.Lock()
cn, err := c.conn(ctx, nil)
@ -150,7 +141,7 @@ func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allo
if c.cn != cn {
return
}
if isBadConn(err, allowTimeout) {
if isBadConn(err, allowTimeout, c.opt.Addr) {
c.reconnect(ctx, err)
}
}
@ -261,13 +252,16 @@ func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
}
cmd := NewCmd(ctx, args...)
cn, err := c.connWithLock(ctx)
c.mu.Lock()
defer c.mu.Unlock()
cn, err := c.conn(ctx, nil)
if err != nil {
return err
}
err = c.writeCmd(ctx, cn, cmd)
c.releaseConnWithLock(ctx, cn, err, false)
c.releaseConn(ctx, cn, err, false)
return err
}
@ -370,6 +364,8 @@ func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (int
c.cmd = NewCmd(ctx)
}
// Don't hold the lock to allow subscriptions and pings.
cn, err := c.connWithLock(ctx)
if err != nil {
return nil, err
@ -380,6 +376,7 @@ func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (int
})
c.releaseConnWithLock(ctx, cn, err, timeout > 0)
if err != nil {
return nil, err
}
@ -418,56 +415,6 @@ func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
}
}
// Channel returns a Go channel for concurrently receiving messages.
// The channel is closed together with the PubSub. If the Go channel
// is blocked full for 30 seconds the message is dropped.
// Receive* APIs can not be used after channel is created.
//
// go-redis periodically sends ping messages to test connection health
// and re-subscribes if a ping cannot be received for 30 seconds.
func (c *PubSub) Channel() <-chan *Message {
return c.ChannelSize(100)
}
// ChannelSize is like Channel, but creates a Go channel
// with specified buffer size.
func (c *PubSub) ChannelSize(size int) <-chan *Message {
c.chOnce.Do(func() {
c.initPing()
c.initMsgChan(size)
})
if c.msgCh == nil {
err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
panic(err)
}
if cap(c.msgCh) != size {
err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
panic(err)
}
return c.msgCh
}
// ChannelWithSubscriptions is like Channel, but message type can be either
// *Subscription or *Message. Subscription messages can be used to detect
// reconnections.
//
// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
func (c *PubSub) ChannelWithSubscriptions(ctx context.Context, size int) <-chan interface{} {
c.chOnce.Do(func() {
c.initPing()
c.initAllChan(size)
})
if c.allCh == nil {
err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
panic(err)
}
if cap(c.allCh) != size {
err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
panic(err)
}
return c.allCh
}
func (c *PubSub) getContext() context.Context {
if c.cmd != nil {
return c.cmd.ctx
@ -475,36 +422,135 @@ func (c *PubSub) getContext() context.Context {
return context.Background()
}
func (c *PubSub) initPing() {
//------------------------------------------------------------------------------
// Channel returns a Go channel for concurrently receiving messages.
// The channel is closed together with the PubSub. If the Go channel
// is blocked full for 30 seconds the message is dropped.
// Receive* APIs can not be used after channel is created.
//
// go-redis periodically sends ping messages to test connection health
// and re-subscribes if a ping cannot be received for 30 seconds.
func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
c.chOnce.Do(func() {
c.msgCh = newChannel(c, opts...)
c.msgCh.initMsgChan()
})
if c.msgCh == nil {
err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
panic(err)
}
return c.msgCh.msgCh
}
// ChannelSize is like Channel, but creates a Go channel
// with specified buffer size.
//
// Deprecated: use Channel(WithChannelSize(size)), remove in v9.
func (c *PubSub) ChannelSize(size int) <-chan *Message {
return c.Channel(WithChannelSize(size))
}
// ChannelWithSubscriptions is like Channel, but message type can be either
// *Subscription or *Message. Subscription messages can be used to detect
// reconnections.
//
// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
c.chOnce.Do(func() {
c.allCh = newChannel(c, WithChannelSize(size))
c.allCh.initAllChan()
})
if c.allCh == nil {
err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
panic(err)
}
return c.allCh.allCh
}
type ChannelOption func(c *channel)
// WithChannelSize specifies the Go chan size that is used to buffer incoming messages.
//
// The default is 100 messages.
func WithChannelSize(size int) ChannelOption {
return func(c *channel) {
c.chanSize = size
}
}
// WithChannelHealthCheckInterval specifies the health check interval.
// PubSub will ping Redis Server if it does not receive any messages within the interval.
// To disable health check, use zero interval.
//
// The default is 3 seconds.
func WithChannelHealthCheckInterval(d time.Duration) ChannelOption {
return func(c *channel) {
c.checkInterval = d
}
}
// WithChannelSendTimeout specifies the channel send timeout after which
// the message is dropped.
//
// The default is 60 seconds.
func WithChannelSendTimeout(d time.Duration) ChannelOption {
return func(c *channel) {
c.chanSendTimeout = d
}
}
type channel struct {
pubSub *PubSub
msgCh chan *Message
allCh chan interface{}
ping chan struct{}
chanSize int
chanSendTimeout time.Duration
checkInterval time.Duration
}
func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel {
c := &channel{
pubSub: pubSub,
chanSize: 100,
chanSendTimeout: time.Minute,
checkInterval: 3 * time.Second,
}
for _, opt := range opts {
opt(c)
}
if c.checkInterval > 0 {
c.initHealthCheck()
}
return c
}
func (c *channel) initHealthCheck() {
ctx := context.TODO()
c.ping = make(chan struct{}, 1)
go func() {
timer := time.NewTimer(time.Minute)
timer.Stop()
healthy := true
for {
timer.Reset(pingTimeout)
timer.Reset(c.checkInterval)
select {
case <-c.ping:
healthy = true
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
pingErr := c.Ping(ctx)
if healthy {
healthy = false
} else {
if pingErr == nil {
pingErr = errPingTimeout
}
c.mu.Lock()
c.reconnect(ctx, pingErr)
healthy = true
c.mu.Unlock()
if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
c.pubSub.mu.Lock()
c.pubSub.reconnect(ctx, pingErr)
c.pubSub.mu.Unlock()
}
case <-c.exit:
case <-c.pubSub.exit:
return
}
}
@ -512,16 +558,17 @@ func (c *PubSub) initPing() {
}
// initMsgChan must be in sync with initAllChan.
func (c *PubSub) initMsgChan(size int) {
func (c *channel) initMsgChan() {
ctx := context.TODO()
c.msgCh = make(chan *Message, size)
c.msgCh = make(chan *Message, c.chanSize)
go func() {
timer := time.NewTimer(time.Minute)
timer.Stop()
var errCount int
for {
msg, err := c.Receive(ctx)
msg, err := c.pubSub.Receive(ctx)
if err != nil {
if err == pool.ErrClosed {
close(c.msgCh)
@ -548,7 +595,7 @@ func (c *PubSub) initMsgChan(size int) {
case *Pong:
// Ignore.
case *Message:
timer.Reset(chanSendTimeout)
timer.Reset(c.chanSendTimeout)
select {
case c.msgCh <- msg:
if !timer.Stop() {
@ -556,30 +603,28 @@ func (c *PubSub) initMsgChan(size int) {
}
case <-timer.C:
internal.Logger.Printf(
c.getContext(),
"redis: %s channel is full for %s (message is dropped)",
c,
chanSendTimeout,
)
ctx, "redis: %s channel is full for %s (message is dropped)",
c, c.chanSendTimeout)
}
default:
internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
}
}
}()
}
// initAllChan must be in sync with initMsgChan.
func (c *PubSub) initAllChan(size int) {
func (c *channel) initAllChan() {
ctx := context.TODO()
c.allCh = make(chan interface{}, size)
c.allCh = make(chan interface{}, c.chanSize)
go func() {
timer := time.NewTimer(pingTimeout)
timer := time.NewTimer(time.Minute)
timer.Stop()
var errCount int
for {
msg, err := c.Receive(ctx)
msg, err := c.pubSub.Receive(ctx)
if err != nil {
if err == pool.ErrClosed {
close(c.allCh)
@ -601,29 +646,23 @@ func (c *PubSub) initAllChan(size int) {
}
switch msg := msg.(type) {
case *Subscription:
c.sendMessage(msg, timer)
case *Pong:
// Ignore.
case *Message:
c.sendMessage(msg, timer)
case *Subscription, *Message:
timer.Reset(c.chanSendTimeout)
select {
case c.allCh <- msg:
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
internal.Logger.Printf(
ctx, "redis: %s channel is full for %s (message is dropped)",
c, c.chanSendTimeout)
}
default:
internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
}
}
}()
}
func (c *PubSub) sendMessage(msg interface{}, timer *time.Timer) {
timer.Reset(pingTimeout)
select {
case c.allCh <- msg:
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
internal.Logger.Printf(
c.getContext(),
"redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
}
}
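The Channel rework above replaces the fixed ping and send timeouts with per-channel options. A minimal sketch of subscribing with the new options, assuming an existing *redis.Client rdb, a context ctx, and the usual imports (the channel name and values are illustrative):

pubsub := rdb.Subscribe(ctx, "events")
defer pubsub.Close()

ch := pubsub.Channel(
	redis.WithChannelSize(500),                           // buffer size (default 100)
	redis.WithChannelHealthCheckInterval(10*time.Second), // ping when idle (default 3s)
	redis.WithChannelSendTimeout(30*time.Second),         // drop a blocked message after this long (default 60s)
)
for msg := range ch {
	fmt.Println(msg.Channel, msg.Payload)
}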

View File

@ -2,14 +2,14 @@ package redis
import (
"context"
"errors"
"fmt"
"sync/atomic"
"time"
"github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/pool"
"github.com/go-redis/redis/v8/internal/proto"
"go.opentelemetry.io/otel/api/trace"
"go.opentelemetry.io/otel/label"
)
// Nil reply returned by Redis when key does not exist.
@ -51,9 +51,7 @@ func (hs hooks) process(
ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
) error {
if len(hs.hooks) == 0 {
err := hs.withContext(ctx, func() error {
return fn(ctx, cmd)
})
err := fn(ctx, cmd)
cmd.SetErr(err)
return err
}
@ -69,9 +67,7 @@ func (hs hooks) process(
}
if retErr == nil {
retErr = hs.withContext(ctx, func() error {
return fn(ctx, cmd)
})
retErr = fn(ctx, cmd)
cmd.SetErr(retErr)
}
@ -89,9 +85,7 @@ func (hs hooks) processPipeline(
ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
) error {
if len(hs.hooks) == 0 {
err := hs.withContext(ctx, func() error {
return fn(ctx, cmds)
})
err := fn(ctx, cmds)
return err
}
@ -106,9 +100,7 @@ func (hs hooks) processPipeline(
}
if retErr == nil {
retErr = hs.withContext(ctx, func() error {
return fn(ctx, cmds)
})
retErr = fn(ctx, cmds)
}
for hookIndex--; hookIndex >= 0; hookIndex-- {
@ -128,23 +120,6 @@ func (hs hooks) processTxPipeline(
return hs.processPipeline(ctx, cmds, fn)
}
func (hs hooks) withContext(ctx context.Context, fn func() error) error {
done := ctx.Done()
if done == nil {
return fn()
}
errc := make(chan error, 1)
go func() { errc <- fn() }()
select {
case <-done:
return ctx.Err()
case err := <-errc:
return err
}
}
//------------------------------------------------------------------------------
type baseClient struct {
@ -225,12 +200,9 @@ func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
return cn, nil
}
err = internal.WithSpan(ctx, "init_conn", func(ctx context.Context, span trace.Span) error {
return c.initConn(ctx, cn)
})
if err != nil {
if err := c.initConn(ctx, cn); err != nil {
c.connPool.Remove(ctx, cn, err)
if err := internal.Unwrap(err); err != nil {
if err := errors.Unwrap(err); err != nil {
return nil, err
}
return nil, err
@ -289,7 +261,7 @@ func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error)
c.opt.Limiter.ReportResult(err)
}
if isBadConn(err, false) {
if isBadConn(err, false, c.opt.Addr) {
c.connPool.Remove(ctx, cn, err)
} else {
c.connPool.Put(ctx, cn)
@ -299,25 +271,36 @@ func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error)
func (c *baseClient) withConn(
ctx context.Context, fn func(context.Context, *pool.Conn) error,
) error {
return internal.WithSpan(ctx, "with_conn", func(ctx context.Context, span trace.Span) error {
cn, err := c.getConn(ctx)
if err != nil {
return err
}
cn, err := c.getConn(ctx)
if err != nil {
return err
}
if span.IsRecording() {
if remoteAddr := cn.RemoteAddr(); remoteAddr != nil {
span.SetAttributes(label.String("net.peer.ip", remoteAddr.String()))
}
}
defer func() {
c.releaseConn(ctx, cn, err)
}()
defer func() {
c.releaseConn(ctx, cn, err)
}()
done := ctx.Done() //nolint:ifshort
if done == nil {
err = fn(ctx, cn)
return err
})
}
errc := make(chan error, 1)
go func() { errc <- fn(ctx, cn) }()
select {
case <-done:
_ = cn.Close()
// Wait for the goroutine to finish and send something.
<-errc
err = ctx.Err()
return err
case err = <-errc:
return err
}
}
func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
@ -325,45 +308,50 @@ func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
attempt := attempt
var retry bool
err := internal.WithSpan(ctx, "process", func(ctx context.Context, span trace.Span) error {
if attempt > 0 {
if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
return err
}
}
retryTimeout := true
err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmd(wr, cmd)
})
if err != nil {
return err
}
err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
if err != nil {
retryTimeout = cmd.readTimeout() == nil
return err
}
return nil
})
if err == nil {
return nil
}
retry = shouldRetry(err, retryTimeout)
return err
})
retry, err := c._process(ctx, cmd, attempt)
if err == nil || !retry {
return err
}
lastErr = err
}
return lastErr
}
func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
if attempt > 0 {
if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
return false, err
}
}
retryTimeout := uint32(1)
err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmd(wr, cmd)
})
if err != nil {
return err
}
err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
if err != nil {
if cmd.readTimeout() == nil {
atomic.StoreUint32(&retryTimeout, 1)
}
return err
}
return nil
})
if err == nil {
return false, nil
}
retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
return retry, err
}
func (c *baseClient) retryBackoff(attempt int) time.Duration {
return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
@ -722,7 +710,9 @@ type conn struct {
hooks // TODO: inherit hooks
}
// Conn is like Client, but its pool contains single connection.
// Conn represents a single Redis connection rather than a pool of connections.
// Prefer running commands from Client unless there is a specific need
// for a continuous single Redis connection.
type Conn struct {
*conn
ctx context.Context

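With the removal of hooks.withContext, withConn now watches ctx.Done directly: if the context is cancelled or expires while a command is in flight, the connection is closed and ctx.Err() is returned. A minimal sketch, assuming an existing *redis.Client rdb and the usual imports (the key, value, and timeout are illustrative):

ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()

// If the deadline fires mid-command, the connection is discarded and the
// call returns context.DeadlineExceeded.
if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
	log.Println("set failed:", err)
}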
View File

@ -1,5 +0,0 @@
{
"extends": [
"config:base"
]
}

View File

@ -12,7 +12,8 @@ import (
"time"
"github.com/cespare/xxhash/v2"
"github.com/dgryski/go-rendezvous"
rendezvous "github.com/dgryski/go-rendezvous" //nolint
"github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/hashtag"
"github.com/go-redis/redis/v8/internal/pool"
@ -78,6 +79,9 @@ type RingOptions struct {
ReadTimeout time.Duration
WriteTimeout time.Duration
// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
@ -138,6 +142,7 @@ func (opt *RingOptions) clientOptions() *Options {
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
@ -571,7 +576,7 @@ func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo {
}
info := cmdsInfo[name]
if info == nil {
internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
internal.Logger.Printf(ctx, "info for cmd=%s not found", name)
}
return info
}
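PoolFIFO is now plumbed through RingOptions as well. A short sketch, assuming the usual imports (the shard names and addresses are illustrative):

ring := redis.NewRing(&redis.RingOptions{
	Addrs: map[string]string{
		"shard1": "localhost:7000",
		"shard2": "localhost:7001",
	},
	PoolFIFO: true, // FIFO connection pool per shard (default is LIFO)
})
defer ring.Close()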

View File

@ -8,7 +8,7 @@ import (
"strings"
)
type scripter interface {
type Scripter interface {
Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
@ -16,9 +16,9 @@ type scripter interface {
}
var (
_ scripter = (*Client)(nil)
_ scripter = (*Ring)(nil)
_ scripter = (*ClusterClient)(nil)
_ Scripter = (*Client)(nil)
_ Scripter = (*Ring)(nil)
_ Scripter = (*ClusterClient)(nil)
)
type Script struct {
@ -38,25 +38,25 @@ func (s *Script) Hash() string {
return s.hash
}
func (s *Script) Load(ctx context.Context, c scripter) *StringCmd {
func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
return c.ScriptLoad(ctx, s.src)
}
func (s *Script) Exists(ctx context.Context, c scripter) *BoolSliceCmd {
func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
return c.ScriptExists(ctx, s.hash)
}
func (s *Script) Eval(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
return c.Eval(ctx, s.src, keys, args...)
}
func (s *Script) EvalSha(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
return c.EvalSha(ctx, s.hash, keys, args...)
}
// Run optimistically uses EVALSHA to run the script. If script does not exist
// it is retried using EVAL.
func (s *Script) Run(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
r := s.EvalSha(ctx, c, keys, args...)
if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
return s.Eval(ctx, c, keys, args...)

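With the scripter interface exported as Scripter, helper code can accept *redis.Client, *redis.Ring, or *redis.ClusterClient interchangeably. A minimal sketch, assuming the usual imports (the Lua script, function name, and increment value are illustrative):

var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

// bump works against any Scripter implementation. Run tries EVALSHA first
// and falls back to EVAL when the script is not cached on the server.
func bump(ctx context.Context, rdb redis.Scripter, key string) (int64, error) {
	return incrBy.Run(ctx, rdb, []string{key}, 2).Int64()
}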
View File

@ -23,7 +23,13 @@ type FailoverOptions struct {
MasterName string
// A seed list of host:port addresses of sentinel nodes.
SentinelAddrs []string
// Sentinel password from "requirepass <password>" (if enabled) in Sentinel configuration
// If specified with SentinelPassword, enables ACL-based authentication (via
// AUTH <user> <pass>).
SentinelUsername string
// Sentinel password from "requirepass <password>" (if enabled) in Sentinel
// configuration, or, if SentinelUsername is also supplied, used for ACL-based
// authentication.
SentinelPassword string
// Allows routing read-only commands to the closest master or slave node.
@ -36,6 +42,10 @@ type FailoverOptions struct {
// Route all commands to slave read-only nodes.
SlaveOnly bool
// Use slaves disconnected from the master when no connected slaves are available.
// Currently, this option only works in the RandomSlaveAddr function.
UseDisconnectedSlaves bool
// Following options are copied from Options struct.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
@ -53,6 +63,9 @@ type FailoverOptions struct {
ReadTimeout time.Duration
WriteTimeout time.Duration
// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
@ -82,6 +95,7 @@ func (opt *FailoverOptions) clientOptions() *Options {
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
@ -101,6 +115,7 @@ func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
OnConnect: opt.OnConnect,
DB: 0,
Username: opt.SentinelUsername,
Password: opt.SentinelPassword,
MaxRetries: opt.MaxRetries,
@ -111,6 +126,7 @@ func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
@ -142,6 +158,7 @@ func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
@ -167,6 +184,10 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
copy(sentinelAddrs, failoverOpt.SentinelAddrs)
rand.Shuffle(len(sentinelAddrs), func(i, j int) {
sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
})
failover := &sentinelFailover{
opt: failoverOpt,
sentinelAddrs: sentinelAddrs,
@ -177,11 +198,14 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
opt.init()
connPool := newConnPool(opt)
failover.mu.Lock()
failover.onFailover = func(ctx context.Context, addr string) {
_ = connPool.Filter(func(cn *pool.Conn) bool {
return cn.RemoteAddr().String() != addr
})
}
failover.mu.Unlock()
c := Client{
baseClient: newBaseClient(opt, connPool),
@ -208,14 +232,21 @@ func masterSlaveDialer(
failover.trySwitchMaster(ctx, addr)
}
}
if err != nil {
return nil, err
}
if failover.opt.Dialer != nil {
return failover.opt.Dialer(ctx, network, addr)
}
return net.DialTimeout("tcp", addr, failover.opt.DialTimeout)
netDialer := &net.Dialer{
Timeout: failover.opt.DialTimeout,
KeepAlive: 5 * time.Minute,
}
if failover.opt.TLSConfig == nil {
return netDialer.DialContext(ctx, network, addr)
}
return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
}
}
@ -430,10 +461,22 @@ func (c *sentinelFailover) closeSentinel() error {
}
func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
addresses, err := c.slaveAddrs(ctx)
if c.opt == nil {
return "", errors.New("opt is nil")
}
addresses, err := c.slaveAddrs(ctx, false)
if err != nil {
return "", err
}
if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
addresses, err = c.slaveAddrs(ctx, true)
if err != nil {
return "", err
}
}
if len(addresses) == 0 {
return c.MasterAddr(ctx)
}
@ -482,10 +525,10 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
return addr, nil
}
return "", errors.New("redis: all sentinels are unreachable")
return "", errors.New("redis: all sentinels specified in configuration are unreachable")
}
func (c *sentinelFailover) slaveAddrs(ctx context.Context) ([]string, error) {
func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
c.mu.RLock()
sentinel := c.sentinel
c.mu.RUnlock()
@ -508,6 +551,8 @@ func (c *sentinelFailover) slaveAddrs(ctx context.Context) ([]string, error) {
_ = c.closeSentinel()
}
var sentinelReachable bool
for i, sentinelAddr := range c.sentinelAddrs {
sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
@ -518,16 +563,22 @@ func (c *sentinelFailover) slaveAddrs(ctx context.Context) ([]string, error) {
_ = sentinel.Close()
continue
}
sentinelReachable = true
addrs := parseSlaveAddrs(slaves, useDisconnected)
if len(addrs) == 0 {
continue
}
// Push working sentinel to the top.
c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
c.setSentinel(ctx, sentinel)
addrs := parseSlaveAddrs(slaves)
return addrs, nil
}
return []string{}, errors.New("redis: all sentinels are unreachable")
if sentinelReachable {
return []string{}, nil
}
return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
}
func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
@ -547,12 +598,11 @@ func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *Sentinel
c.opt.MasterName, err)
return []string{}
}
return parseSlaveAddrs(addrs)
return parseSlaveAddrs(addrs, false)
}
func parseSlaveAddrs(addrs []interface{}) []string {
func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
nodes := make([]string, 0, len(addrs))
for _, node := range addrs {
ip := ""
port := ""
@ -574,8 +624,12 @@ func parseSlaveAddrs(addrs []interface{}) []string {
for _, flag := range flags {
switch flag {
case "s_down", "o_down", "disconnected":
case "s_down", "o_down":
isDown = true
case "disconnected":
if !keepDisconnected {
isDown = true
}
}
}
@ -589,7 +643,7 @@ func parseSlaveAddrs(addrs []interface{}) []string {
func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
c.mu.RLock()
currentAddr := c._masterAddr
currentAddr := c._masterAddr //nolint:ifshort
c.mu.RUnlock()
if addr == currentAddr {
@ -630,15 +684,22 @@ func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
}
for _, sentinel := range sentinels {
vals := sentinel.([]interface{})
var ip, port string
for i := 0; i < len(vals); i += 2 {
key := vals[i].(string)
if key == "name" {
sentinelAddr := vals[i+1].(string)
if !contains(c.sentinelAddrs, sentinelAddr) {
internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
sentinelAddr, c.opt.MasterName)
c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
}
switch key {
case "ip":
ip = vals[i+1].(string)
case "port":
port = vals[i+1].(string)
}
}
if ip != "" && port != "" {
sentinelAddr := net.JoinHostPort(ip, port)
if !contains(c.sentinelAddrs, sentinelAddr) {
internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
sentinelAddr, c.opt.MasterName)
c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
}
}
}
@ -646,6 +707,7 @@ func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
func (c *sentinelFailover) listen(pubsub *PubSub) {
ctx := context.TODO()
if c.onUpdate != nil {
c.onUpdate(ctx)
}
@ -701,7 +763,7 @@ func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
Addr: masterAddr,
}}
slaveAddrs, err := failover.slaveAddrs(ctx)
slaveAddrs, err := failover.slaveAddrs(ctx, false)
if err != nil {
return nil, err
}
@ -723,9 +785,12 @@ func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
}
c := NewClusterClient(opt)
failover.mu.Lock()
failover.onUpdate = func(ctx context.Context) {
c.ReloadState(ctx)
}
failover.mu.Unlock()
return c
}
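The new SentinelUsername field enables ACL-style authentication against the sentinels themselves. A minimal sketch of a failover client using it, assuming the usual imports (addresses and credentials are illustrative):

rdb := redis.NewFailoverClient(&redis.FailoverOptions{
	MasterName:       "mymaster",
	SentinelAddrs:    []string{"sentinel-1:26379", "sentinel-2:26379"},
	SentinelUsername: "sentinel-user", // with SentinelPassword, sent to sentinels as AUTH <user> <pass>
	SentinelPassword: "sentinel-pass",
	Username:         "app-user", // credentials for the Redis servers themselves
	Password:         "app-pass",
})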

View File

@ -13,7 +13,8 @@ const TxFailedErr = proto.RedisError("redis: transaction failed")
// Tx implements Redis transactions as described in
// http://redis.io/topics/transactions. It's NOT safe for concurrent use
// by multiple goroutines, because Exec resets list of watched keys.
// If you don't need WATCH it is better to use Pipeline.
//
// If you don't need WATCH, use Pipeline instead.
type Tx struct {
baseClient
cmdable
@ -65,16 +66,13 @@ func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
// The transaction is automatically closed when fn exits.
func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
tx := c.newTx(ctx)
defer tx.Close(ctx)
if len(keys) > 0 {
if err := tx.Watch(ctx, keys...).Err(); err != nil {
_ = tx.Close(ctx)
return err
}
}
err := fn(tx)
_ = tx.Close(ctx)
return err
return fn(tx)
}
// Close closes the transaction, releasing any open resources.

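Watch now closes the transaction with a single defer instead of on every return path; usage is unchanged. A short sketch of the optimistic-locking pattern, assuming an existing *redis.Client rdb, a context ctx, and the usual imports (the key name is illustrative):

err := rdb.Watch(ctx, func(tx *redis.Tx) error {
	n, err := tx.Get(ctx, "counter").Int()
	if err != nil && err != redis.Nil {
		return err
	}
	// The queued write only applies if "counter" was not modified by
	// another client between WATCH and EXEC.
	_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
		pipe.Set(ctx, "counter", n+1, 0)
		return nil
	})
	return err
}, "counter")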
View File

@ -25,6 +25,7 @@ type UniversalOptions struct {
Username string
Password string
SentinelUsername string
SentinelPassword string
MaxRetries int
@ -35,6 +36,9 @@ type UniversalOptions struct {
ReadTimeout time.Duration
WriteTimeout time.Duration
// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
PoolFIFO bool
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
@ -53,6 +57,7 @@ type UniversalOptions struct {
// The sentinel master name.
// Only failover clients.
MasterName string
}
@ -82,6 +87,7 @@ func (o *UniversalOptions) Cluster() *ClusterOptions {
DialTimeout: o.DialTimeout,
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
PoolFIFO: o.PoolFIFO,
PoolSize: o.PoolSize,
MinIdleConns: o.MinIdleConns,
MaxConnAge: o.MaxConnAge,
@ -109,6 +115,7 @@ func (o *UniversalOptions) Failover() *FailoverOptions {
DB: o.DB,
Username: o.Username,
Password: o.Password,
SentinelUsername: o.SentinelUsername,
SentinelPassword: o.SentinelPassword,
MaxRetries: o.MaxRetries,
@ -119,6 +126,7 @@ func (o *UniversalOptions) Failover() *FailoverOptions {
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
PoolFIFO: o.PoolFIFO,
PoolSize: o.PoolSize,
MinIdleConns: o.MinIdleConns,
MaxConnAge: o.MaxConnAge,
@ -154,6 +162,7 @@ func (o *UniversalOptions) Simple() *Options {
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
PoolFIFO: o.PoolFIFO,
PoolSize: o.PoolSize,
MinIdleConns: o.MinIdleConns,
MaxConnAge: o.MaxConnAge,
@ -168,9 +177,9 @@ func (o *UniversalOptions) Simple() *Options {
// --------------------------------------------------------------------
// UniversalClient is an abstract client which - based on the provided options -
// can connect to either clusters, or sentinel-backed failover instances
// or simple single-instance servers. This can be useful for testing
// cluster-specific applications locally.
// represents either a ClusterClient, a FailoverClient, or a single-node Client.
// This can be useful for testing cluster-specific applications locally or having different
// clients in different environments.
type UniversalClient interface {
Cmdable
Context() context.Context
@ -190,12 +199,12 @@ var (
_ UniversalClient = (*Ring)(nil)
)
// NewUniversalClient returns a new multi client. The type of client returned depends
// on the following three conditions:
// NewUniversalClient returns a new multi client. The type of the returned client depends
// on the following conditions:
//
// 1. if a MasterName is passed a sentinel-backed FailoverClient will be returned
// 2. if the number of Addrs is two or more, a ClusterClient will be returned
// 3. otherwise, a single-node redis Client will be returned.
// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned.
// 2. If the number of Addrs is two or more, a ClusterClient is returned.
// 3. Otherwise, a single-node Client is returned.
func NewUniversalClient(opts *UniversalOptions) UniversalClient {
if opts.MasterName != "" {
return NewFailoverClient(opts.Failover())

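A minimal sketch of the selection rules documented above, assuming the usual imports (addresses and the master name are illustrative):

// One address and no MasterName: a single-node *redis.Client.
single := redis.NewUniversalClient(&redis.UniversalOptions{
	Addrs: []string{"localhost:6379"},
})

// Two or more addresses: a *redis.ClusterClient.
cluster := redis.NewUniversalClient(&redis.UniversalOptions{
	Addrs: []string{"node-1:6379", "node-2:6379", "node-3:6379"},
})

// MasterName set: a sentinel-backed failover client.
failover := redis.NewUniversalClient(&redis.UniversalOptions{
	MasterName: "mymaster",
	Addrs:      []string{"sentinel-1:26379"},
})

_, _, _ = single, cluster, failover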
6
vendor/github.com/go-redis/redis/v8/version.go generated vendored Normal file
View File

@ -0,0 +1,6 @@
package redis
// Version is the current release version.
func Version() string {
return "8.11.5"
}

View File

@ -1,22 +0,0 @@
.DS_Store
Thumbs.db
.tools/
.idea/
.vscode/
*.iml
*.so
coverage.*
gen/
/example/basic/basic
/example/grpc/client/client
/example/grpc/server/server
/example/http/client/client
/example/http/server/server
/example/jaeger/jaeger
/example/namedtracer/namedtracer
/example/prometheus/prometheus
/example/zipkin/zipkin
/example/otel-collector/otel-collector

View File

@ -1,3 +0,0 @@
[submodule "opentelemetry-proto"]
path = exporters/otlp/internal/opentelemetry-proto
url = https://github.com/open-telemetry/opentelemetry-proto

View File

@ -1,32 +0,0 @@
# See https://github.com/golangci/golangci-lint#config-file
run:
issues-exit-code: 1 #Default
tests: true #Default
linters:
enable:
- misspell
- goimports
- golint
- gofmt
issues:
exclude-rules:
# helpers in tests often (rightfully) pass a *testing.T as their first argument
- path: _test\.go
text: "context.Context should be the first parameter of a function"
linters:
- golint
# Yes, they are, but it's okay in a test
- path: _test\.go
text: "exported func.*returns unexported type.*which can be annoying to use"
linters:
- golint
linters-settings:
misspell:
locale: US
ignore-words:
- cancelled
goimports:
local-prefixes: go.opentelemetry.io

View File

@ -1,922 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.13.0] - 2020-10-08
### Added
- OTLP Metric exporter supports Histogram aggregation. (#1209)
- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
- A Baggage API to implement the OpenTelemetry specification. (#1217)
### Changed
- Set default propagator to no-op propagator. (#1184)
- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
They now are `Unset`, `Error`, and `Ok`.
They no longer track the gRPC codes. (#1214)
- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/propagators`. (#1217)
### Fixed
- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
### Removed
- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
- Nested array/slice support has been removed. (#1226)
## [0.12.0] - 2020-09-24
### Added
- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
This addition was made to conform with our project option conventions. (#1155)
- Instrumentation library information was added to the Zipkin exporter. (#1119)
- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
- More semantic conventions for k8s as resource attributes. (#1167)
### Changed
- Add reconnecting udp connection type to Jaeger exporter.
This change adds a new optional implementation of the udp conn interface used to detect changes to an agent's host dns record.
It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119)
- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
- Move `tools` package under `internal`. (#1141)
- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142)
The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged.
- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153)
- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
recommend the use of `newConfig()` instead of `configure()`. (#1163)
- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
- Ensure exported interface types include parameter names and update the
Style Guide to reflect this styling rule. (#1172)
- Don't consider unset environment variable for resource detection to be an error. (#1170)
- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
`go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
- ValueObserver instruments use LastValue aggregator by default. (#1165)
- OTLP Metric exporter supports LastValue aggregation. (#1165)
- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
### Removed
- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
`go.opentelemetry.io/contrib/propagators/` module. (#1191)
- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194)
### Fixed
- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
- Fix missing shutdown processor in otel-collector example. (#1186)
- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
## [0.11.0] - 2020-08-24
### Added
- Support for exporting array-valued attributes via OTLP. (#992)
- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
- Support for filtering metric label sets. (#1047)
- A dimensionality-reducing metric Processor. (#1057)
- Integration tests for more OTel Collector Attribute types. (#1062)
- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078)
### Changed
- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
- Rename `api/testharness` to `api/apitest`. (#1049)
- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
- Change Metric Processor to merge multiple observations. (#1024)
- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038)
- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042)
- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060)
- Unify Callback Function Naming.
Rename `*Callback` with `*Func`. (#1061)
- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface.
This interface still supports the export of `SpanData`, but only as a slice.
Implementation are also required now to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078)
- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078)
### Removed
- Duplicate, unused API sampler interface. (#999)
Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027)
- The `WithSpan` method of the `Tracer` interface.
The functionality this method provided was limited compared to what a user can provide themselves.
It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043)
- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077)
- The `oterror` package. (#1026)
- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
### Fixed
- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
- Correct instrumentation version tag in Jaeger exporter. (#1037)
- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
- The `otel-collector` example referenced outdated collector processors. (#1006)
## [0.10.0] - 2020-07-29
This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
### Added
- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
These function build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944)
- Add propagator option for gRPC instrumentation. (#986)
- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
### Changed
- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968)
- `value.Bool` was replaced with `kv.BoolValue`.
- `value.Int64` was replaced with `kv.Int64Value`.
- `value.Uint64` was replaced with `kv.Uint64Value`.
- `value.Float64` was replaced with `kv.Float64Value`.
- `value.Int32` was replaced with `kv.Int32Value`.
- `value.Uint32` was replaced with `kv.Uint32Value`.
- `value.Float32` was replaced with `kv.Float32Value`.
- `value.String` was replaced with `kv.StringValue`.
- `value.Int` was replaced with `kv.IntValue`.
- `value.Uint` was replaced with `kv.UintValue`.
- `value.Array` was replaced with `kv.ArrayValue`.
- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
### Removed
- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970)
### Fixed
- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
- Correct Go language formatting in the README documentation. (#961)
- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
## [0.9.0] - 2020-07-20
### Added
- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939)
- A Detector to automatically detect resources from an environment variable. (#939)
- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
### Changed
- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948)
### Removed
- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
## [0.8.0] - 2020-07-09
### Added
- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882)
- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
- Add `peer.service` semantic attribute. (#898)
- Add database-specific semantic attributes. (#899)
- Add semantic convention for `faas.coldstart` and `container.id`. (#909)
- Add http content size semantic conventions. (#905)
- Include `http.request_content_length` in HTTP request basic attributes. (#905)
- Add semantic conventions for operating system process resource attribute keys. (#919)
- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
### Changed
- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
- Use lowercase header names for B3 Multiple Headers. (#881)
- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882)
- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid.
This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
- Extend semantic conventions for RPC. (#900)
- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
- `"api/standard".FaaSName` -> `FaaSNameKey`
- `"api/standard".FaaSID` -> `FaaSIDKey`
- `"api/standard".FaaSVersion` -> `FaaSVersionKey`
- `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
### Removed
- The `FlagsUnused` trace flag is removed.
The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is now used instead. (#882)
- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
### Fixed
- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
- The B3 propagator now propagates the debug flag.
This removes the behavior of changing the debug flag into a set sampling bit.
Instead, this now follows the B3 specification and omits the `X-B3-Sampling` header. (#882)
- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882)
- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908)
- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
- Update otel-collector example to use the v0.5.0 collector. (#915)
- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
This is in accordance with OpenTelemetry semantic conventions. (#922)
- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
## [0.7.0] - 2020-06-26
This release implements the v0.5.0 version of the OpenTelemetry specification.
### Added
- The othttp instrumentation now includes default metrics. (#861)
- This CHANGELOG file to track all changes in the project going forward.
- Support for array type attributes. (#798)
- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. (#844)
- Timestamps are now passed to exporters for each export. (#835)
- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
This replaces the prior `Record` `struct` use for this purpose. (#835)
- New dependabot integration to automate package upgrades. (#814)
- `Meter` and `Tracer` implementations accept an instrumentation version as an optional argument.
This instrumentation version is passed on to exporters. (#811) (#805) (#802)
- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
- Environment variables for Jaeger exporter are supported. (#796)
- New `aggregation.Kind` in the export metric API. (#808)
- New example that uses OTLP and the collector. (#790)
- Handle errors in the span `SetName` during span initialization. (#791)
- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777)
- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`.
There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778)
- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769)
### Changed
- Rename `Integrator` to `Processor` in the metric SDK. (#863)
- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
- Rename `simple` integrator to `basic` integrator. (#857)
- Merge otlp collector examples. (#841)
- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808)
- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
- Update error handling for any one-off error handlers, replacing them with the `global.Handle` function. (#791)
- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
- Make the argument order to Histogram and DDSketch `New()` consistent. (#781)
### Removed
- `Uint64NumberKind` and related functions from the API. (#864)
- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
### Fixed
- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
- Bump github.com/golang/protobuf from 1.3.2 to 1.4.2. (#848)
- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817)
- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828)
- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838)
- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829)
- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815)
- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823)
- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830)
- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822)
- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820)
- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831)
- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836)
- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837)
- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839)
- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843)
- Set span status from HTTP status code in the othttp instrumentation. (#832)
- Fixed typo in push controller comment. (#834)
- The `Aggregator` testing has been updated and cleaned. (#812)
- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
- Fixed `global` `handler_test.go` test failure. (#804)
- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
- Fixed OTLP example's accidental early close of exporter. (#807)
- Ensure zipkin exporter reads and closes response body. (#788)
- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
- Clean up tools and RELEASING documentation. (#762)
## [0.6.0] - 2020-05-21
### Added
- Support for `Resource`s in the prometheus exporter. (#757)
- New pull controller. (#751)
- New `UpDownSumObserver` instrument. (#750)
- OpenTelemetry collector demo. (#711)
- New `SumObserver` instrument. (#747)
- New `UpDownCounter` instrument. (#745)
- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731)
### Changed
- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
- The prometheus exporter now uses the new pull controller. (#751)
- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752)
- Support use of synchronous instruments in asynchronous callbacks. (#725)
- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
- Rename `Observer` instrument to `ValueObserver`. (#734)
- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
### Fixed
- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
- Fix `string` case in `kv` `Infer` function. (#746)
- Fix panic in grpctrace client interceptors. (#740)
- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
- Rewrite span batch process queue batching logic. (#719)
- Remove the push controller named Meter map. (#738)
- Fix Histogram aggregator initial state (fix #735). (#736)
- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
- Added test for grpctrace `UnaryInterceptorClient`. (#695)
- Rearrange `api/metric` code layout. (#724)
## [0.5.0] - 2020-05-13
### Added
- Batch `Observer` callback support. (#717)
- Alias `api` types to root package of project. (#696)
- Create basic `othttp.Transport` for simple client instrumentation. (#678)
- `SetAttribute(string, interface{})` to the trace API. (#674)
- Jaeger exporter option that allows user to specify custom http client. (#671)
- `Stringer` and `Infer` methods to `key`s. (#662)
### Changed
- Rename `NewKey` in the `kv` package to just `Key`. (#721)
- Move `core` and `key` to `kv` package. (#720)
- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
- Move `Number` from `core` to `api/metric` package. (#706)
- Move `SpanContext` from `core` to `trace` package. (#692)
- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
### Fixed
- Update tooling to run generators in all submodules. (#705)
- gRPC interceptor regexp to match methods without a service name. (#683)
- Use a `const` for padding 64-bit B3 trace IDs. (#701)
- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
- Left-pad 64-bit B3 trace IDs with zero. (#698)
- Propagate at least the first W3C tracestate header. (#694)
- Remove internal `StateLocker` implementation. (#688)
- Increase instance size CI system uses. (#690)
- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
- Reimplement histogram using mutex instead of `StateLocker`. (#669)
- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
- Update documentation to not include any references to `WithKeys`. (#672)
- Correct misspelling. (#668)
- Fix clobbering of the span context if extraction fails. (#656)
- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
## [0.4.3] - 2020-04-24
### Added
- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
- New `api/label` package, providing common label set implementation. (#651)
- Support for JSON marshaling of `Resources`. (#654)
- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
- `WithSpanFormatter` option to the othttp plugin. (#617)
- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612)
- The prometheus exporter now supports exporting histograms. (#601)
- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613)
- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613)
- An `Equal` method to the `Resource` to test the equivalence of resources. (#613)
- An iterable structure (`AttributeIterator`) for `Resource` attributes.
### Changed
- The zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
- Pass `Resources` through the metrics export pipeline. (#659)
### Removed
- `WithKeys` option from the metric API. (#639)
### Fixed
- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
- Use type names for return values in jaeger exporter. (#648)
- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647)
- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
- Add error wrapping to the prometheus exporter. (#631)
- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
- Create a new recorder rather than reusing one when there are multiple observations in the same epoch for asynchronous instruments. (#610)
- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
## [0.4.2] - 2020-03-31
### Fixed
- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
## [0.4.1] - 2020-03-31
### Fixed
- Update `tag.sh` to create signed tags. (#604)
## [0.4.0] - 2020-03-30
### Added
- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
- Script to verify examples after a new release. (#579)
### Removed
- The dogstatsd exporter due to lack of support.
This additionally removes support for statsd. (#591)
- `LabelSet` from the metric API.
This is replaced by a `[]core.KeyValue` slice. (#595)
- `Labels` from the metric API's `Meter` interface. (#595)
### Changed
- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
### Fixed
- Corrected missing return in mock span. (#582)
- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588)
- Update pre-release script to be compatible between GNU and BSD based systems. (#592)
- Add a `RecordBatch` benchmark. (#594)
- Moved span transforms of the OTLP exporter to the internal package. (#593)
- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569)
- Removed unneeded allocation on empty labels in OLTP exporter. (#597)
- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599)
- Update project documentation godoc.org links to pkg.go.dev. (#602)
## [0.3.0] - 2020-03-21
This is the first official beta release, providing nearly complete metrics, tracing, and context propagation functionality.
There is still a possibility of breaking changes.
### Added
- Add `Observer` metric instrument. (#474)
- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
- The zipkin trace exporter. (#495)
- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
- The `StatusMessage` field was added to the trace `Span`. (#524)
- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
- The `Resource` type was added to the SDK. (#528)
- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
- Scripts to better automate the release process. (#576)
### Changed
- Default to using `AlwaysSampler` instead of `ProbabilitySampler` to match the OpenTelemetry specification. (#506)
- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
- Rename metric API `Options` to `Config`. (#541)
- Rename metric `Counter` aggregator to be `Sum`. (#541)
- Unify metric options into `Option` from instrument specific options. (#541)
- The trace API's `TraceProvider` now supports `Resource`s. (#545)
- Correct error in zipkin module name. (#548)
- The jaeger trace exporter now supports `Resource`s. (#551)
- Metric SDK now supports `Resource`s.
The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
- The stdout trace exporter now supports `Resource`s. (#558)
- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
- Replace `Ordered` with an iterator in `export.Labels`. (#567)
### Removed
- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452)
- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
- `GetDescriptor` from the metric SDK. (#575)
- The `Gauge` instrument from the metric API. (#537)
### Fixed
- Make histogram aggregator checkpoint consistent. (#438)
- Update README with import instructions and how to build and test. (#505)
- The default label encoding was updated to be unique. (#508)
- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
- Fix data race in `BatchedSpanProcessor`. (#518)
- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521)
- Use a variable-size array to represent ordered labels in maps. (#523)
- Update the OTLP protobuf and update changed import path. (#532)
- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
- Eliminate goroutine leak in histogram stress test. (#547)
- Update OTLP exporter with latest protobuf. (#550)
- Add filters to the othttp plugin. (#556)
- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
- Encode labels once during checkpoint.
The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
## [0.2.3] - 2020-03-04
### Added
- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
- Configurable push frequency for exporters setup pipeline. (#504)
### Changed
- Rename the `exporter` directory to `exporters`.
The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
This resulted in all subsequent releases not becoming the default latest.
A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
Consequently, this action also renames *all* exporter packages. (#502)
### Removed
- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
## [0.2.2] - 2020-02-27
### Added
- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier.` (#467)
- `Config` and configuring `Option` to the propagator API. (#467)
- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier.` (#467)
- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
- Histogram aggregator. (#433)
- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456)
- `AlwaysParentSample` sampler to the trace API. (#455)
- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
### Changed
- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
- Move correlation context propagation to correlation package. (#479)
- Do not default to putting remote span context into links. (#480)
- Propagators extrac
- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
- Renamed the `export` package to `metric` to match directory structure. (#432)
- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
- Rename the `api/propagators` package to `api/propagation`. (#444)
- Move the propagators from the `propagators` package into the `trace` API package. (#444)
- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
- Moved all dependencies of tools package to a tools directory. (#466)
### Removed
- Binary propagators. (#467)
- NOOP propagator. (#467)
### Fixed
- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
- Fix a possible nil-dereference crash (#478)
- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
- Initialize `onError` based on `Config` in prometheus exporter. (#486)
- Correct module name in prometheus exporter README. (#475)
- Removed tracer name prefix from span names. (#430)
- Fix `aggregator_test.go` import package comment. (#431)
- Improved detail in stdout exporter. (#436)
- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442)
- Reword function documentation in gRPC plugin. (#446)
- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441)
- Upgraded to Go 1.13 in CI. (#465)
- Correct opentelemetry.io URL in trace SDK documentation. (#464)
- Refactored reference counting logic in SDK determination of stale records. (#468)
- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
## [0.2.1.1] - 2020-01-13
### Fixed
- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428)
## [0.2.1] - 2020-01-08
### Added
- Global meter forwarding implementation.
This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
- Global trace forwarding implementation.
This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
- Standardize export pipeline creation in all exporters. (#395)
- Testing, organization, and comments for 64-bit field alignment. (#418)
- Script to tag all modules in the project. (#414)
### Changed
- Renamed `propagation` package to `propagators`. (#362)
- Renamed `B3Propagator` propagator to `B3`. (#362)
- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
- `Number` now has a pointer receiver for its methods. (#375)
- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
- Renamed `Message` in Event to `Name` in the trace API. (#389)
- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385)
- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
### Fixed
- Aggregator import path corrected. (#421)
- Correct links in README. (#368)
- The README was updated to match latest code changes in its examples. (#374)
- Don't capitalize error statements. (#375)
- Fix ignored errors. (#375)
- Fix ambiguous variable naming. (#375)
- Removed unnecessary type casting. (#375)
- Use named parameters. (#375)
- Updated release schedule. (#378)
- Correct http-stackdriver example module name. (#394)
- Removed the `http.request` span in `httptrace` package. (#397)
- Add comments in the metrics SDK (#399)
- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403)
- Add documentation of compatible exporters in the README. (#405)
- Typo fix. (#408)
- Simplify span check logic in SDK tracer implementation. (#419)
## [0.2.0] - 2019-12-03
### Added
- Unary gRPC tracing example. (#351)
- Prometheus exporter. (#334)
- Dogstatsd metrics exporter. (#326)
### Changed
- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
- Rename `GetMeter` to `Meter`. (#357)
- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
- Move `/global` package to `/api/global`. (#356)
- Rename `GetTracer` to `Tracer`. (#347)
### Removed
- `SetAttribute` from the `Span` interface in the trace API. (#361)
- `AddLink` from the `Span` interface in the trace API. (#349)
- `Link` from the `Span` interface in the trace API. (#349)
### Fixed
- Exclude example directories from coverage report. (#365)
- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project's specified minimum version and this environment variable is not needed for that version of Go. (#359)
- Run the race checker for all tests. (#354)
- Redundant commands in the Makefile are removed. (#354)
- Split the `generate` and `lint` targets of the Makefile. (#354)
- Renames `circle-ci` target to more generic `ci` in Makefile. (#354)
- Add example Prometheus binary to gitignore. (#358)
- Support negative numbers with the `MaxSumCount`. (#335)
- Resolve race conditions in `push_test.go` identified in #339. (#340)
- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
Previously it was testing `AlwaysSample` twice. (#325)
- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325)
- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
This was corrected. (#333)
## [0.1.2] - 2019-11-18
### Fixed
- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
- Removed unnecessary unslicing of parameters that are already a slice. (#324)
## [0.1.1] - 2019-11-18
This release contains a Metrics SDK with a stdout exporter and supports basic aggregations such as counter, gauge, array, maxsumcount, and ddsketch.
### Added
- Metrics stdout export pipeline. (#265)
- Array aggregation for raw measure metrics. (#282)
- The `core.Value` type now has a `MarshalJSON` method. (#281)
### Removed
- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
### Changed
- Allocation in LabelSet construction to reduce GC overhead. (#318)
- `trace.WithAttributes` to append values instead of replacing them. (#315)
- Use a formula for tolerance in sampling tests. (#298)
- Move export types into trace and metric-specific sub-directories. (#289)
- `SpanKind` back to being based on an `int` type. (#288)
### Fixed
- URL to OpenTelemetry website in README. (#323)
- Name of othttp default tracer. (#321)
- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
- CI modules cache to correctly restore/save from/to the cache. (#316)
- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
- README now reflects the new code structure introduced with these changes. (#291)
- Make the basic example work. (#279)
## [0.1.0] - 2019-11-04
This is the first release of the open-telemetry go library.
It contains the API and SDK for trace and meter.
### Added
- Initial OpenTelemetry trace and metric API prototypes.
- Initial OpenTelemetry trace, metric, and export SDK packages.
- A wireframe bridge to support compatibility with OpenTracing.
- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup.
- Exporters for Jaeger, Stackdriver, and stdout.
- Propagators for binary, B3, and trace-context protocols.
- Project information and guidelines in the form of a README and CONTRIBUTING.
- Tools to build the project and a Makefile to automate the process.
- Apache-2.0 license.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v0.13.0...HEAD
[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0
[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0
[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0
[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0
[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0
[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0
[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0
[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0
[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0
[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3
[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2
[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1
[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0
[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0
[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3
[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2
[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1
[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1
[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0
[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0

View File

@ -1,17 +0,0 @@
#####################################################
#
# List of approvers for this repository
#
#####################################################
#
# Learn about membership in OpenTelemetry community:
# https://github.com/open-telemetry/community/blob/master/community-membership.md
#
#
# Learn about CODEOWNERS file format:
# https://help.github.com/en/articles/about-code-owners
#
* @jmacd @lizthegrey @MrAlias @Aneurysm9 @evantorrie @XSAM
CODEOWNERS @MrAlias @Aneurysm9

View File

@ -1,373 +0,0 @@
# Contributing to opentelemetry-go
The Go special interest group (SIG) meets regularly. See the
OpenTelemetry
[community](https://github.com/open-telemetry/community#golang-sdk)
repo for information on this and other language SIGs.
See the [public meeting
notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b)
for a summary description of past meetings. To request edit access,
join the meeting or get in touch on
[Gitter](https://gitter.im/open-telemetry/opentelemetry-go).
## Development
You can view and edit the source code by cloning this repository:
```bash
git clone https://github.com/open-telemetry/opentelemetry-go.git
```
Run `make test` to run the tests instead of `go test`.
There are some generated files checked into the repo. To make sure
that the generated files are up-to-date, run `make` (or `make
precommit` - the `precommit` target is the default).
The `precommit` target also fixes the formatting of the code and
checks the status of the go module files.
If after running `make precommit` the output of `git status` contains
`nothing to commit, working tree clean` then it means that everything
is up-to-date and properly formatted.
## Pull Requests
### How to Send Pull Requests
Everyone is welcome to contribute code to `opentelemetry-go` via
GitHub pull requests (PRs).
To create a new PR, fork the project in GitHub and clone the upstream
repo:
```sh
$ go get -d go.opentelemetry.io/otel
```
(This may print some warning about "build constraints exclude all Go
files", just ignore it.)
This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
can alternatively use `git` directly with:
```sh
$ git clone https://github.com/open-telemetry/opentelemetry-go
```
(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
that name is a kind of a redirector to GitHub that `go get` can
understand, but `git` does not.)
This would put the project in the `opentelemetry-go` directory in
current working directory.
Enter the newly created directory and add your fork as a new remote:
```sh
$ git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
```
Check out a new branch, make modifications, run linters and tests, update
`CHANGELOG.md`, and push the branch to your fork:
```sh
$ git checkout -b <YOUR_BRANCH_NAME>
# edit files
# update changelog
$ make precommit
$ git add -p
$ git commit
$ git push <YOUR_FORK> <YOUR_BRANCH_NAME>
```
Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
request ID to the entry you added to `CHANGELOG.md`.
### How to Receive Comments
* If the PR is not ready for review, please put `[WIP]` in the title,
tag it as `work-in-progress`, or mark it as
[`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
* Make sure CLA is signed and CI is clear.
### How to Get PRs Merged
A PR is considered to be **ready to merge** when:
* It has received two approvals from Collaborators/Maintainers (at
different companies). This is not enforced through technical means
and a PR may be **ready to merge** with a single approval if the change
and its approach have been discussed and consensus reached.
* Major feedback is resolved.
* It has been open for review for at least one working day. This gives
people reasonable time to review.
* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for
one day and may be merged with a single Maintainer's approval.
* `CHANGELOG.md` has been updated to reflect what has been
added, changed, removed, or fixed.
* An urgent fix can take exception as long as it has been actively
communicated.
Any Maintainer can merge the PR once it is **ready to merge**.
## Design Choices
As with other OpenTelemetry clients, opentelemetry-go follows the
[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification).
It's especially valuable to read through the [library
guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/library-guidelines.md).
### Focus on Capabilities, Not Structure Compliance
OpenTelemetry is an evolving specification, one where the desires and
use cases are clear, but the methods to satisfy those use cases are
not.
As such, contributions should provide functionality and behavior that
conforms to the specification, but the interface and structure are
flexible.
It is preferable to have contributions follow the idioms of the
language rather than conform to specific API names or argument
patterns in the spec.
For a deeper discussion, see:
https://github.com/open-telemetry/opentelemetry-specification/issues/165
## Style Guide
One of the primary goals of this project is that it is actually used by
developers. With this goal in mind the project strives to build
user-friendly and idiomatic Go code adhering to the Go community's best
practices.
For a non-comprehensive but foundational overview of these best practices
the [Effective Go](https://golang.org/doc/effective_go.html) documentation
is an excellent starting place.
As a convenience for developers building this project, the `make precommit`
target will format, lint, validate, and in some cases fix the changes you
plan to submit. This check will need to pass for your changes to be able to
be merged.
In addition to idiomatic Go, the project has adopted certain standards for
implementations of common patterns. These standards should be followed as a
default, and if they are not followed documentation needs to be included as
to the reasons why.
### Configuration
When creating an instantiation function for a complex `struct` it is useful
to allow a variable number of options to be applied. However, the strong type
system of Go restricts the function design options. There are a few ways to
solve this problem, but we have landed on the following design.
#### `config`
Configuration should be held in a `struct` named `config`, or prefixed with
the specific type name this configuration applies to if there are multiple
`config`s in the package. This `struct` must contain configuration options.
```go
// config contains configuration options for a thing.
type config struct {
// options ...
}
```
In general the `config` `struct` will not need to be used externally to the
package and should be unexported. If, however, it is expected that the user
will likely want to build custom options for the configuration, the `config`
should be exported. Please include in the documentation for the `config`
how the user can extend the configuration.
It is important that `config`s are not shared across package boundaries,
meaning a `config` from one package should not be directly used by another.
Optionally, it is common to include a `newConfig` function (with the same
naming scheme). This function wraps any defaults setting and looping over
all options to create a configured `config`.
```go
// newConfig returns an appropriately configured config.
func newConfig(options []Option) config {
// Set default values for config.
config := config{/* […] */}
for _, option := range options {
option.Apply(&config)
}
// Perform any validation here.
return config
}
```
If validation of the `config` options is also performed, `newConfig` can
return an error as well, which is expected to be handled by the instantiation
function or propagated to the user.
Given the design goal of not having the user need to work with the `config`,
the `newConfig` function should also be unexported.
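As a minimal sketch, assuming the `config` and `Option` types from the examples above, a hypothetical `Endpoint` field, and the standard library `errors` package, a validating variant of `newConfig` might look like:
```go
// newConfig applies all options and then validates the resulting config.
// The Endpoint field and its default value are hypothetical, shown only for
// illustration.
func newConfig(options []Option) (config, error) {
	cfg := config{Endpoint: "localhost:4317"} // default values
	for _, option := range options {
		option.Apply(&cfg)
	}
	if cfg.Endpoint == "" {
		return config{}, errors.New("endpoint must not be empty")
	}
	return cfg, nil
}
```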
#### `Option`
To set the value of the options a `config` contains, a corresponding
`Option` interface type should be used.
```go
type Option interface {
Apply(*config)
}
```
The name of the interface should be prefixed in the same way the
corresponding `config` is (if at all).
#### Options
All user configurable options for a `config` must have a related unexported
implementation of the `Option` interface and an exported configuration
function that wraps this implementation.
The wrapping function name should be prefixed with `With*` (or in the
special case of a boolean option, `Without*`) and should have the following
function signature.
```go
func With*(…) Option { … }
```
##### `bool` Options
```go
type defaultFalseOption bool
func (o defaultFalseOption) Apply(c *config) {
c.Bool = bool(o)
}
// WithOption sets a T* to have an option included.
func WithOption() Option {
return defaultFalseOption(true)
}
```
```go
type defaultTrueOption bool
func (o defaultTrueOption) Apply(c *config) {
c.Bool = bool(o)
}
// WithoutOption sets a T* to have Bool option excluded.
func WithoutOption() Option {
return defaultTrueOption(false)
}
```
##### Declared Type Options
```go
type myTypeOption struct {
MyType MyType
}
func (o myTypeOption) Apply(c *config) {
c.MyType = o.MyType
}
// WithMyType sets T* to include MyType.
func WithMyType(t MyType) Option {
return myTypeOption{t}
}
```
#### Instantiation
Use this configuration pattern to configure instantiation with a `New*`
function.
```go
func NewT*(options ...Option) T* {…}
```
Any required parameters can be declared before the variadic `options`.
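For example, a sketch of a constructor with one required parameter, using a hypothetical `Thing` type together with the `config`/`Option` pattern above:
```go
// Thing is a hypothetical type configured with the pattern described above.
type Thing struct {
	name string
	cfg  config
}

// NewThing takes the required name first, followed by any number of options.
func NewThing(name string, options ...Option) Thing {
	return Thing{
		name: name,
		cfg:  newConfig(options),
	}
}
```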
#### Dealing with Overlap
Sometimes there are multiple complex `struct`s that share common
configuration and also have distinct configuration. To avoid repeated
portions of `config`s, a common `config` can be used with the union of
options being handled with the `Option` interface.
For example:
```go
// config holds options for all animals.
type config struct {
Weight float64
Color string
MaxAltitude float64
}
// DogOption applies Dog-specific options.
type DogOption interface {
ApplyDog(*config)
}
// BirdOption applies Bird-specific options.
type BirdOption interface {
ApplyBird(*config)
}
// Option applies options for all animals.
type Option interface {
BirdOption
DogOption
}
type weightOption float64
func (o weightOption) ApplyDog(c *config) { c.Weight = float64(o) }
func (o weightOption) ApplyBird(c *config) { c.Weight = float64(o) }
func WithWeight(w float64) Option { return weightOption(w) }
type furColorOption string
func (o furColorOption) ApplyDog(c *config) { c.Color = string(o) }
func WithFurColor(c string) DogOption { return furColorOption(c) }
type maxAltitudeOption float64
func (o maxAltitudeOption) ApplyBird(c *config) { c.MaxAltitude = float64(o) }
func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
func NewDog(name string, o ...DogOption) Dog {…}
func NewBird(name string, o ...BirdOption) Bird {…}
```
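A hypothetical usage, assuming `Dog` and `Bird` types exist as sketched above, could then combine shared and type-specific options:
```go
func ExampleAnimals() {
	// WithWeight returns an Option, so both constructors accept it, while
	// WithFurColor is Dog-specific and WithMaxAltitude is Bird-specific.
	dog := NewDog("Rex", WithWeight(30), WithFurColor("brown"))
	bird := NewBird("Robin", WithWeight(0.02), WithMaxAltitude(1500))
	_, _ = dog, bird
}
```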
### Interface Type
To allow other developers to better comprehend the code, it is important
to ensure it is sufficiently documented. One simple measure that contributes
to this aim is to make code self-documenting by naming method parameters.
Therefore, where appropriate, the methods of every exported interface type
should have their parameters appropriately named.
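For instance, in a hypothetical exporter interface (assuming a `SpanData` type and the standard `context` package), the parameter names carry the documentation:
```go
// SpanBatcher is a hypothetical interface; naming ctx and batch makes the
// method self-documenting compared to listing bare parameter types.
type SpanBatcher interface {
	ExportSpans(ctx context.Context, batch []SpanData) error
}
```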
## Approvers and Maintainers
Approvers:
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
- [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Josh MacDonald](https://github.com/jmacd), LightStep
- [Sam Xie](https://github.com/XSAM)
Maintainers:
- [Anthony Mirabella](https://github.com/Aneurysm9), Centene
- [Tyler Yahn](https://github.com/MrAlias), New Relic
### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community
repo](https://github.com/open-telemetry/community/blob/master/community-membership.md).

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,177 +0,0 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
EXAMPLES := $(shell ./get_main_pkgs.sh ./example)
TOOLS_MOD_DIR := ./internal/tools
# All source code and documents. Used in spell check.
ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
# All directories with go.mod files related to opentelemetry library. Used for building, testing and linting.
ALL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort))
ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
# macOS Catalina (10.15.x) and later don't support 386, so skip the 386 test on those versions
SKIP_386_TEST = false
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Darwin)
SW_VERS := $(shell sw_vers -productVersion)
ifeq ($(shell echo $(SW_VERS) | egrep '^(10.1[5-9]|1[1-9]|[2-9])'), $(SW_VERS))
SKIP_386_TEST = true
endif
endif
GOTEST_MIN = go test -timeout 30s
GOTEST = $(GOTEST_MIN) -race
GOTEST_WITH_COVERAGE = $(GOTEST) -coverprofile=coverage.out -covermode=atomic -coverpkg=./...
.DEFAULT_GOAL := precommit
.PHONY: precommit
TOOLS_DIR := $(abspath ./.tools)
$(TOOLS_DIR)/golangci-lint: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
cd $(TOOLS_MOD_DIR) && \
go build -o $(TOOLS_DIR)/golangci-lint github.com/golangci/golangci-lint/cmd/golangci-lint
$(TOOLS_DIR)/misspell: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
cd $(TOOLS_MOD_DIR) && \
go build -o $(TOOLS_DIR)/misspell github.com/client9/misspell/cmd/misspell
$(TOOLS_DIR)/stringer: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
cd $(TOOLS_MOD_DIR) && \
go build -o $(TOOLS_DIR)/stringer golang.org/x/tools/cmd/stringer
$(TOOLS_DIR)/gojq: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
cd $(TOOLS_MOD_DIR) && \
go build -o $(TOOLS_DIR)/gojq github.com/itchyny/gojq/cmd/gojq
precommit: dependabot-check license-check generate build lint examples test-benchmarks test
.PHONY: test-with-coverage
test-with-coverage:
set -e; \
printf "" > coverage.txt; \
for dir in $(ALL_COVERAGE_MOD_DIRS); do \
echo "go test ./... + coverage in $${dir}"; \
(cd "$${dir}" && \
$(GOTEST_WITH_COVERAGE) ./... && \
go tool cover -html=coverage.out -o coverage.html); \
[ -f "$${dir}/coverage.out" ] && cat "$${dir}/coverage.out" >> coverage.txt; \
done; \
sed -i.bak -e '2,$$ { /^mode: /d; }' coverage.txt
.PHONY: ci
ci: precommit check-clean-work-tree test-with-coverage test-386
.PHONY: check-clean-work-tree
check-clean-work-tree:
@if ! git diff --quiet; then \
echo; \
echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
echo; \
git status; \
exit 1; \
fi
.PHONY: build
build:
# TODO: Fix this on windows.
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "compiling all packages in $${dir}"; \
(cd "$${dir}" && \
go build ./... && \
go test -run xxxxxMatchNothingxxxxx ./... >/dev/null); \
done
.PHONY: test
test:
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "go test ./... + race in $${dir}"; \
(cd "$${dir}" && \
$(GOTEST) ./...); \
done
.PHONY: test-386
test-386:
if [ $(SKIP_386_TEST) = true ] ; then \
echo "skipping the test for GOARCH 386 as it is not supported on the current OS"; \
else \
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "go test ./... GOARCH 386 in $${dir}"; \
(cd "$${dir}" && \
GOARCH=386 $(GOTEST_MIN) ./...); \
done; \
fi
.PHONY: examples
examples:
@set -e; for ex in $(EXAMPLES); do \
echo "Building $${ex}"; \
(cd "$${ex}" && \
go build .); \
done
.PHONY: test-benchmarks
test-benchmarks:
@set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "test benchmarks in $${dir}"; \
(cd "$${dir}" && go test -test.benchtime=1ms -run=NONE -bench=. ./...) > /dev/null; \
done
.PHONY: lint
lint: $(TOOLS_DIR)/golangci-lint $(TOOLS_DIR)/misspell
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "golangci-lint in $${dir}"; \
(cd "$${dir}" && \
$(TOOLS_DIR)/golangci-lint run --fix && \
$(TOOLS_DIR)/golangci-lint run); \
done
$(TOOLS_DIR)/misspell -w $(ALL_DOCS)
set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \
echo "go mod tidy in $${dir}"; \
(cd "$${dir}" && \
go mod tidy); \
done
generate: $(TOOLS_DIR)/stringer
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
echo "running generators in $${dir}"; \
(cd "$${dir}" && \
PATH="$(TOOLS_DIR):$${PATH}" go generate ./...); \
done
.PHONY: license-check
license-check:
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path './vendor/*' ! -path './exporters/otlp/internal/opentelemetry-proto/*') ; do \
awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
done); \
if [ -n "$${licRes}" ]; then \
echo "license header checking failed:"; echo "$${licRes}"; \
exit 1; \
fi
.PHONY: dependabot-check
dependabot-check:
@result=$$( \
for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.\/\?/\//' ); \
do grep -q "$$f" .github/dependabot.yml \
|| echo "$$f"; \
done; \
); \
if [ -n "$$result" ]; then \
echo "missing go.mod dependabot check:"; echo "$$result"; \
exit 1; \
fi

View File

@ -1,72 +0,0 @@
# -*- mode: makefile; -*-
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This Makefile.proto has rules to generate *.pb.go files in
# `exporters/otlp/internal/opentelemetry-proto-gen` from the .proto files in
# `exporters/otlp/internal/opentelemetry-proto` using protoc with a go plugin.
#
# The protoc binary and other tools are sourced from a docker image
# `PROTOC_IMAGE`.
#
# Prereqs: The archiving utility `pax` is installed.
PROTOC_IMAGE := namely/protoc-all:1.29_2
PROTOBUF_VERSION := v1
OTEL_PROTO_SUBMODULE := exporters/otlp/internal/opentelemetry-proto
PROTOBUF_GEN_DIR := exporters/otlp/internal/opentelemetry-proto-gen
PROTOBUF_TEMP_DIR := gen/pb-go
PROTO_SOURCE_DIR := gen/proto
SUBMODULE_PROTO_FILES := $(wildcard $(OTEL_PROTO_SUBMODULE)/opentelemetry/proto/*/$(PROTOBUF_VERSION)/*.proto \
$(OTEL_PROTO_SUBMODULE)/opentelemetry/proto/collector/*/$(PROTOBUF_VERSION)/*.proto)
SOURCE_PROTO_FILES := $(subst $(OTEL_PROTO_SUBMODULE),$(PROTO_SOURCE_DIR),$(SUBMODULE_PROTO_FILES))
default: protobuf
.PHONY: protobuf protobuf-source gen-protobuf copy-protobufs
protobuf: protobuf-source gen-protobuf copy-protobufs
protobuf-source: $(SOURCE_PROTO_FILES) | $(PROTO_SOURCE_DIR)/
# Changes go_package in .proto file to point to repo-local location
define exec-replace-pkgname
sed 's,go_package = "github.com/open-telemetry/opentelemetry-proto/gen/go,go_package = "go.opentelemetry.io/otel/exporters/otlp/internal/opentelemetry-proto-gen,' < $(1) > $(2)
endef
# replace opentelemetry-proto package name by go.opentelemetry.io/otel specific version
$(SOURCE_PROTO_FILES): $(PROTO_SOURCE_DIR)/%.proto: $(OTEL_PROTO_SUBMODULE)/%.proto
@mkdir -p $(@D)
$(call exec-replace-pkgname,$<,$@)
# Command to run protoc using docker image
define exec-protoc-all
docker run -v `pwd`:/defs $(PROTOC_IMAGE) $(1)
endef
gen-protobuf: $(SOURCE_PROTO_FILES) | $(PROTOBUF_GEN_DIR)/
$(foreach file,$(subst ${PROTO_SOURCE_DIR}/,,$(SOURCE_PROTO_FILES)),$(call exec-protoc-all, -i $(PROTO_SOURCE_DIR) -f ${file} -l gogo -o ${PROTOBUF_TEMP_DIR}))
# requires `pax` to be installed, as it has consistent options for both BSD (Darwin) and Linux
copy-protobufs: | $(PROTOBUF_GEN_DIR)/
find ./$(PROTOBUF_TEMP_DIR)/go.opentelemetry.io/otel/$(PROTOBUF_GEN_DIR) -type f -print0 | \
pax -0 -s ',^./$(PROTOBUF_TEMP_DIR)/go.opentelemetry.io/otel/$(PROTOBUF_GEN_DIR),,' -rw ./$(PROTOBUF_GEN_DIR)
$(PROTO_SOURCE_DIR)/ $(PROTOBUF_GEN_DIR)/:
mkdir -p $@
.PHONY: clean
clean:
rm -rf ./gen

View File

@ -1,57 +0,0 @@
# OpenTelemetry-Go
[![Circle CI](https://circleci.com/gh/open-telemetry/opentelemetry-go.svg?style=svg)](https://circleci.com/gh/open-telemetry/opentelemetry-go)
[![Docs](https://godoc.org/go.opentelemetry.io/otel?status.svg)](https://pkg.go.dev/go.opentelemetry.io/otel)
[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
[![Gitter](https://badges.gitter.im/open-telemetry/opentelemetry-go.svg)](https://gitter.im/open-telemetry/opentelemetry-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
The Go [OpenTelemetry](https://opentelemetry.io/) implementation.
## Getting Started
OpenTelemetry's goal is to provide a single set of APIs to capture distributed
traces and metrics from your application and send them to an observability
platform. This project allows you to do just that for applications written in
Go. There are two steps to this process: instrument your application, and
configure an exporter.
### Instrumentation
To start capturing distributed traces and metric events from your application
it first needs to be instrumented. The easiest way to do this is by using an
instrumentation library for your code. Be sure to check out [the officially
supported instrumentation
libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/master/instrumentation).
If you need to extend the telemetry an instrumentation library provides, or want
to build your own instrumentation for your application directly, you will need
to use the
[go.opentelemetry.io/otel/api](https://pkg.go.dev/go.opentelemetry.io/otel/api)
package. The included [examples](./example/) are a good way to see some
practical uses of this process.
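For example, here is a minimal sketch of manual instrumentation using the
`go.opentelemetry.io/otel/api/global` package from this module; the tracer name
`example.com/foo` and the `doWork` function are illustrative, and spans remain
no-ops until an SDK-backed TracerProvider is registered:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/api/global"
)

func main() {
	// Acquire a named Tracer from the global provider. It forwards to a
	// no-op implementation until global.SetTracerProvider is called.
	tracer := global.Tracer("example.com/foo")

	// Wrap a unit of work in a span.
	ctx, span := tracer.Start(context.Background(), "do-work")
	defer span.End()

	doWork(ctx)
}

// doWork stands in for instrumented application code.
func doWork(ctx context.Context) {}
```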
### Export
Now that your application is instrumented to collect telemetry, it needs an
export pipeline to send that telemetry to an observability platform.
You can find officially supported exporters [here](./exporters/) and in the
companion [contrib
repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/master/exporters/metric).
Additionally, there are many vendor specific or 3rd party exporters for
OpenTelemetry. These exporters are broken down by
[trace](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/trace?tab=importedby)
and
[metric](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/metric?tab=importedby)
support.
## Project Status
[Project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
and [milestones](https://github.com/open-telemetry/opentelemetry-go/milestones)
can be found at the respective links. We try to keep these accurate; they should
be the best place to go for answers on project status.
## Contributing
See the [contributing documentation](CONTRIBUTING.md).

View File

@ -1,81 +0,0 @@
# Release Process
## Pre-Release
Update go.mod for submodules to depend on the new release, which will be created in the next step.
1. Run the pre-release script. It creates a branch `pre_release_<new tag>` that will contain all release changes.
```
./pre_release.sh -t <new tag>
```
2. Verify the changes.
```
git diff master
```
This should have changed the version for all modules to be `<new tag>`.
3. Update the [Changelog](./CHANGELOG.md).
- Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
To verify this, you can look directly at the commits since the `<last tag>`.
```
git --no-pager log --pretty=oneline "<last tag>..HEAD"
```
- Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
- Update all the appropriate links at the bottom.
4. Push the changes to upstream and create a Pull Request on GitHub.
Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
## Tag
Once the Pull Request with all the version changes has been approved and merged, it is time to tag the merged commit.
***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
Failure to do so will leave things in a broken state.
***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
It is critical you make sure the version you push upstream is correct.
[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
1. Run the tag.sh script using the `<commit-hash>` of the commit on the master branch for the merged Pull Request.
```
./tag.sh <new tag> <commit-hash>
```
2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
Make sure you push all sub-modules as well.
```
git push upstream <new tag>
git push upstream <submodules-path/new tag>
...
```
## Release
Finally create a Release for the new `<new tag>` on GitHub.
The release body should include all the release notes from the Changelog for this release.
Additionally, the `tag.sh` script generates commit logs since last release which can be used to supplement the release notes.
## Verify Examples
After releasing, verify that the examples build outside of the repository.
```
./verify_examples.sh
```
The script copies the examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
This ensures they build with the published release, not the local copy.
## Contrib Repository
Once verified, be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/master/RELEASING.md) that uses this release.

View File

@ -1,16 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package global provides global providers, propagators and more.
package global // import "go.opentelemetry.io/otel/api/global"

View File

@ -1,91 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global
import (
"log"
"os"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel"
)
var (
// globalErrorHandler provides an ErrorHandler that can be used
// throughout an OpenTelemetry instrumented project. When a user-specified
// ErrorHandler is registered (`SetErrorHandler`), all calls to `Handle` will
// be delegated to the registered ErrorHandler.
globalErrorHandler = &loggingErrorHandler{
l: log.New(os.Stderr, "", log.LstdFlags),
}
// delegateErrorHandlerOnce ensures that a user provided ErrorHandler is
// only ever registered once.
delegateErrorHandlerOnce sync.Once
// Compile-time check that loggingErrorHandler implements ErrorHandler.
_ otel.ErrorHandler = (*loggingErrorHandler)(nil)
)
// loggingErrorHandler logs all errors to STDERR.
type loggingErrorHandler struct {
delegate atomic.Value
l *log.Logger
}
// setDelegate sets the ErrorHandler delegate if one is not already set.
func (h *loggingErrorHandler) setDelegate(d otel.ErrorHandler) {
if h.delegate.Load() != nil {
// Delegate already registered
return
}
h.delegate.Store(d)
}
// Handle implements otel.ErrorHandler.
func (h *loggingErrorHandler) Handle(err error) {
if d := h.delegate.Load(); d != nil {
d.(otel.ErrorHandler).Handle(err)
return
}
h.l.Print(err)
}
// ErrorHandler returns the global ErrorHandler instance. If no ErrorHandler
// instance has been set (`SetErrorHandler`), the default ErrorHandler which
// logs errors to STDERR is returned.
func ErrorHandler() otel.ErrorHandler {
return globalErrorHandler
}
// SetErrorHandler sets the global ErrorHandler to be h.
func SetErrorHandler(h otel.ErrorHandler) {
delegateErrorHandlerOnce.Do(func() {
current := ErrorHandler()
if current == h {
return
}
if internalHandler, ok := current.(*loggingErrorHandler); ok {
internalHandler.setDelegate(h)
}
})
}
// Handle is a convenience function for ErrorHandler().Handle(err).
func Handle(err error) {
ErrorHandler().Handle(err)
}
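A minimal sketch of how this delegating handler is used from application code,
relying only on the `SetErrorHandler`, `Handle`, and `otel.ErrorHandler` APIs
above; the `prefixHandler` type and the messages are illustrative:

```go
package main

import (
	"errors"
	"log"
	"os"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/api/global"
)

// prefixHandler is an illustrative custom handler; any type with a
// Handle(error) method satisfies otel.ErrorHandler.
type prefixHandler struct{ l *log.Logger }

func (h prefixHandler) Handle(err error) { h.l.Printf("otel error: %v", err) }

func main() {
	// Before registration, errors go to the built-in loggingErrorHandler,
	// which writes to STDERR.
	global.Handle(errors.New("reported before SetErrorHandler"))

	// Register a delegate; subsequent Handle calls are forwarded to it.
	// Only the first registration ever takes effect.
	var h otel.ErrorHandler = prefixHandler{l: log.New(os.Stderr, "example ", log.LstdFlags)}
	global.SetErrorHandler(h)

	global.Handle(errors.New("reported after SetErrorHandler"))
}
```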

View File

@ -1,347 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"context"
"sync"
"sync/atomic"
"unsafe"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/api/metric/registry"
"go.opentelemetry.io/otel/label"
)
// This file contains the forwarding implementation of MeterProvider used as
// the default global instance. Metric events using instruments provided by
// this implementation are no-ops until the first Meter implementation is set
// as the global provider.
//
// The implementation here uses Mutexes to maintain a list of active Meters in
// the MeterProvider and Instruments in each Meter, under the assumption that
// these interfaces are not performance-critical.
//
// We have the invariant that setDelegate() will be called before a new
// MeterProvider implementation is registered as the global provider. Mutexes
// in the MeterProvider and Meters ensure that each instrument has a delegate
// before the global provider is set.
//
// Bound instrument operations are implemented by delegating to the
// instrument after it is registered, with a sync.Once initializer to
// protect against races with Release().
//
// Metric uniqueness checking is implemented by calling the exported
// methods of the api/metric/registry package.
type meterKey struct {
Name, Version string
}
type meterProvider struct {
delegate metric.MeterProvider
// lock protects `delegate` and `meters`.
lock sync.Mutex
// meters maintains a unique entry for every named Meter
// that has been registered through the global instance.
meters map[meterKey]*meterEntry
}
type meterImpl struct {
delegate unsafe.Pointer // (*metric.MeterImpl)
lock sync.Mutex
syncInsts []*syncImpl
asyncInsts []*asyncImpl
}
type meterEntry struct {
unique metric.MeterImpl
impl meterImpl
}
type instrument struct {
descriptor metric.Descriptor
}
type syncImpl struct {
delegate unsafe.Pointer // (*metric.SyncImpl)
instrument
}
type asyncImpl struct {
delegate unsafe.Pointer // (*metric.AsyncImpl)
instrument
runner metric.AsyncRunner
}
// SyncImpler is implemented by all of the sync metric
// instruments.
type SyncImpler interface {
SyncImpl() metric.SyncImpl
}
// AsyncImpler is implemented by all of the async
// metric instruments.
type AsyncImpler interface {
AsyncImpl() metric.AsyncImpl
}
type syncHandle struct {
delegate unsafe.Pointer // (*metric.HandleImpl)
inst *syncImpl
labels []label.KeyValue
initialize sync.Once
}
var _ metric.MeterProvider = &meterProvider{}
var _ metric.MeterImpl = &meterImpl{}
var _ metric.InstrumentImpl = &syncImpl{}
var _ metric.BoundSyncImpl = &syncHandle{}
var _ metric.AsyncImpl = &asyncImpl{}
func (inst *instrument) Descriptor() metric.Descriptor {
return inst.descriptor
}
// MeterProvider interface and delegation
func newMeterProvider() *meterProvider {
return &meterProvider{
meters: map[meterKey]*meterEntry{},
}
}
func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
p.lock.Lock()
defer p.lock.Unlock()
p.delegate = provider
for key, entry := range p.meters {
entry.impl.setDelegate(key.Name, key.Version, provider)
}
p.meters = nil
}
func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
p.lock.Lock()
defer p.lock.Unlock()
if p.delegate != nil {
return p.delegate.Meter(instrumentationName, opts...)
}
key := meterKey{
Name: instrumentationName,
Version: metric.NewMeterConfig(opts...).InstrumentationVersion,
}
entry, ok := p.meters[key]
if !ok {
entry = &meterEntry{}
entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl)
p.meters[key] = entry
}
return metric.WrapMeterImpl(entry.unique, key.Name, metric.WithInstrumentationVersion(key.Version))
}
// Meter interface and delegation
func (m *meterImpl) setDelegate(name, version string, provider metric.MeterProvider) {
m.lock.Lock()
defer m.lock.Unlock()
d := new(metric.MeterImpl)
*d = provider.Meter(name, metric.WithInstrumentationVersion(version)).MeterImpl()
m.delegate = unsafe.Pointer(d)
for _, inst := range m.syncInsts {
inst.setDelegate(*d)
}
m.syncInsts = nil
for _, obs := range m.asyncInsts {
obs.setDelegate(*d)
}
m.asyncInsts = nil
}
func (m *meterImpl) NewSyncInstrument(desc metric.Descriptor) (metric.SyncImpl, error) {
m.lock.Lock()
defer m.lock.Unlock()
if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
return (*meterPtr).NewSyncInstrument(desc)
}
inst := &syncImpl{
instrument: instrument{
descriptor: desc,
},
}
m.syncInsts = append(m.syncInsts, inst)
return inst, nil
}
// Synchronous delegation
func (inst *syncImpl) setDelegate(d metric.MeterImpl) {
implPtr := new(metric.SyncImpl)
var err error
*implPtr, err = d.NewSyncInstrument(inst.descriptor)
if err != nil {
// TODO: There is no standard way to deliver this error to the user.
// See https://github.com/open-telemetry/opentelemetry-go/issues/514
// Note that the default SDK will not generate any errors yet, this is
// only for added safety.
panic(err)
}
atomic.StorePointer(&inst.delegate, unsafe.Pointer(implPtr))
}
func (inst *syncImpl) Implementation() interface{} {
if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
return (*implPtr).Implementation()
}
return inst
}
func (inst *syncImpl) Bind(labels []label.KeyValue) metric.BoundSyncImpl {
if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
return (*implPtr).Bind(labels)
}
return &syncHandle{
inst: inst,
labels: labels,
}
}
func (bound *syncHandle) Unbind() {
bound.initialize.Do(func() {})
implPtr := (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
if implPtr == nil {
return
}
(*implPtr).Unbind()
}
// Async delegation
func (m *meterImpl) NewAsyncInstrument(
desc metric.Descriptor,
runner metric.AsyncRunner,
) (metric.AsyncImpl, error) {
m.lock.Lock()
defer m.lock.Unlock()
if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
return (*meterPtr).NewAsyncInstrument(desc, runner)
}
inst := &asyncImpl{
instrument: instrument{
descriptor: desc,
},
runner: runner,
}
m.asyncInsts = append(m.asyncInsts, inst)
return inst, nil
}
func (obs *asyncImpl) Implementation() interface{} {
if implPtr := (*metric.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil {
return (*implPtr).Implementation()
}
return obs
}
func (obs *asyncImpl) setDelegate(d metric.MeterImpl) {
implPtr := new(metric.AsyncImpl)
var err error
*implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner)
if err != nil {
// TODO: There is no standard way to deliver this error to the user.
// See https://github.com/open-telemetry/opentelemetry-go/issues/514
// Note that the default SDK will not generate any errors yet, this is
// only for added safety.
panic(err)
}
atomic.StorePointer(&obs.delegate, unsafe.Pointer(implPtr))
}
// Metric updates
func (m *meterImpl) RecordBatch(ctx context.Context, labels []label.KeyValue, measurements ...metric.Measurement) {
if delegatePtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil {
(*delegatePtr).RecordBatch(ctx, labels, measurements...)
}
}
func (inst *syncImpl) RecordOne(ctx context.Context, number metric.Number, labels []label.KeyValue) {
if instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil {
(*instPtr).RecordOne(ctx, number, labels)
}
}
// Bound instrument initialization
func (bound *syncHandle) RecordOne(ctx context.Context, number metric.Number) {
instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&bound.inst.delegate))
if instPtr == nil {
return
}
var implPtr *metric.BoundSyncImpl
bound.initialize.Do(func() {
implPtr = new(metric.BoundSyncImpl)
*implPtr = (*instPtr).Bind(bound.labels)
atomic.StorePointer(&bound.delegate, unsafe.Pointer(implPtr))
})
if implPtr == nil {
implPtr = (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
}
// This may still be nil if the instrument was created and bound
// without a delegate, and the instrument was later given a
// delegate and then unbound.
if implPtr == nil {
return
}
(*implPtr).RecordOne(ctx, number)
}
func AtomicFieldOffsets() map[string]uintptr {
return map[string]uintptr{
"meterProvider.delegate": unsafe.Offsetof(meterProvider{}.delegate),
"meterImpl.delegate": unsafe.Offsetof(meterImpl{}.delegate),
"syncImpl.delegate": unsafe.Offsetof(syncImpl{}.delegate),
"asyncImpl.delegate": unsafe.Offsetof(asyncImpl{}.delegate),
"syncHandle.delegate": unsafe.Offsetof(syncHandle{}.delegate),
}
}

View File

@ -1,134 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"sync"
"sync/atomic"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/api/trace"
)
type (
tracerProviderHolder struct {
tp trace.TracerProvider
}
meterProviderHolder struct {
mp metric.MeterProvider
}
propagatorsHolder struct {
tm otel.TextMapPropagator
}
)
var (
globalTracer = defaultTracerValue()
globalMeter = defaultMeterValue()
globalPropagators = defaultPropagatorsValue()
delegateMeterOnce sync.Once
delegateTraceOnce sync.Once
)
// TracerProvider is the internal implementation for global.TracerProvider.
func TracerProvider() trace.TracerProvider {
return globalTracer.Load().(tracerProviderHolder).tp
}
// SetTracerProvider is the internal implementation for global.SetTracerProvider.
func SetTracerProvider(tp trace.TracerProvider) {
delegateTraceOnce.Do(func() {
current := TracerProvider()
if current == tp {
// Setting the provider to the prior default is nonsense, panic.
// Panic is acceptable because we are likely still early in the
// process lifetime.
panic("invalid TracerProvider, the global instance cannot be reinstalled")
} else if def, ok := current.(*tracerProvider); ok {
def.setDelegate(tp)
}
})
globalTracer.Store(tracerProviderHolder{tp: tp})
}
// MeterProvider is the internal implementation for global.MeterProvider.
func MeterProvider() metric.MeterProvider {
return globalMeter.Load().(meterProviderHolder).mp
}
// SetMeterProvider is the internal implementation for global.SetMeterProvider.
func SetMeterProvider(mp metric.MeterProvider) {
delegateMeterOnce.Do(func() {
current := MeterProvider()
if current == mp {
// Setting the provider to the prior default is nonsense, panic.
// Panic is acceptable because we are likely still early in the
// process lifetime.
panic("invalid MeterProvider, the global instance cannot be reinstalled")
} else if def, ok := current.(*meterProvider); ok {
def.setDelegate(mp)
}
})
globalMeter.Store(meterProviderHolder{mp: mp})
}
// TextMapPropagator is the internal implementation for global.TextMapPropagator.
func TextMapPropagator() otel.TextMapPropagator {
return globalPropagators.Load().(propagatorsHolder).tm
}
// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
func SetTextMapPropagator(p otel.TextMapPropagator) {
globalPropagators.Store(propagatorsHolder{tm: p})
}
func defaultTracerValue() *atomic.Value {
v := &atomic.Value{}
v.Store(tracerProviderHolder{tp: &tracerProvider{}})
return v
}
func defaultMeterValue() *atomic.Value {
v := &atomic.Value{}
v.Store(meterProviderHolder{mp: newMeterProvider()})
return v
}
func defaultPropagatorsValue() *atomic.Value {
v := &atomic.Value{}
v.Store(propagatorsHolder{tm: getDefaultTextMapPropagator()})
return v
}
// getDefaultTextMapPropagator returns the default TextMapPropagator,
// configured with W3C trace and baggage propagation.
func getDefaultTextMapPropagator() otel.TextMapPropagator {
return otel.NewCompositeTextMapPropagator()
}
// ResetForTest restores the initial global state, for testing purposes.
func ResetForTest() {
globalTracer = defaultTracerValue()
globalMeter = defaultMeterValue()
globalPropagators = defaultPropagatorsValue()
delegateMeterOnce = sync.Once{}
delegateTraceOnce = sync.Once{}
}

View File

@ -1,128 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
/*
This file contains the forwarding implementation of the TracerProvider used as
the default global instance. Prior to initialization of an SDK, Tracers
returned by the global TracerProvider will provide no-op functionality. This
means that all Spans created prior to initialization are no-op Spans.
Once an SDK has been initialized, all provided no-op Tracers are swapped for
Tracers provided by the SDK-defined TracerProvider. However, any Span started
prior to this initialization does not change its behavior; it remains a no-op
Span.
The implementation to track and swap Tracers locks all new Tracer creation
until the swap is complete. This assumes that this operation is not
performance-critical. If that assumption is incorrect, be sure to configure an
SDK prior to any Tracer creation.
*/
import (
"context"
"sync"
"go.opentelemetry.io/otel/api/trace"
"go.opentelemetry.io/otel/internal/trace/noop"
)
// tracerProvider is a placeholder for a configured SDK TracerProvider.
//
// All TracerProvider functionality is forwarded to a delegate once
// configured.
type tracerProvider struct {
mtx sync.Mutex
tracers []*tracer
delegate trace.TracerProvider
}
// Compile-time guarantee that tracerProvider implements the TracerProvider
// interface.
var _ trace.TracerProvider = &tracerProvider{}
// setDelegate configures p to delegate all TracerProvider functionality to
// provider.
//
// All Tracers provided prior to this function call are switched out to be
// Tracers provided by provider.
//
// Delegation only happens on the first call to this method. All subsequent
// calls result in no delegation changes.
func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
if p.delegate != nil {
return
}
p.mtx.Lock()
defer p.mtx.Unlock()
p.delegate = provider
for _, t := range p.tracers {
t.setDelegate(provider)
}
p.tracers = nil
}
// Tracer implements TracerProvider.
func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
p.mtx.Lock()
defer p.mtx.Unlock()
if p.delegate != nil {
return p.delegate.Tracer(name)
}
t := &tracer{name: name, opts: opts}
p.tracers = append(p.tracers, t)
return t
}
// tracer is a placeholder for a trace.Tracer.
//
// All Tracer functionality is forwarded to a delegate once configured.
// Otherwise, all functionality is forwarded to a NoopTracer.
type tracer struct {
once sync.Once
name string
opts []trace.TracerOption
delegate trace.Tracer
}
// Compile-time guarantee that tracer implements the trace.Tracer interface.
var _ trace.Tracer = &tracer{}
// setDelegate configures t to delegate all Tracer functionality to Tracers
// created by provider.
//
// All subsequent calls to the Tracer methods will be passed to the delegate.
//
// Delegation only happens on the first call to this method. All subsequent
// calls result in no delegation changes.
func (t *tracer) setDelegate(provider trace.TracerProvider) {
t.once.Do(func() { t.delegate = provider.Tracer(t.name, t.opts...) })
}
// Start implements trace.Tracer by forwarding the call to t.delegate if
// set, otherwise it forwards the call to a NoopTracer.
func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanOption) (context.Context, trace.Span) {
if t.delegate != nil {
return t.delegate.Start(ctx, name, opts...)
}
return noop.Tracer.Start(ctx, name, opts...)
}

View File

@ -1,49 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global
import (
"go.opentelemetry.io/otel/api/global/internal"
"go.opentelemetry.io/otel/api/metric"
)
// Meter creates an implementation of the Meter interface from the global
// MeterProvider. The instrumentationName must be the name of the library
// providing instrumentation. This name may be the same as the instrumented
// code only if that code provides built-in instrumentation. If the
// instrumentationName is empty, then an implementation-defined default name
// will be used instead.
//
// This is short for MeterProvider().Meter(name)
func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
return MeterProvider().Meter(instrumentationName, opts...)
}
// MeterProvider returns the registered global meter provider. If
// none is registered then a default meter provider is returned that
// forwards the Meter interface to the first registered Meter.
//
// Use the meter provider to create a named meter. E.g.
// meter := global.MeterProvider().Meter("example.com/foo")
// or
// meter := global.Meter("example.com/foo")
func MeterProvider() metric.MeterProvider {
return internal.MeterProvider()
}
// SetMeterProvider registers `mp` as the global meter provider.
func SetMeterProvider(mp metric.MeterProvider) {
internal.SetMeterProvider(mp)
}
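A minimal sketch of recording through a Meter obtained from these global
helpers; the `NewInt64Counter` constructor and the `label.String` helper are
assumed to exist in this module version, and the instrument name is illustrative:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/api/global"
	"go.opentelemetry.io/otel/label"
)

func main() {
	// Meters from the global provider are no-ops until an SDK
	// MeterProvider is registered via global.SetMeterProvider.
	meter := global.Meter("example.com/foo")

	requests, err := meter.NewInt64Counter("app.requests") // assumed constructor
	if err != nil {
		log.Fatal(err)
	}

	requests.Add(context.Background(), 1, label.String("route", "/home"))
}
```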

View File

@ -1,31 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global
import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/api/global/internal"
)
// TextMapPropagator returns the global TextMapPropagator. If none has been
// set, a No-Op TextMapPropagator is returned.
func TextMapPropagator() otel.TextMapPropagator {
return internal.TextMapPropagator()
}
// SetTextMapPropagator sets propagator as the global TextMapPropagator.
func SetTextMapPropagator(propagator otel.TextMapPropagator) {
internal.SetTextMapPropagator(propagator)
}

View File

@ -1,44 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global
import (
"go.opentelemetry.io/otel/api/global/internal"
"go.opentelemetry.io/otel/api/trace"
)
// Tracer creates a named tracer that implements the Tracer interface.
// If the name is an empty string, then the provider uses a default name.
//
// This is short for TracerProvider().Tracer(name)
func Tracer(name string) trace.Tracer {
return TracerProvider().Tracer(name)
}
// TracerProvider returns the registered global trace provider.
// If none is registered then an instance of NoopTracerProvider is returned.
//
// Use the trace provider to create a named tracer. E.g.
// tracer := global.TracerProvider().Tracer("example.com/foo")
// or
// tracer := global.Tracer("example.com/foo")
func TracerProvider() trace.TracerProvider {
return internal.TracerProvider()
}
// SetTracerProvider registers `tp` as the global trace provider.
func SetTracerProvider(tp trace.TracerProvider) {
internal.SetTracerProvider(tp)
}

View File

@ -1,217 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"go.opentelemetry.io/otel/label"
)
// The file is organized as follows:
//
// - Observation type
// - Three kinds of Observer callback (int64, float64, batch)
// - Three kinds of Observer result (int64, float64, batch)
// - Three kinds of Observe() function (int64, float64, batch)
// - Three kinds of AsyncRunner interface (abstract, single, batch)
// - Two kinds of Observer constructor (int64, float64)
// - Two kinds of Observation() function (int64, float64)
// - Various internals
// Observation is used for reporting an asynchronous batch of metric
// values. Instances of this type should be created by asynchronous
// instruments (e.g., Int64ValueObserver.Observation()).
type Observation struct {
// number needs to be aligned for 64-bit atomic operations.
number Number
instrument AsyncImpl
}
// Int64ObserverFunc is a type of callback that integral
// observers run.
type Int64ObserverFunc func(context.Context, Int64ObserverResult)
// Float64ObserverFunc is a type of callback that floating point
// observers run.
type Float64ObserverFunc func(context.Context, Float64ObserverResult)
// BatchObserverFunc is a callback argument for use with any
// Observer instrument that will be reported as a batch of
// observations.
type BatchObserverFunc func(context.Context, BatchObserverResult)
// Int64ObserverResult is passed to an observer callback to capture
// observations for one asynchronous integer metric instrument.
type Int64ObserverResult struct {
instrument AsyncImpl
function func([]label.KeyValue, ...Observation)
}
// Float64ObserverResult is passed to an observer callback to capture
// observations for one asynchronous floating point metric instrument.
type Float64ObserverResult struct {
instrument AsyncImpl
function func([]label.KeyValue, ...Observation)
}
// BatchObserverResult is passed to a batch observer callback to
// capture observations for multiple asynchronous instruments.
type BatchObserverResult struct {
function func([]label.KeyValue, ...Observation)
}
// Observe captures a single integer value from the associated
// instrument callback, with the given labels.
func (ir Int64ObserverResult) Observe(value int64, labels ...label.KeyValue) {
ir.function(labels, Observation{
instrument: ir.instrument,
number: NewInt64Number(value),
})
}
// Observe captures a single floating point value from the associated
// instrument callback, with the given labels.
func (fr Float64ObserverResult) Observe(value float64, labels ...label.KeyValue) {
fr.function(labels, Observation{
instrument: fr.instrument,
number: NewFloat64Number(value),
})
}
// Observe captures multiple observations from the associated batch
// instrument callback, with the given labels.
func (br BatchObserverResult) Observe(labels []label.KeyValue, obs ...Observation) {
br.function(labels, obs...)
}
// AsyncRunner is expected to convert into an AsyncSingleRunner or an
// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner
// does not satisfy one of these interfaces.
type AsyncRunner interface {
// AnyRunner() is a non-exported method with no functional use
// other than to make this a non-empty interface.
AnyRunner()
}
// AsyncSingleRunner is an interface implemented by single-observer
// callbacks.
type AsyncSingleRunner interface {
// Run accepts a single instrument and function for capturing
// observations of that instrument. Each call to the function
// receives one captured observation. (The function accepts
// multiple observations so the same implementation can be
// used for batch runners.)
Run(ctx context.Context, single AsyncImpl, capture func([]label.KeyValue, ...Observation))
AsyncRunner
}
// AsyncBatchRunner is an interface implemented by batch-observer
// callbacks.
type AsyncBatchRunner interface {
// Run accepts a function for capturing observations of
// multiple instruments.
Run(ctx context.Context, capture func([]label.KeyValue, ...Observation))
AsyncRunner
}
var _ AsyncSingleRunner = (*Int64ObserverFunc)(nil)
var _ AsyncSingleRunner = (*Float64ObserverFunc)(nil)
var _ AsyncBatchRunner = (*BatchObserverFunc)(nil)
// newInt64AsyncRunner returns a single-observer callback for integer Observer instruments.
func newInt64AsyncRunner(c Int64ObserverFunc) AsyncSingleRunner {
return &c
}
// newFloat64AsyncRunner returns a single-observer callback for floating point Observer instruments.
func newFloat64AsyncRunner(c Float64ObserverFunc) AsyncSingleRunner {
return &c
}
// newBatchAsyncRunner returns a batch-observer callback for use with multiple Observer instruments.
func newBatchAsyncRunner(c BatchObserverFunc) AsyncBatchRunner {
return &c
}
// AnyRunner implements AsyncRunner.
func (*Int64ObserverFunc) AnyRunner() {}
// AnyRunner implements AsyncRunner.
func (*Float64ObserverFunc) AnyRunner() {}
// AnyRunner implements AsyncRunner.
func (*BatchObserverFunc) AnyRunner() {}
// Run implements AsyncSingleRunner.
func (i *Int64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]label.KeyValue, ...Observation)) {
(*i)(ctx, Int64ObserverResult{
instrument: impl,
function: function,
})
}
// Run implements AsyncSingleRunner.
func (f *Float64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]label.KeyValue, ...Observation)) {
(*f)(ctx, Float64ObserverResult{
instrument: impl,
function: function,
})
}
// Run implements AsyncBatchRunner.
func (b *BatchObserverFunc) Run(ctx context.Context, function func([]label.KeyValue, ...Observation)) {
(*b)(ctx, BatchObserverResult{
function: function,
})
}
// wrapInt64ValueObserverInstrument converts an AsyncImpl into Int64ValueObserver.
func wrapInt64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Int64ValueObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Int64ValueObserver{asyncInstrument: common}, err
}
// wrapFloat64ValueObserverInstrument converts an AsyncImpl into Float64ValueObserver.
func wrapFloat64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Float64ValueObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Float64ValueObserver{asyncInstrument: common}, err
}
// wrapInt64SumObserverInstrument converts an AsyncImpl into Int64SumObserver.
func wrapInt64SumObserverInstrument(asyncInst AsyncImpl, err error) (Int64SumObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Int64SumObserver{asyncInstrument: common}, err
}
// wrapFloat64SumObserverInstrument converts an AsyncImpl into Float64SumObserver.
func wrapFloat64SumObserverInstrument(asyncInst AsyncImpl, err error) (Float64SumObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Float64SumObserver{asyncInstrument: common}, err
}
// wrapInt64UpDownSumObserverInstrument converts an AsyncImpl into Int64UpDownSumObserver.
func wrapInt64UpDownSumObserverInstrument(asyncInst AsyncImpl, err error) (Int64UpDownSumObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Int64UpDownSumObserver{asyncInstrument: common}, err
}
// wrapFloat64UpDownSumObserverInstrument converts an AsyncImpl into Float64UpDownSumObserver.
func wrapFloat64UpDownSumObserverInstrument(asyncInst AsyncImpl, err error) (Float64UpDownSumObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Float64UpDownSumObserver{asyncInstrument: common}, err
}
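A minimal sketch of registering a single-observer callback; the
`NewInt64ValueObserver` constructor on the Meter is assumed for this module
version, while the `Int64ObserverFunc` callback shape and the `Observe` call
come from the types above:

```go
package main

import (
	"context"
	"log"
	"runtime"

	"go.opentelemetry.io/otel/api/global"
	"go.opentelemetry.io/otel/api/metric"
)

func main() {
	meter := global.Meter("example.com/foo")

	// The callback runs on each collection interval and reports the
	// current goroutine count for this process.
	_, err := meter.NewInt64ValueObserver("process.goroutines", // assumed constructor
		func(_ context.Context, result metric.Int64ObserverResult) {
			result.Observe(int64(runtime.NumGoroutine()))
		})
	if err != nil {
		log.Fatal(err)
	}
}
```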

View File

@ -1,125 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import "go.opentelemetry.io/otel/unit"
// InstrumentConfig contains options for instrument descriptors.
type InstrumentConfig struct {
// Description describes the instrument in human-readable terms.
Description string
// Unit describes the measurement unit for an instrument.
Unit unit.Unit
// InstrumentationName is the name of the library providing
// instrumentation.
InstrumentationName string
// InstrumentationVersion is the version of the library providing
// instrumentation.
InstrumentationVersion string
}
// InstrumentOption is an interface for applying instrument options.
type InstrumentOption interface {
// ApplyInstrument is used to set an InstrumentOption value of an
// InstrumentConfig.
ApplyInstrument(*InstrumentConfig)
}
// NewInstrumentConfig creates a new InstrumentConfig
// and applies all the given options.
func NewInstrumentConfig(opts ...InstrumentOption) InstrumentConfig {
var config InstrumentConfig
for _, o := range opts {
o.ApplyInstrument(&config)
}
return config
}
// WithDescription applies provided description.
func WithDescription(desc string) InstrumentOption {
return descriptionOption(desc)
}
type descriptionOption string
func (d descriptionOption) ApplyInstrument(config *InstrumentConfig) {
config.Description = string(d)
}
// WithUnit applies provided unit.
func WithUnit(unit unit.Unit) InstrumentOption {
return unitOption(unit)
}
type unitOption unit.Unit
func (u unitOption) ApplyInstrument(config *InstrumentConfig) {
config.Unit = unit.Unit(u)
}
// WithInstrumentationName sets the instrumentation name.
func WithInstrumentationName(name string) InstrumentOption {
return instrumentationNameOption(name)
}
type instrumentationNameOption string
func (i instrumentationNameOption) ApplyInstrument(config *InstrumentConfig) {
config.InstrumentationName = string(i)
}
// MeterConfig contains options for Meters.
type MeterConfig struct {
// InstrumentationVersion is the version of the library providing
// instrumentation.
InstrumentationVersion string
}
// MeterOption is an interface for applying Meter options.
type MeterOption interface {
// ApplyMeter is used to set a MeterOption value of a MeterConfig.
ApplyMeter(*MeterConfig)
}
// NewMeterConfig creates a new MeterConfig and applies
// all the given options.
func NewMeterConfig(opts ...MeterOption) MeterConfig {
var config MeterConfig
for _, o := range opts {
o.ApplyMeter(&config)
}
return config
}
// Option is an interface for applying Instrument or Meter options.
type Option interface {
InstrumentOption
MeterOption
}
// WithInstrumentationVersion sets the instrumentation version.
func WithInstrumentationVersion(version string) Option {
return instrumentationVersionOption(version)
}
type instrumentationVersionOption string
func (i instrumentationVersionOption) ApplyMeter(config *MeterConfig) {
config.InstrumentationVersion = string(i)
}
func (i instrumentationVersionOption) ApplyInstrument(config *InstrumentConfig) {
config.InstrumentationVersion = string(i)
}
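A minimal sketch of applying these options; the descriptions, names, and
versions passed here are illustrative values:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/api/metric"
)

func main() {
	// Options that configure an instrument descriptor.
	instCfg := metric.NewInstrumentConfig(
		metric.WithDescription("number of handled requests"),
		metric.WithInstrumentationName("example.com/foo"),
		metric.WithInstrumentationVersion("v0.1.0"), // an Option, so valid here too
	)
	fmt.Println(instCfg.Description, instCfg.InstrumentationName, instCfg.InstrumentationVersion)

	// WithInstrumentationVersion also satisfies MeterOption.
	meterCfg := metric.NewMeterConfig(metric.WithInstrumentationVersion("v0.1.0"))
	fmt.Println(meterCfg.InstrumentationVersion)
}
```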

View File

@ -1,95 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"go.opentelemetry.io/otel/label"
)
// Float64Counter is a metric that accumulates float64 values.
type Float64Counter struct {
syncInstrument
}
// Int64Counter is a metric that accumulates int64 values.
type Int64Counter struct {
syncInstrument
}
// BoundFloat64Counter is a bound instrument for Float64Counter.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundFloat64Counter struct {
syncBoundInstrument
}
// BoundInt64Counter is a bound instrument for Int64Counter.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundInt64Counter struct {
syncBoundInstrument
}
// Bind creates a bound instrument for this counter. The labels are
// associated with values recorded via subsequent calls to Record.
func (c Float64Counter) Bind(labels ...label.KeyValue) (h BoundFloat64Counter) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Bind creates a bound instrument for this counter. The labels are
// associated with values recorded via subsequent calls to Record.
func (c Int64Counter) Bind(labels ...label.KeyValue) (h BoundInt64Counter) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Float64Counter) Measurement(value float64) Measurement {
return c.float64Measurement(value)
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Int64Counter) Measurement(value int64) Measurement {
return c.int64Measurement(value)
}
// Add adds the value to the counter's sum. The labels should contain
// the keys and values to be associated with this value.
func (c Float64Counter) Add(ctx context.Context, value float64, labels ...label.KeyValue) {
c.directRecord(ctx, NewFloat64Number(value), labels)
}
// Add adds the value to the counter's sum. The labels should contain
// the keys and values to be associated with this value.
func (c Int64Counter) Add(ctx context.Context, value int64, labels ...label.KeyValue) {
c.directRecord(ctx, NewInt64Number(value), labels)
}
// Add adds the value to the counter's sum using the labels
// previously bound to this counter via Bind()
func (b BoundFloat64Counter) Add(ctx context.Context, value float64) {
b.directRecord(ctx, NewFloat64Number(value))
}
// Add adds the value to the counter's sum using the labels
// previously bound to this counter via Bind()
func (b BoundInt64Counter) Add(ctx context.Context, value int64) {
b.directRecord(ctx, NewInt64Number(value))
}
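
A sketch of the three recording paths above (direct Add, a bound instrument, and batch Measurement). It uses the zero-value Meter, which this API defines as a no-op, so every call is safe but records nothing; label.String comes from the otel label package imported above:

package main

import (
    "context"

    "go.opentelemetry.io/otel/api/metric"
    "go.opentelemetry.io/otel/label"
)

func main() {
    ctx := context.Background()
    meter := metric.Meter{} // uninitialized Meter: all operations are no-ops

    counter := metric.Must(meter).NewInt64Counter("requests.count")

    // Direct recording: labels are passed with every call.
    counter.Add(ctx, 1, label.String("method", "GET"))

    // Bound recording: labels are bound once, then reused.
    bound := counter.Bind(label.String("method", "GET"))
    defer bound.Unbind()
    bound.Add(ctx, 1)

    // Batch recording: Measurement values are recorded atomically together.
    meter.RecordBatch(ctx, []label.KeyValue{label.String("host", "a")},
        counter.Measurement(1))
}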

View File

@ -1,77 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import "go.opentelemetry.io/otel/unit"
// Descriptor contains all the settings that describe an instrument,
// including its name, metric kind, number kind, and the configurable
// options.
type Descriptor struct {
name string
kind Kind
numberKind NumberKind
config InstrumentConfig
}
// NewDescriptor returns a Descriptor with the given contents.
func NewDescriptor(name string, mkind Kind, nkind NumberKind, opts ...InstrumentOption) Descriptor {
return Descriptor{
name: name,
kind: mkind,
numberKind: nkind,
config: NewInstrumentConfig(opts...),
}
}
// Name returns the metric instrument's name.
func (d Descriptor) Name() string {
return d.name
}
// MetricKind returns the specific kind of instrument.
func (d Descriptor) MetricKind() Kind {
return d.kind
}
// Description provides a human-readable description of the metric
// instrument.
func (d Descriptor) Description() string {
return d.config.Description
}
// Unit describes the units of the metric instrument. Unitless
// metrics return the empty string.
func (d Descriptor) Unit() unit.Unit {
return d.config.Unit
}
// NumberKind returns whether this instrument is declared over int64,
// float64, or uint64 values.
func (d Descriptor) NumberKind() NumberKind {
return d.numberKind
}
// InstrumentationName returns the name of the library that provided
// instrumentation for this instrument.
func (d Descriptor) InstrumentationName() string {
return d.config.InstrumentationName
}
// InstrumentationVersion returns the version of the library that provided
// instrumentation for this instrument.
func (d Descriptor) InstrumentationVersion() string {
return d.config.InstrumentationVersion
}
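
A small sketch showing how a Descriptor carries the name, kinds, and options it was constructed with; the instrument name, description, and unit are illustrative values:

package main

import (
    "fmt"

    "go.opentelemetry.io/otel/api/metric"
    "go.opentelemetry.io/otel/unit"
)

func main() {
    desc := metric.NewDescriptor(
        "http.request.duration",
        metric.ValueRecorderKind,
        metric.Float64NumberKind,
        metric.WithDescription("duration of inbound HTTP requests"),
        metric.WithUnit(unit.Milliseconds),
    )
    // Accessors expose the configured metadata.
    fmt.Println(desc.Name(), desc.MetricKind(), desc.NumberKind(), desc.Unit(), desc.Description())
}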

View File

@ -1,50 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metric provides support for reporting measurements using instruments.
//
// Instruments are categorized as below:
//
// Synchronous instruments are called by the user with a Context.
// Asynchronous instruments are called by the SDK during collection.
//
// Additive instruments are semantically intended for capturing a sum.
// Non-additive instruments are intended for capturing a distribution.
//
// Additive instruments may be monotonic, in which case they are
// non-decreasing and naturally define a rate.
//
// The synchronous instrument names are:
//
// Counter: additive, monotonic
// UpDownCounter: additive
// ValueRecorder: non-additive
//
// and the asynchronous instruments are:
//
// SumObserver: additive, monotonic
// UpDownSumObserver: additive
// ValueObserver: non-additive
//
// All instruments are provided with support for either float64 or
// int64 input values.
//
// The Meter interface supports allocating new instruments as well as
// interfaces for recording batches of synchronous measurements or
// asynchronous observations. To obtain a Meter, use a MeterProvider.
//
// The MeterProvider interface supports obtaining a named Meter interface. To
// obtain a MeterProvider implementation, initialize and configure any
// compatible SDK.
package metric // import "go.opentelemetry.io/otel/api/metric"
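
As a concrete sketch of the taxonomy above, using only this package's own no-op MeterProvider so the snippet is self-contained (a real SDK would supply the provider). Add on the up/down counter and Record on the value recorder are defined in files of this package not shown in this diff:

package main

import (
    "context"

    "go.opentelemetry.io/otel/api/metric"
    "go.opentelemetry.io/otel/label"
)

func main() {
    ctx := context.Background()

    // MeterProvider -> named Meter; the no-op provider keeps this runnable.
    meter := metric.NoopMeterProvider{}.Meter("example.io/doc")
    must := metric.Must(meter)

    // Counter: synchronous, additive, monotonic.
    completed := must.NewInt64Counter("jobs.completed")
    completed.Add(ctx, 1, label.String("queue", "default"))

    // UpDownCounter: synchronous, additive, not monotonic.
    inFlight := must.NewInt64UpDownCounter("jobs.in.flight")
    inFlight.Add(ctx, -1)

    // ValueRecorder: synchronous, non-additive (a distribution).
    duration := must.NewFloat64ValueRecorder("job.duration.seconds")
    duration.Record(ctx, 0.25)
}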

View File

@ -1,79 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate stringer -type=Kind
package metric
// Kind describes the kind of instrument.
type Kind int8
const (
// ValueRecorderKind indicates a ValueRecorder instrument.
ValueRecorderKind Kind = iota
// ValueObserverKind indicates a ValueObserver instrument.
ValueObserverKind
// CounterKind indicates a Counter instrument.
CounterKind
// UpDownCounterKind indicates an UpDownCounter instrument.
UpDownCounterKind
// SumObserverKind indicates a SumObserver instrument.
SumObserverKind
// UpDownSumObserverKind indicates an UpDownSumObserver instrument.
UpDownSumObserverKind
)
// Synchronous returns whether this is a synchronous kind of instrument.
func (k Kind) Synchronous() bool {
switch k {
case CounterKind, UpDownCounterKind, ValueRecorderKind:
return true
}
return false
}
// Asynchronous returns whether this is an asynchronous kind of instrument.
func (k Kind) Asynchronous() bool {
return !k.Synchronous()
}
// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping).
func (k Kind) Adding() bool {
switch k {
case CounterKind, UpDownCounterKind, SumObserverKind, UpDownSumObserverKind:
return true
}
return false
}
// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding).
func (k Kind) Grouping() bool {
return !k.Adding()
}
// Monotonic returns whether this kind of instrument exposes a non-decreasing sum.
func (k Kind) Monotonic() bool {
switch k {
case CounterKind, SumObserverKind:
return true
}
return false
}
// PrecomputedSum returns whether this kind of instrument receives precomputed sums.
func (k Kind) PrecomputedSum() bool {
return k.Adding() && k.Asynchronous()
}
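
The predicates above partition the six kinds along the synchronous/asynchronous, adding/grouping, and monotonic axes. A short sketch of how code might branch on them; the describe helper is purely illustrative:

package main

import (
    "fmt"

    "go.opentelemetry.io/otel/api/metric"
)

// describe summarizes a Kind using the predicates defined on it.
func describe(k metric.Kind) string {
    switch {
    case k.Adding() && k.Monotonic():
        return fmt.Sprintf("%v: a monotonic sum", k)
    case k.Adding():
        return fmt.Sprintf("%v: a sum that may go down", k)
    default:
        return fmt.Sprintf("%v: a distribution (grouping)", k)
    }
}

func main() {
    for _, k := range []metric.Kind{
        metric.CounterKind, metric.UpDownCounterKind, metric.ValueRecorderKind,
        metric.SumObserverKind, metric.UpDownSumObserverKind, metric.ValueObserverKind,
    } {
        fmt.Printf("%s (synchronous=%v, precomputed=%v)\n",
            describe(k), k.Synchronous(), k.PrecomputedSum())
    }
}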

View File

@ -1,28 +0,0 @@
// Code generated by "stringer -type=Kind"; DO NOT EDIT.
package metric
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[ValueRecorderKind-0]
_ = x[ValueObserverKind-1]
_ = x[CounterKind-2]
_ = x[UpDownCounterKind-3]
_ = x[SumObserverKind-4]
_ = x[UpDownSumObserverKind-5]
}
const _Kind_name = "ValueRecorderKindValueObserverKindCounterKindUpDownCounterKindSumObserverKindUpDownSumObserverKind"
var _Kind_index = [...]uint8{0, 17, 34, 45, 62, 77, 98}
func (i Kind) String() string {
if i < 0 || i >= Kind(len(_Kind_index)-1) {
return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
}

View File

@ -1,320 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"go.opentelemetry.io/otel/label"
)
// The file is organized as follows:
//
// - MeterProvider interface
// - Meter struct
// - RecordBatch
// - BatchObserver
// - Synchronous instrument constructors (2 x int64,float64)
// - Asynchronous instrument constructors (1 x int64,float64)
// - Batch asynchronous constructors (1 x int64,float64)
// - Internals
// MeterProvider supports named Meter instances.
type MeterProvider interface {
// Meter creates an implementation of the Meter interface.
// The instrumentationName must be the name of the library providing
// instrumentation. This name may be the same as the instrumented code
// only if that code provides built-in instrumentation. If the
// instrumentationName is empty, then an implementation-defined default
// name will be used instead.
Meter(instrumentationName string, opts ...MeterOption) Meter
}
// Meter is the OpenTelemetry metric API, based on a `MeterImpl`
// implementation and the `Meter` library name.
//
// An uninitialized Meter is a no-op implementation.
type Meter struct {
impl MeterImpl
name, version string
}
// RecordBatch atomically records a batch of measurements.
func (m Meter) RecordBatch(ctx context.Context, ls []label.KeyValue, ms ...Measurement) {
if m.impl == nil {
return
}
m.impl.RecordBatch(ctx, ls, ms...)
}
// NewBatchObserver creates a new BatchObserver that supports
// making batches of observations for multiple instruments.
func (m Meter) NewBatchObserver(callback BatchObserverFunc) BatchObserver {
return BatchObserver{
meter: m,
runner: newBatchAsyncRunner(callback),
}
}
// NewInt64Counter creates a new integer Counter instrument with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewInt64Counter(name string, options ...InstrumentOption) (Int64Counter, error) {
return wrapInt64CounterInstrument(
m.newSync(name, CounterKind, Int64NumberKind, options))
}
// NewFloat64Counter creates a new floating point Counter with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewFloat64Counter(name string, options ...InstrumentOption) (Float64Counter, error) {
return wrapFloat64CounterInstrument(
m.newSync(name, CounterKind, Float64NumberKind, options))
}
// NewInt64UpDownCounter creates a new integer UpDownCounter instrument with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewInt64UpDownCounter(name string, options ...InstrumentOption) (Int64UpDownCounter, error) {
return wrapInt64UpDownCounterInstrument(
m.newSync(name, UpDownCounterKind, Int64NumberKind, options))
}
// NewFloat64UpDownCounter creates a new floating point UpDownCounter with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewFloat64UpDownCounter(name string, options ...InstrumentOption) (Float64UpDownCounter, error) {
return wrapFloat64UpDownCounterInstrument(
m.newSync(name, UpDownCounterKind, Float64NumberKind, options))
}
// NewInt64ValueRecorder creates a new integer ValueRecorder instrument with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewInt64ValueRecorder(name string, opts ...InstrumentOption) (Int64ValueRecorder, error) {
return wrapInt64ValueRecorderInstrument(
m.newSync(name, ValueRecorderKind, Int64NumberKind, opts))
}
// NewFloat64ValueRecorder creates a new floating point ValueRecorder with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewFloat64ValueRecorder(name string, opts ...InstrumentOption) (Float64ValueRecorder, error) {
return wrapFloat64ValueRecorderInstrument(
m.newSync(name, ValueRecorderKind, Float64NumberKind, opts))
}
// NewInt64ValueObserver creates a new integer ValueObserver instrument
// with the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) NewInt64ValueObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64ValueObserver, error) {
if callback == nil {
return wrapInt64ValueObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64ValueObserverInstrument(
m.newAsync(name, ValueObserverKind, Int64NumberKind, opts,
newInt64AsyncRunner(callback)))
}
// NewFloat64ValueObserver creates a new floating point ValueObserver with
// the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) NewFloat64ValueObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64ValueObserver, error) {
if callback == nil {
return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64ValueObserverInstrument(
m.newAsync(name, ValueObserverKind, Float64NumberKind, opts,
newFloat64AsyncRunner(callback)))
}
// NewInt64SumObserver creates a new integer SumObserver instrument
// with the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) NewInt64SumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64SumObserver, error) {
if callback == nil {
return wrapInt64SumObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64SumObserverInstrument(
m.newAsync(name, SumObserverKind, Int64NumberKind, opts,
newInt64AsyncRunner(callback)))
}
// NewFloat64SumObserver creates a new floating point SumObserver with
// the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) NewFloat64SumObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64SumObserver, error) {
if callback == nil {
return wrapFloat64SumObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64SumObserverInstrument(
m.newAsync(name, SumObserverKind, Float64NumberKind, opts,
newFloat64AsyncRunner(callback)))
}
// NewInt64UpDownSumObserver creates a new integer UpDownSumObserver instrument
// with the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) NewInt64UpDownSumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64UpDownSumObserver, error) {
if callback == nil {
return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64UpDownSumObserverInstrument(
m.newAsync(name, UpDownSumObserverKind, Int64NumberKind, opts,
newInt64AsyncRunner(callback)))
}
// NewFloat64UpDownSumObserver creates a new floating point UpDownSumObserver with
// the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) NewFloat64UpDownSumObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64UpDownSumObserver, error) {
if callback == nil {
return wrapFloat64UpDownSumObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64UpDownSumObserverInstrument(
m.newAsync(name, UpDownSumObserverKind, Float64NumberKind, opts,
newFloat64AsyncRunner(callback)))
}
// NewInt64ValueObserver creates a new integer ValueObserver instrument
// with the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewInt64ValueObserver(name string, opts ...InstrumentOption) (Int64ValueObserver, error) {
if b.runner == nil {
return wrapInt64ValueObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64ValueObserverInstrument(
b.meter.newAsync(name, ValueObserverKind, Int64NumberKind, opts, b.runner))
}
// NewFloat64ValueObserver creates a new floating point ValueObserver with
// the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewFloat64ValueObserver(name string, opts ...InstrumentOption) (Float64ValueObserver, error) {
if b.runner == nil {
return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64ValueObserverInstrument(
b.meter.newAsync(name, ValueObserverKind, Float64NumberKind, opts,
b.runner))
}
// NewInt64SumObserver creates a new integer SumObserver instrument
// with the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewInt64SumObserver(name string, opts ...InstrumentOption) (Int64SumObserver, error) {
if b.runner == nil {
return wrapInt64SumObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64SumObserverInstrument(
b.meter.newAsync(name, SumObserverKind, Int64NumberKind, opts, b.runner))
}
// NewFloat64SumObserver creates a new floating point SumObserver with
// the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewFloat64SumObserver(name string, opts ...InstrumentOption) (Float64SumObserver, error) {
if b.runner == nil {
return wrapFloat64SumObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64SumObserverInstrument(
b.meter.newAsync(name, SumObserverKind, Float64NumberKind, opts,
b.runner))
}
// NewInt64UpDownSumObserver creates a new integer UpDownSumObserver instrument
// with the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewInt64UpDownSumObserver(name string, opts ...InstrumentOption) (Int64UpDownSumObserver, error) {
if b.runner == nil {
return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64UpDownSumObserverInstrument(
b.meter.newAsync(name, UpDownSumObserverKind, Int64NumberKind, opts, b.runner))
}
// NewFloat64UpDownSumObserver creates a new floating point UpDownSumObserver with
// the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) NewFloat64UpDownSumObserver(name string, opts ...InstrumentOption) (Float64UpDownSumObserver, error) {
if b.runner == nil {
return wrapFloat64UpDownSumObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64UpDownSumObserverInstrument(
b.meter.newAsync(name, UpDownSumObserverKind, Float64NumberKind, opts,
b.runner))
}
// MeterImpl returns the underlying MeterImpl of this Meter.
func (m Meter) MeterImpl() MeterImpl {
return m.impl
}
// newAsync constructs one new asynchronous instrument.
func (m Meter) newAsync(
name string,
mkind Kind,
nkind NumberKind,
opts []InstrumentOption,
runner AsyncRunner,
) (
AsyncImpl,
error,
) {
if m.impl == nil {
return NoopAsync{}, nil
}
desc := NewDescriptor(name, mkind, nkind, opts...)
desc.config.InstrumentationName = m.name
desc.config.InstrumentationVersion = m.version
return m.impl.NewAsyncInstrument(desc, runner)
}
// newSync constructs one new synchronous instrument.
func (m Meter) newSync(
name string,
metricKind Kind,
numberKind NumberKind,
opts []InstrumentOption,
) (
SyncImpl,
error,
) {
if m.impl == nil {
return NoopSync{}, nil
}
desc := NewDescriptor(name, metricKind, numberKind, opts...)
desc.config.InstrumentationName = m.name
desc.config.InstrumentationVersion = m.version
return m.impl.NewSyncInstrument(desc)
}
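
A sketch of the per-instrument asynchronous constructors above. The Int64ObserverResult and Float64ObserverResult callback argument types and their Observe method are defined in parts of this package not reproduced in this diff; with the zero-value Meter used here the instruments are no-ops, so the callbacks never actually run:

package main

import (
    "context"
    "runtime"

    "go.opentelemetry.io/otel/api/metric"
    "go.opentelemetry.io/otel/label"
)

func main() {
    var meter metric.Meter // no-op; a real Meter comes from a MeterProvider

    // ValueObserver: a distribution captured at collection time.
    _, err := meter.NewInt64ValueObserver("runtime.num.goroutine",
        func(_ context.Context, result metric.Int64ObserverResult) {
            result.Observe(int64(runtime.NumGoroutine()))
        })
    if err != nil {
        panic(err)
    }

    // SumObserver: a precomputed, monotonic sum reported by the callback.
    _, err = meter.NewFloat64SumObserver("process.cpu.seconds",
        func(_ context.Context, result metric.Float64ObserverResult) {
            result.Observe(123.4, label.String("state", "user"))
        })
    if err != nil {
        panic(err)
    }
}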

View File

@ -1,222 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
// MeterMust is a wrapper for a Meter that panics when any
// instrument constructor encounters an error.
type MeterMust struct {
meter Meter
}
// BatchObserverMust is a wrapper for BatchObserver that panics when
// any instrument constructor encounters an error.
type BatchObserverMust struct {
batch BatchObserver
}
// Must constructs a MeterMust implementation from a Meter, allowing
// the application to panic when any instrument constructor yields an
// error.
func Must(meter Meter) MeterMust {
return MeterMust{meter: meter}
}
// NewInt64Counter calls `Meter.NewInt64Counter` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64Counter(name string, cos ...InstrumentOption) Int64Counter {
if inst, err := mm.meter.NewInt64Counter(name, cos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64Counter calls `Meter.NewFloat64Counter` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64Counter(name string, cos ...InstrumentOption) Float64Counter {
if inst, err := mm.meter.NewFloat64Counter(name, cos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64UpDownCounter calls `Meter.NewInt64UpDownCounter` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64UpDownCounter(name string, cos ...InstrumentOption) Int64UpDownCounter {
if inst, err := mm.meter.NewInt64UpDownCounter(name, cos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64UpDownCounter calls `Meter.NewFloat64UpDownCounter` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64UpDownCounter(name string, cos ...InstrumentOption) Float64UpDownCounter {
if inst, err := mm.meter.NewFloat64UpDownCounter(name, cos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64ValueRecorder calls `Meter.NewInt64ValueRecorder` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64ValueRecorder(name string, mos ...InstrumentOption) Int64ValueRecorder {
if inst, err := mm.meter.NewInt64ValueRecorder(name, mos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64ValueRecorder calls `Meter.NewFloat64ValueRecorder` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64ValueRecorder(name string, mos ...InstrumentOption) Float64ValueRecorder {
if inst, err := mm.meter.NewFloat64ValueRecorder(name, mos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64ValueObserver calls `Meter.NewInt64ValueObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64ValueObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64ValueObserver {
if inst, err := mm.meter.NewInt64ValueObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64ValueObserver calls `Meter.NewFloat64ValueObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64ValueObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64ValueObserver {
if inst, err := mm.meter.NewFloat64ValueObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64SumObserver calls `Meter.NewInt64SumObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64SumObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64SumObserver {
if inst, err := mm.meter.NewInt64SumObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64SumObserver calls `Meter.NewFloat64SumObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64SumObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64SumObserver {
if inst, err := mm.meter.NewFloat64SumObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64UpDownSumObserver calls `Meter.NewInt64UpDownSumObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64UpDownSumObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64UpDownSumObserver {
if inst, err := mm.meter.NewInt64UpDownSumObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64UpDownSumObserver calls `Meter.NewFloat64UpDownSumObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64UpDownSumObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64UpDownSumObserver {
if inst, err := mm.meter.NewFloat64UpDownSumObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewBatchObserver returns a wrapper around BatchObserver that panics
// when any instrument constructor returns an error.
func (mm MeterMust) NewBatchObserver(callback BatchObserverFunc) BatchObserverMust {
return BatchObserverMust{
batch: mm.meter.NewBatchObserver(callback),
}
}
// NewInt64ValueObserver calls `BatchObserver.NewInt64ValueObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) NewInt64ValueObserver(name string, oos ...InstrumentOption) Int64ValueObserver {
if inst, err := bm.batch.NewInt64ValueObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64ValueObserver calls `BatchObserver.NewFloat64ValueObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) NewFloat64ValueObserver(name string, oos ...InstrumentOption) Float64ValueObserver {
if inst, err := bm.batch.NewFloat64ValueObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64SumObserver calls `BatchObserver.NewInt64SumObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) NewInt64SumObserver(name string, oos ...InstrumentOption) Int64SumObserver {
if inst, err := bm.batch.NewInt64SumObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64SumObserver calls `BatchObserver.NewFloat64SumObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) NewFloat64SumObserver(name string, oos ...InstrumentOption) Float64SumObserver {
if inst, err := bm.batch.NewFloat64SumObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewInt64UpDownSumObserver calls `BatchObserver.NewInt64UpDownSumObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) NewInt64UpDownSumObserver(name string, oos ...InstrumentOption) Int64UpDownSumObserver {
if inst, err := bm.batch.NewInt64UpDownSumObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64UpDownSumObserver calls `BatchObserver.NewFloat64UpDownSumObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) NewFloat64UpDownSumObserver(name string, oos ...InstrumentOption) Float64UpDownSumObserver {
if inst, err := bm.batch.NewFloat64UpDownSumObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
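
The Must wrappers trade the error return for a panic, which is the usual choice when instruments are created once at package initialization: misconfiguration surfaces immediately instead of at first use. A short sketch; the instrument names are illustrative, and Record on the value recorder comes from a file of this package not shown in this diff:

package main

import (
    "context"

    "go.opentelemetry.io/otel/api/metric"
)

// Instruments created at init time: any registration error becomes a panic.
var (
    meter        = metric.Meter{} // no-op here; normally provider.Meter("pkg/name")
    requestCount = metric.Must(meter).NewInt64Counter("http.requests")
    requestSize  = metric.Must(meter).NewInt64ValueRecorder("http.request.size")
)

func main() {
    ctx := context.Background()
    requestCount.Add(ctx, 1)
    requestSize.Record(ctx, 2048)
}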

View File

@ -1,58 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"go.opentelemetry.io/otel/label"
)
type NoopMeterProvider struct{}
type noopInstrument struct{}
type noopBoundInstrument struct{}
type NoopSync struct{ noopInstrument }
type NoopAsync struct{ noopInstrument }
var _ MeterProvider = NoopMeterProvider{}
var _ SyncImpl = NoopSync{}
var _ BoundSyncImpl = noopBoundInstrument{}
var _ AsyncImpl = NoopAsync{}
func (NoopMeterProvider) Meter(_ string, _ ...MeterOption) Meter {
return Meter{}
}
func (noopInstrument) Implementation() interface{} {
return nil
}
func (noopInstrument) Descriptor() Descriptor {
return Descriptor{}
}
func (noopBoundInstrument) RecordOne(context.Context, Number) {
}
func (noopBoundInstrument) Unbind() {
}
func (NoopSync) Bind([]label.KeyValue) BoundSyncImpl {
return noopBoundInstrument{}
}
func (NoopSync) RecordOne(context.Context, Number, []label.KeyValue) {
}

View File

@ -1,540 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
//go:generate stringer -type=NumberKind
import (
"fmt"
"math"
"sync/atomic"
"go.opentelemetry.io/otel/internal"
)
// NumberKind describes the data type of the Number.
type NumberKind int8
const (
// Int64NumberKind means that the Number stores int64.
Int64NumberKind NumberKind = iota
// Float64NumberKind means that the Number stores float64.
Float64NumberKind
)
// Zero returns a zero value for a given NumberKind
func (k NumberKind) Zero() Number {
switch k {
case Int64NumberKind:
return NewInt64Number(0)
case Float64NumberKind:
return NewFloat64Number(0.)
default:
return Number(0)
}
}
// Minimum returns the minimum representable value
// for a given NumberKind
func (k NumberKind) Minimum() Number {
switch k {
case Int64NumberKind:
return NewInt64Number(math.MinInt64)
case Float64NumberKind:
return NewFloat64Number(-1. * math.MaxFloat64)
default:
return Number(0)
}
}
// Maximum returns the maximum representable value
// for a given NumberKind
func (k NumberKind) Maximum() Number {
switch k {
case Int64NumberKind:
return NewInt64Number(math.MaxInt64)
case Float64NumberKind:
return NewFloat64Number(math.MaxFloat64)
default:
return Number(0)
}
}
// Number represents either an integral or a floating point value. It
// needs to be accompanied with a source of NumberKind that describes
// the actual type of the value stored within Number.
type Number uint64
// - constructors
// NewNumberFromRaw creates a new Number from a raw value.
func NewNumberFromRaw(r uint64) Number {
return Number(r)
}
// NewInt64Number creates an integral Number.
func NewInt64Number(i int64) Number {
return NewNumberFromRaw(internal.Int64ToRaw(i))
}
// NewFloat64Number creates a floating point Number.
func NewFloat64Number(f float64) Number {
return NewNumberFromRaw(internal.Float64ToRaw(f))
}
// NewNumberSignChange returns a number with the same magnitude and
// the opposite sign. `kind` must describe the kind of number in `nn`.
//
// Numbers of any other kind are returned unchanged.
func NewNumberSignChange(kind NumberKind, nn Number) Number {
switch kind {
case Int64NumberKind:
return NewInt64Number(-nn.AsInt64())
case Float64NumberKind:
return NewFloat64Number(-nn.AsFloat64())
}
return nn
}
// - as x
// AsNumber gets the Number.
func (n *Number) AsNumber() Number {
return *n
}
// AsRaw gets the uninterpreted raw value. Might be useful for some
// atomic operations.
func (n *Number) AsRaw() uint64 {
return uint64(*n)
}
// AsInt64 assumes that the value contains an int64 and returns it as
// such.
func (n *Number) AsInt64() int64 {
return internal.RawToInt64(n.AsRaw())
}
// AsFloat64 assumes that the measurement value contains a float64 and
// returns it as such.
func (n *Number) AsFloat64() float64 {
return internal.RawToFloat64(n.AsRaw())
}
// - as x atomic
// AsNumberAtomic gets the Number atomically.
func (n *Number) AsNumberAtomic() Number {
return NewNumberFromRaw(n.AsRawAtomic())
}
// AsRawAtomic gets the uninterpreted raw value atomically. Might be
// useful for some atomic operations.
func (n *Number) AsRawAtomic() uint64 {
return atomic.LoadUint64(n.AsRawPtr())
}
// AsInt64Atomic assumes that the number contains an int64 and returns
// it as such atomically.
func (n *Number) AsInt64Atomic() int64 {
return atomic.LoadInt64(n.AsInt64Ptr())
}
// AsFloat64Atomic assumes that the measurement value contains a
// float64 and returns it as such atomically.
func (n *Number) AsFloat64Atomic() float64 {
return internal.RawToFloat64(n.AsRawAtomic())
}
// - as x ptr
// AsRawPtr gets the pointer to the raw, uninterpreted raw
// value. Might be useful for some atomic operations.
func (n *Number) AsRawPtr() *uint64 {
return (*uint64)(n)
}
// AsInt64Ptr assumes that the number contains an int64 and returns a
// pointer to it.
func (n *Number) AsInt64Ptr() *int64 {
return internal.RawPtrToInt64Ptr(n.AsRawPtr())
}
// AsFloat64Ptr assumes that the number contains a float64 and returns a
// pointer to it.
func (n *Number) AsFloat64Ptr() *float64 {
return internal.RawPtrToFloat64Ptr(n.AsRawPtr())
}
// - coerce
// CoerceToInt64 casts the number to int64. May result in
// data/precision loss.
func (n *Number) CoerceToInt64(kind NumberKind) int64 {
switch kind {
case Int64NumberKind:
return n.AsInt64()
case Float64NumberKind:
return int64(n.AsFloat64())
default:
// you get what you deserve
return 0
}
}
// CoerceToFloat64 casts the number to float64. May result in
// data/precision loss.
func (n *Number) CoerceToFloat64(kind NumberKind) float64 {
switch kind {
case Int64NumberKind:
return float64(n.AsInt64())
case Float64NumberKind:
return n.AsFloat64()
default:
// you get what you deserve
return 0
}
}
// - set
// SetNumber sets the number to the passed number. Both should be of
// the same kind.
func (n *Number) SetNumber(nn Number) {
*n.AsRawPtr() = nn.AsRaw()
}
// SetRaw sets the number to the passed raw value. Both number and the
// raw number should represent the same kind.
func (n *Number) SetRaw(r uint64) {
*n.AsRawPtr() = r
}
// SetInt64 assumes that the number contains an int64 and sets it to
// the passed value.
func (n *Number) SetInt64(i int64) {
*n.AsInt64Ptr() = i
}
// SetFloat64 assumes that the number contains a float64 and sets it
// to the passed value.
func (n *Number) SetFloat64(f float64) {
*n.AsFloat64Ptr() = f
}
// - set atomic
// SetNumberAtomic sets the number to the passed number
// atomically. Both should be of the same kind.
func (n *Number) SetNumberAtomic(nn Number) {
atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw())
}
// SetRawAtomic sets the number to the passed raw value
// atomically. Both number and the raw number should represent the
// same kind.
func (n *Number) SetRawAtomic(r uint64) {
atomic.StoreUint64(n.AsRawPtr(), r)
}
// SetInt64Atomic assumes that the number contains an int64 and sets
// it to the passed value atomically.
func (n *Number) SetInt64Atomic(i int64) {
atomic.StoreInt64(n.AsInt64Ptr(), i)
}
// SetFloat64Atomic assumes that the number contains a float64 and
// sets it to the passed value atomically.
func (n *Number) SetFloat64Atomic(f float64) {
atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f))
}
// - swap
// SwapNumber sets the number to the passed number and returns the old
// number. Both this number and the passed number should be of the
// same kind.
func (n *Number) SwapNumber(nn Number) Number {
old := *n
n.SetNumber(nn)
return old
}
// SwapRaw sets the number to the passed raw value and returns the old
// raw value. Both number and the raw number should represent the same
// kind.
func (n *Number) SwapRaw(r uint64) uint64 {
old := n.AsRaw()
n.SetRaw(r)
return old
}
// SwapInt64 assumes that the number contains an int64, sets it to the
// passed value and returns the old int64 value.
func (n *Number) SwapInt64(i int64) int64 {
old := n.AsInt64()
n.SetInt64(i)
return old
}
// SwapFloat64 assumes that the number contains a float64, sets it to
// the passed value and returns the old float64 value.
func (n *Number) SwapFloat64(f float64) float64 {
old := n.AsFloat64()
n.SetFloat64(f)
return old
}
// - swap atomic
// SwapNumberAtomic sets the number to the passed number and returns
// the old number atomically. Both this number and the passed number
// should be of the same kind.
func (n *Number) SwapNumberAtomic(nn Number) Number {
return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw()))
}
// SwapRawAtomic sets the number to the passed raw value and returns
// the old raw value atomically. Both number and the raw number should
// represent the same kind.
func (n *Number) SwapRawAtomic(r uint64) uint64 {
return atomic.SwapUint64(n.AsRawPtr(), r)
}
// SwapInt64Atomic assumes that the number contains an int64, sets it
// to the passed value and returns the old int64 value atomically.
func (n *Number) SwapInt64Atomic(i int64) int64 {
return atomic.SwapInt64(n.AsInt64Ptr(), i)
}
// SwapFloat64Atomic assumes that the number contains a float64, sets
// it to the passed value and returns the old float64 value
// atomically.
func (n *Number) SwapFloat64Atomic(f float64) float64 {
return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f)))
}
// - add
// AddNumber assumes that this and the passed number are of the passed
// kind and adds the passed number to this number.
func (n *Number) AddNumber(kind NumberKind, nn Number) {
switch kind {
case Int64NumberKind:
n.AddInt64(nn.AsInt64())
case Float64NumberKind:
n.AddFloat64(nn.AsFloat64())
}
}
// AddRaw assumes that this number and the passed raw value are of the
// passed kind and adds the passed raw value to this number.
func (n *Number) AddRaw(kind NumberKind, r uint64) {
n.AddNumber(kind, NewNumberFromRaw(r))
}
// AddInt64 assumes that the number contains an int64 and adds the
// passed int64 to it.
func (n *Number) AddInt64(i int64) {
*n.AsInt64Ptr() += i
}
// AddFloat64 assumes that the number contains a float64 and adds the
// passed float64 to it.
func (n *Number) AddFloat64(f float64) {
*n.AsFloat64Ptr() += f
}
// - add atomic
// AddNumberAtomic assumes that this and the passed number are of the
// passed kind and adds the passed number to this number atomically.
func (n *Number) AddNumberAtomic(kind NumberKind, nn Number) {
switch kind {
case Int64NumberKind:
n.AddInt64Atomic(nn.AsInt64())
case Float64NumberKind:
n.AddFloat64Atomic(nn.AsFloat64())
}
}
// AddRawAtomic assumes that this number and the passed raw value are
// of the passed kind and adds the passed raw value to this number
// atomically.
func (n *Number) AddRawAtomic(kind NumberKind, r uint64) {
n.AddNumberAtomic(kind, NewNumberFromRaw(r))
}
// AddInt64Atomic assumes that the number contains an int64 and adds
// the passed int64 to it atomically.
func (n *Number) AddInt64Atomic(i int64) {
atomic.AddInt64(n.AsInt64Ptr(), i)
}
// AddFloat64Atomic assumes that the number contains a float64 and
// adds the passed float64 to it atomically.
func (n *Number) AddFloat64Atomic(f float64) {
for {
o := n.AsFloat64Atomic()
if n.CompareAndSwapFloat64(o, o+f) {
break
}
}
}
// - compare and swap (atomic only)
// CompareAndSwapNumber does the atomic CAS operation on this
// number. This number and passed old and new numbers should be of the
// same kind.
func (n *Number) CompareAndSwapNumber(on, nn Number) bool {
return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw())
}
// CompareAndSwapRaw does the atomic CAS operation on this
// number. This number and passed old and new raw values should be of
// the same kind.
func (n *Number) CompareAndSwapRaw(or, nr uint64) bool {
return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr)
}
// CompareAndSwapInt64 assumes that this number contains an int64 and
// does the atomic CAS operation on it.
func (n *Number) CompareAndSwapInt64(oi, ni int64) bool {
return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni)
}
// CompareAndSwapFloat64 assumes that this number contains a float64 and
// does the atomic CAS operation on it.
func (n *Number) CompareAndSwapFloat64(of, nf float64) bool {
return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf))
}
// - compare
// CompareNumber compares two Numbers given their kind. Both numbers
// should have the same kind. This returns:
// 0 if the numbers are equal
// -1 if the subject `n` is less than the argument `nn`
// +1 if the subject `n` is greater than the argument `nn`
func (n *Number) CompareNumber(kind NumberKind, nn Number) int {
switch kind {
case Int64NumberKind:
return n.CompareInt64(nn.AsInt64())
case Float64NumberKind:
return n.CompareFloat64(nn.AsFloat64())
default:
// you get what you deserve
return 0
}
}
// CompareRaw compares two numbers, where one is input as a raw
// uint64, interpreting both values as a `kind` of number.
func (n *Number) CompareRaw(kind NumberKind, r uint64) int {
return n.CompareNumber(kind, NewNumberFromRaw(r))
}
// CompareInt64 assumes that the Number contains an int64 and performs
// a comparison between the value and the other value. It returns the
// typical result of the compare function: -1 if the value is less
// than the other, 0 if both are equal, 1 if the value is greater than
// the other.
func (n *Number) CompareInt64(i int64) int {
this := n.AsInt64()
if this < i {
return -1
} else if this > i {
return 1
}
return 0
}
// CompareFloat64 assumes that the Number contains a float64 and
// performs a comparison between the value and the other value. It
// returns the typical result of the compare function: -1 if the value
// is less than the other, 0 if both are equal, 1 if the value is
// greater than the other.
//
// Do not compare NaN values.
func (n *Number) CompareFloat64(f float64) int {
this := n.AsFloat64()
if this < f {
return -1
} else if this > f {
return 1
}
return 0
}
// - relations to zero
// IsPositive returns true if the actual value is greater than zero.
func (n *Number) IsPositive(kind NumberKind) bool {
return n.compareWithZero(kind) > 0
}
// IsNegative returns true if the actual value is less than zero.
func (n *Number) IsNegative(kind NumberKind) bool {
return n.compareWithZero(kind) < 0
}
// IsZero returns true if the actual value is equal to zero.
func (n *Number) IsZero(kind NumberKind) bool {
return n.compareWithZero(kind) == 0
}
// - misc
// Emit returns a string representation of the raw value of the
// Number. A %d is used for integral values, %f for floating point
// values.
func (n *Number) Emit(kind NumberKind) string {
switch kind {
case Int64NumberKind:
return fmt.Sprintf("%d", n.AsInt64())
case Float64NumberKind:
return fmt.Sprintf("%f", n.AsFloat64())
default:
return ""
}
}
// AsInterface returns the number as an interface{}, typically used
// for NumberKind-correct JSON conversion.
func (n *Number) AsInterface(kind NumberKind) interface{} {
switch kind {
case Int64NumberKind:
return n.AsInt64()
case Float64NumberKind:
return n.AsFloat64()
default:
return math.NaN()
}
}
// - private stuff
func (n *Number) compareWithZero(kind NumberKind) int {
switch kind {
case Int64NumberKind:
return n.CompareInt64(0)
case Float64NumberKind:
return n.CompareFloat64(0.)
default:
// you get what you deserve
return 0
}
}
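
Number is a kind-tagged 64-bit value; the atomic helpers make it usable as a shared accumulator without extra locking, as long as every caller agrees on the NumberKind. A minimal sketch using only methods defined above:

package main

import (
    "fmt"
    "sync"

    "go.opentelemetry.io/otel/api/metric"
)

func main() {
    kind := metric.Float64NumberKind
    sum := metric.NewFloat64Number(0)

    // Concurrent additions via the CAS-based atomic add.
    var wg sync.WaitGroup
    for i := 0; i < 8; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for j := 0; j < 1000; j++ {
                sum.AddFloat64Atomic(0.5)
            }
        }()
    }
    wg.Wait()

    fmt.Println(sum.Emit(kind))                          // "4000.000000"
    fmt.Println(sum.CompareNumber(kind, kind.Maximum())) // -1: below the float64 maximum
}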

View File

@ -1,24 +0,0 @@
// Code generated by "stringer -type=NumberKind"; DO NOT EDIT.
package metric
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[Int64NumberKind-0]
_ = x[Float64NumberKind-1]
}
const _NumberKind_name = "Int64NumberKindFloat64NumberKind"
var _NumberKind_index = [...]uint8{0, 15, 32}
func (i NumberKind) String() string {
if i < 0 || i >= NumberKind(len(_NumberKind_index)-1) {
return "NumberKind(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _NumberKind_name[_NumberKind_index[i]:_NumberKind_index[i+1]]
}

View File

@ -1,124 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
// BatchObserver represents an Observer callback that can report
// observations for multiple instruments.
type BatchObserver struct {
meter Meter
runner AsyncBatchRunner
}
// Int64ValueObserver is a metric that captures a set of int64 values at a
// point in time.
type Int64ValueObserver struct {
asyncInstrument
}
// Float64ValueObserver is a metric that captures a set of float64 values
// at a point in time.
type Float64ValueObserver struct {
asyncInstrument
}
// Int64SumObserver is a metric that captures a precomputed sum of
// int64 values at a point in time.
type Int64SumObserver struct {
asyncInstrument
}
// Float64SumObserver is a metric that captures a precomputed sum of
// float64 values at a point in time.
type Float64SumObserver struct {
asyncInstrument
}
// Int64UpDownSumObserver is a metric that captures a precomputed sum of
// int64 values at a point in time.
type Int64UpDownSumObserver struct {
asyncInstrument
}
// Float64UpDownSumObserver is a metric that captures a precomputed sum of
// float64 values at a point in time.
type Float64UpDownSumObserver struct {
asyncInstrument
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous integer instrument.
// This returns an implementation-level object for use by the SDK;
// users should not refer to this.
func (i Int64ValueObserver) Observation(v int64) Observation {
return Observation{
number: NewInt64Number(v),
instrument: i.instrument,
}
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous floating point instrument.
// This returns an implementation-level object for use by the SDK;
// users should not refer to this.
func (f Float64ValueObserver) Observation(v float64) Observation {
return Observation{
number: NewFloat64Number(v),
instrument: f.instrument,
}
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous integer instrument.
// This returns an implementation-level object for use by the SDK;
// users should not refer to this.
func (i Int64SumObserver) Observation(v int64) Observation {
return Observation{
number: NewInt64Number(v),
instrument: i.instrument,
}
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous floating point instrument.
// This returns an implementation-level object for use by the SDK;
// users should not refer to this.
func (f Float64SumObserver) Observation(v float64) Observation {
return Observation{
number: NewFloat64Number(v),
instrument: f.instrument,
}
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous integer instrument.
// This returns an implementation-level object for use by the SDK;
// users should not refer to this.
func (i Int64UpDownSumObserver) Observation(v int64) Observation {
return Observation{
number: NewInt64Number(v),
instrument: i.instrument,
}
}
// Observation returns an Observation, a BatchObserverFunc
// argument, for an asynchronous floating point instrument.
// This returns an implementation-level object for use by the SDK;
// users should not refer to this.
func (f Float64UpDownSumObserver) Observation(v float64) Observation {
return Observation{
number: NewFloat64Number(v),
instrument: f.instrument,
}
}
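
The Observation values above exist so a single batch callback can report to several instruments at once. A sketch, assuming the BatchObserverResult type and its Observe method defined elsewhere in this package; the zero-value Meter keeps the example self-contained, so the callback never actually runs:

package main

import (
    "context"

    "go.opentelemetry.io/otel/api/metric"
    "go.opentelemetry.io/otel/label"
)

func main() {
    var meter metric.Meter // no-op Meter for the sake of a self-contained example

    var usedBytes metric.Int64UpDownSumObserver
    var utilization metric.Float64ValueObserver

    batch := metric.Must(meter).NewBatchObserver(
        func(_ context.Context, result metric.BatchObserverResult) {
            // One callback, one label set, observations for two instruments.
            result.Observe(
                []label.KeyValue{label.String("device", "/dev/sda1")},
                usedBytes.Observation(512*1024*1024),
                utilization.Observation(0.42),
            )
        })

    usedBytes = batch.NewInt64UpDownSumObserver("disk.used.bytes")
    utilization = batch.NewFloat64ValueObserver("disk.utilization")
}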

View File

@ -1,170 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package registry // import "go.opentelemetry.io/otel/api/metric/registry"
import (
"context"
"fmt"
"sync"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/label"
)
// MeterProvider is a standard MeterProvider for wrapping a `MeterImpl`.
type MeterProvider struct {
impl metric.MeterImpl
}
var _ metric.MeterProvider = (*MeterProvider)(nil)
// uniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding
// uniqueness checking for instrument descriptors. Use NewUniqueInstrumentMeter
// to wrap an implementation with uniqueness checking.
type uniqueInstrumentMeterImpl struct {
lock sync.Mutex
impl metric.MeterImpl
state map[key]metric.InstrumentImpl
}
var _ metric.MeterImpl = (*uniqueInstrumentMeterImpl)(nil)
type key struct {
instrumentName string
instrumentationName string
InstrumentationVersion string
}
// NewMeterProvider returns a new provider that implements instrument
// name-uniqueness checking.
func NewMeterProvider(impl metric.MeterImpl) *MeterProvider {
return &MeterProvider{
impl: NewUniqueInstrumentMeterImpl(impl),
}
}
// Meter implements MeterProvider.
func (p *MeterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
return metric.WrapMeterImpl(p.impl, instrumentationName, opts...)
}
// ErrMetricKindMismatch is the standard error for mismatched metric
// instrument definitions.
var ErrMetricKindMismatch = fmt.Errorf(
"A metric was already registered by this name with another kind or number type")
// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl with
// the addition of uniqueness checking.
func NewUniqueInstrumentMeterImpl(impl metric.MeterImpl) metric.MeterImpl {
return &uniqueInstrumentMeterImpl{
impl: impl,
state: map[key]metric.InstrumentImpl{},
}
}
// RecordBatch implements metric.MeterImpl.
func (u *uniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []label.KeyValue, ms ...metric.Measurement) {
u.impl.RecordBatch(ctx, labels, ms...)
}
func keyOf(descriptor metric.Descriptor) key {
return key{
descriptor.Name(),
descriptor.InstrumentationName(),
descriptor.InstrumentationVersion(),
}
}
// NewMetricKindMismatchError formats an error that describes a
// mismatched metric instrument definition.
func NewMetricKindMismatchError(desc metric.Descriptor) error {
return fmt.Errorf("Metric was %s (%s %s)registered as a %s %s: %w",
desc.Name(),
desc.InstrumentationName(),
desc.InstrumentationVersion(),
desc.NumberKind(),
desc.MetricKind(),
ErrMetricKindMismatch)
}
// Compatible determines whether two metric.Descriptors are considered
// the same for the purpose of uniqueness checking.
func Compatible(candidate, existing metric.Descriptor) bool {
return candidate.MetricKind() == existing.MetricKind() &&
candidate.NumberKind() == existing.NumberKind()
}
// checkUniqueness returns an ErrMetricKindMismatch error if there is
// a conflict between a descriptor that was already registered and the
// `descriptor` argument. If there is an existing compatible
// registration, this returns the already-registered instrument. If
// there is no conflict and no prior registration, returns (nil, nil).
func (u *uniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor) (metric.InstrumentImpl, error) {
impl, ok := u.state[keyOf(descriptor)]
if !ok {
return nil, nil
}
if !Compatible(descriptor, impl.Descriptor()) {
return nil, NewMetricKindMismatchError(impl.Descriptor())
}
return impl, nil
}
// NewSyncInstrument implements metric.MeterImpl.
func (u *uniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
u.lock.Lock()
defer u.lock.Unlock()
impl, err := u.checkUniqueness(descriptor)
if err != nil {
return nil, err
} else if impl != nil {
return impl.(metric.SyncImpl), nil
}
syncInst, err := u.impl.NewSyncInstrument(descriptor)
if err != nil {
return nil, err
}
u.state[keyOf(descriptor)] = syncInst
return syncInst, nil
}
// NewAsyncInstrument implements metric.MeterImpl.
func (u *uniqueInstrumentMeterImpl) NewAsyncInstrument(
descriptor metric.Descriptor,
runner metric.AsyncRunner,
) (metric.AsyncImpl, error) {
u.lock.Lock()
defer u.lock.Unlock()
impl, err := u.checkUniqueness(descriptor)
if err != nil {
return nil, err
} else if impl != nil {
return impl.(metric.AsyncImpl), nil
}
asyncInst, err := u.impl.NewAsyncInstrument(descriptor, runner)
if err != nil {
return nil, err
}
u.state[keyOf(descriptor)] = asyncInst
return asyncInst, nil
}
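A minimal sketch of how the uniqueness check behaves for a caller, written as if inside this registry package; the NewDescriptor constructor and the CounterKind, ValueRecorderKind, Int64NumberKind, and Float64NumberKind constants are assumed from the companion metric package (as in otel v0.13.x):
// registerTwice is a hypothetical helper: the second registration of the
// "requests" name with a different kind/number type is rejected with an
// error wrapping ErrMetricKindMismatch.
func registerTwice(impl metric.MeterImpl) error {
	unique := NewUniqueInstrumentMeterImpl(impl)

	// First registration succeeds and is remembered under keyOf(descriptor).
	counterDesc := metric.NewDescriptor("requests", metric.CounterKind, metric.Int64NumberKind)
	if _, err := unique.NewSyncInstrument(counterDesc); err != nil {
		return err
	}

	// Same name, incompatible kind: checkUniqueness fails.
	recorderDesc := metric.NewDescriptor("requests", metric.ValueRecorderKind, metric.Float64NumberKind)
	_, err := unique.NewSyncInstrument(recorderDesc)
	return err
}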

View File

@ -1,94 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"go.opentelemetry.io/otel/label"
)
// MeterImpl is the interface an SDK must implement to supply a Meter
// implementation.
type MeterImpl interface {
// RecordBatch atomically records a batch of measurements.
RecordBatch(ctx context.Context, labels []label.KeyValue, measurement ...Measurement)
// NewSyncInstrument returns a newly constructed
// synchronous instrument implementation or an error, should
// one occur.
NewSyncInstrument(descriptor Descriptor) (SyncImpl, error)
// NewAsyncInstrument returns a newly constructed
// asynchronous instrument implementation or an error, should
// one occur.
NewAsyncInstrument(
descriptor Descriptor,
runner AsyncRunner,
) (AsyncImpl, error)
}
// InstrumentImpl is a common interface for synchronous and
// asynchronous instruments.
type InstrumentImpl interface {
// Implementation returns the underlying implementation of the
// instrument, which allows the implementation to gain access
// to its own representation especially from a `Measurement`.
Implementation() interface{}
// Descriptor returns a copy of the instrument's Descriptor.
Descriptor() Descriptor
}
// SyncImpl is the implementation-level interface to a generic
// synchronous instrument (e.g., ValueRecorder and Counter instruments).
type SyncImpl interface {
InstrumentImpl
// Bind creates an implementation-level bound instrument,
// binding a label set with this instrument implementation.
Bind(labels []label.KeyValue) BoundSyncImpl
// RecordOne captures a single synchronous metric event.
RecordOne(ctx context.Context, number Number, labels []label.KeyValue)
}
// BoundSyncImpl is the implementation-level interface to a
// generic bound synchronous instrument
type BoundSyncImpl interface {
// RecordOne captures a single synchronous metric event.
RecordOne(ctx context.Context, number Number)
// Unbind frees the resources associated with this bound instrument. It
// does not affect the metric this bound instrument was created through.
Unbind()
}
// AsyncImpl is an implementation-level interface to an
// asynchronous instrument (e.g., Observer instruments).
type AsyncImpl interface {
InstrumentImpl
}
// WrapMeterImpl constructs a `Meter` implementation from a
// `MeterImpl` implementation.
func WrapMeterImpl(impl MeterImpl, instrumentationName string, opts ...MeterOption) Meter {
return Meter{
impl: impl,
name: instrumentationName,
version: NewMeterConfig(opts...).InstrumentationVersion,
}
}
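A small sketch of the wrapping path, written as if inside this metric package; the instrumentation name is illustrative:
// meterFromImpl is a hypothetical helper: it wraps a raw MeterImpl (for
// example the unique-instrument wrapper defined above) into the user-facing
// Meter that instrumentation code holds.
func meterFromImpl(impl MeterImpl) Meter {
	return WrapMeterImpl(impl, "example.io/instrumentation")
}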

View File

@ -1,192 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"errors"
"go.opentelemetry.io/otel/label"
)
// ErrSDKReturnedNilImpl is returned when a new `MeterImpl` returns nil.
var ErrSDKReturnedNilImpl = errors.New("SDK returned a nil implementation")
// Measurement is used for reporting a synchronous batch of metric
// values. Instances of this type should be created by synchronous
// instruments (e.g., Int64Counter.Measurement()).
type Measurement struct {
// number needs to be aligned for 64-bit atomic operations.
number Number
instrument SyncImpl
}
// syncInstrument contains a SyncImpl.
type syncInstrument struct {
instrument SyncImpl
}
// syncBoundInstrument contains a BoundSyncImpl.
type syncBoundInstrument struct {
boundInstrument BoundSyncImpl
}
// asyncInstrument contains an AsyncImpl.
type asyncInstrument struct {
instrument AsyncImpl
}
// SyncImpl returns the instrument that created this measurement.
// This returns an implementation-level object for use by the SDK;
// users should not refer to this.
func (m Measurement) SyncImpl() SyncImpl {
return m.instrument
}
// Number returns a number recorded in this measurement.
func (m Measurement) Number() Number {
return m.number
}
// AsyncImpl returns the instrument that created this observation.
// This returns an implementation-level object for use by the SDK;
// users should not refer to this.
func (m Observation) AsyncImpl() AsyncImpl {
return m.instrument
}
// Number returns a number recorded in this observation.
func (m Observation) Number() Number {
return m.number
}
// AsyncImpl implements AsyncImpl.
func (a asyncInstrument) AsyncImpl() AsyncImpl {
return a.instrument
}
// SyncImpl returns the implementation object for synchronous instruments.
func (s syncInstrument) SyncImpl() SyncImpl {
return s.instrument
}
func (s syncInstrument) bind(labels []label.KeyValue) syncBoundInstrument {
return newSyncBoundInstrument(s.instrument.Bind(labels))
}
func (s syncInstrument) float64Measurement(value float64) Measurement {
return newMeasurement(s.instrument, NewFloat64Number(value))
}
func (s syncInstrument) int64Measurement(value int64) Measurement {
return newMeasurement(s.instrument, NewInt64Number(value))
}
func (s syncInstrument) directRecord(ctx context.Context, number Number, labels []label.KeyValue) {
s.instrument.RecordOne(ctx, number, labels)
}
func (h syncBoundInstrument) directRecord(ctx context.Context, number Number) {
h.boundInstrument.RecordOne(ctx, number)
}
// Unbind calls SyncImpl.Unbind.
func (h syncBoundInstrument) Unbind() {
h.boundInstrument.Unbind()
}
// checkNewAsync receives an AsyncImpl and a potential
// error, and returns the same types, checking for and ensuring that
// the returned interface is not nil.
func checkNewAsync(instrument AsyncImpl, err error) (asyncInstrument, error) {
if instrument == nil {
if err == nil {
err = ErrSDKReturnedNilImpl
}
instrument = NoopAsync{}
}
return asyncInstrument{
instrument: instrument,
}, err
}
// checkNewSync receives a SyncImpl and a potential
// error, and returns the same types, checking for and ensuring that
// the returned interface is not nil.
func checkNewSync(instrument SyncImpl, err error) (syncInstrument, error) {
if instrument == nil {
if err == nil {
err = ErrSDKReturnedNilImpl
}
// Note: an alternate behavior would be to synthesize a new name
// or group all duplicately-named instruments of a certain type
// together and use a tag for the original name, e.g.,
// name = 'invalid.counter.int64'
// label = 'original-name=duplicate-counter-name'
instrument = NoopSync{}
}
return syncInstrument{
instrument: instrument,
}, err
}
func newSyncBoundInstrument(boundInstrument BoundSyncImpl) syncBoundInstrument {
return syncBoundInstrument{
boundInstrument: boundInstrument,
}
}
func newMeasurement(instrument SyncImpl, number Number) Measurement {
return Measurement{
instrument: instrument,
number: number,
}
}
// wrapInt64CounterInstrument converts a SyncImpl into Int64Counter.
func wrapInt64CounterInstrument(syncInst SyncImpl, err error) (Int64Counter, error) {
common, err := checkNewSync(syncInst, err)
return Int64Counter{syncInstrument: common}, err
}
// wrapFloat64CounterInstrument converts a SyncImpl into Float64Counter.
func wrapFloat64CounterInstrument(syncInst SyncImpl, err error) (Float64Counter, error) {
common, err := checkNewSync(syncInst, err)
return Float64Counter{syncInstrument: common}, err
}
// wrapInt64UpDownCounterInstrument converts a SyncImpl into Int64UpDownCounter.
func wrapInt64UpDownCounterInstrument(syncInst SyncImpl, err error) (Int64UpDownCounter, error) {
common, err := checkNewSync(syncInst, err)
return Int64UpDownCounter{syncInstrument: common}, err
}
// wrapFloat64UpDownCounterInstrument converts a SyncImpl into Float64UpDownCounter.
func wrapFloat64UpDownCounterInstrument(syncInst SyncImpl, err error) (Float64UpDownCounter, error) {
common, err := checkNewSync(syncInst, err)
return Float64UpDownCounter{syncInstrument: common}, err
}
// wrapInt64ValueRecorderInstrument converts a SyncImpl into Int64ValueRecorder.
func wrapInt64ValueRecorderInstrument(syncInst SyncImpl, err error) (Int64ValueRecorder, error) {
common, err := checkNewSync(syncInst, err)
return Int64ValueRecorder{syncInstrument: common}, err
}
// wrapFloat64ValueRecorderInstrument converts a SyncImpl into Float64ValueRecorder.
func wrapFloat64ValueRecorderInstrument(syncInst SyncImpl, err error) (Float64ValueRecorder, error) {
common, err := checkNewSync(syncInst, err)
return Float64ValueRecorder{syncInstrument: common}, err
}
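To show where Measurement values end up, a hedged sketch of batch recording, written as if inside this metric package; label.String is assumed from the label package, and the instruments are taken as parameters rather than constructed here:
// recordBatch is a hypothetical helper: it records one delta and one
// distribution value atomically against a single label set.
func recordBatch(ctx context.Context, impl MeterImpl, queueLen Int64UpDownCounter, latency Float64ValueRecorder) {
	labels := []label.KeyValue{label.String("queue", "ingest")}
	impl.RecordBatch(ctx, labels,
		queueLen.Measurement(12),
		latency.Measurement(3.5),
	)
}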

View File

@ -1,96 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"go.opentelemetry.io/otel/label"
)
// Float64UpDownCounter is a metric instrument that sums floating
// point values.
type Float64UpDownCounter struct {
syncInstrument
}
// Int64UpDownCounter is a metric instrument that sums integer values.
type Int64UpDownCounter struct {
syncInstrument
}
// BoundFloat64UpDownCounter is a bound instrument for Float64UpDownCounter.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundFloat64UpDownCounter struct {
syncBoundInstrument
}
// BoundInt64UpDownCounter is a bound instrument for Int64UpDownCounter.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundInt64UpDownCounter struct {
syncBoundInstrument
}
// Bind creates a bound instrument for this counter. The labels are
// associated with values recorded via subsequent calls to Add.
func (c Float64UpDownCounter) Bind(labels ...label.KeyValue) (h BoundFloat64UpDownCounter) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Bind creates a bound instrument for this counter. The labels are
// associated with values recorded via subsequent calls to Add.
func (c Int64UpDownCounter) Bind(labels ...label.KeyValue) (h BoundInt64UpDownCounter) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Float64UpDownCounter) Measurement(value float64) Measurement {
return c.float64Measurement(value)
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Int64UpDownCounter) Measurement(value int64) Measurement {
return c.int64Measurement(value)
}
// Add adds the value to the counter's sum. The labels should contain
// the keys and values to be associated with this value.
func (c Float64UpDownCounter) Add(ctx context.Context, value float64, labels ...label.KeyValue) {
c.directRecord(ctx, NewFloat64Number(value), labels)
}
// Add adds the value to the counter's sum. The labels should contain
// the keys and values to be associated with this value.
func (c Int64UpDownCounter) Add(ctx context.Context, value int64, labels ...label.KeyValue) {
c.directRecord(ctx, NewInt64Number(value), labels)
}
// Add adds the value to the counter's sum using the labels
// previously bound to this counter via Bind()
func (b BoundFloat64UpDownCounter) Add(ctx context.Context, value float64) {
b.directRecord(ctx, NewFloat64Number(value))
}
// Add adds the value to the counter's sum using the labels
// previously bound to this counter via Bind()
func (b BoundInt64UpDownCounter) Add(ctx context.Context, value int64) {
b.directRecord(ctx, NewInt64Number(value))
}
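A brief sketch of direct versus bound recording on an up-down counter, written as if inside this package; label.String and the "queue" key are illustrative:
// trackQueueDepth is a hypothetical helper contrasting the two recording
// paths defined above.
func trackQueueDepth(ctx context.Context, depth Int64UpDownCounter) {
	// Direct recording: the labels travel with every call.
	depth.Add(ctx, 1, label.String("queue", "ingest"))

	// Bound recording: bind the labels once, then add without repeating them.
	bound := depth.Bind(label.String("queue", "ingest"))
	defer bound.Unbind()
	bound.Add(ctx, -1)
}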

View File

@ -1,97 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"go.opentelemetry.io/otel/label"
)
// Float64ValueRecorder is a metric that records float64 values.
type Float64ValueRecorder struct {
syncInstrument
}
// Int64ValueRecorder is a metric that records int64 values.
type Int64ValueRecorder struct {
syncInstrument
}
// BoundFloat64ValueRecorder is a bound instrument for Float64ValueRecorder.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundFloat64ValueRecorder struct {
syncBoundInstrument
}
// BoundInt64ValueRecorder is a bound instrument for Int64ValueRecorder.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundInt64ValueRecorder struct {
syncBoundInstrument
}
// Bind creates a bound instrument for this ValueRecorder. The labels are
// associated with values recorded via subsequent calls to Record.
func (c Float64ValueRecorder) Bind(labels ...label.KeyValue) (h BoundFloat64ValueRecorder) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Bind creates a bound instrument for this ValueRecorder. The labels are
// associated with values recorded via subsequent calls to Record.
func (c Int64ValueRecorder) Bind(labels ...label.KeyValue) (h BoundInt64ValueRecorder) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Float64ValueRecorder) Measurement(value float64) Measurement {
return c.float64Measurement(value)
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Int64ValueRecorder) Measurement(value int64) Measurement {
return c.int64Measurement(value)
}
// Record adds a new value to the ValueRecorder's distribution. The
// labels should contain the keys and values to be associated with
// this value.
func (c Float64ValueRecorder) Record(ctx context.Context, value float64, labels ...label.KeyValue) {
c.directRecord(ctx, NewFloat64Number(value), labels)
}
// Record adds a new value to the ValueRecorder's distribution. The
// labels should contain the keys and values to be associated with
// this value.
func (c Int64ValueRecorder) Record(ctx context.Context, value int64, labels ...label.KeyValue) {
c.directRecord(ctx, NewInt64Number(value), labels)
}
// Record adds a new value to the ValueRecorder's distribution using the labels
// previously bound to the ValueRecorder via Bind().
func (b BoundFloat64ValueRecorder) Record(ctx context.Context, value float64) {
b.directRecord(ctx, NewFloat64Number(value))
}
// Record adds a new value to the ValueRecorder's distribution using the labels
// previously bound to the ValueRecorder via Bind().
func (b BoundInt64ValueRecorder) Record(ctx context.Context, value int64) {
b.directRecord(ctx, NewInt64Number(value))
}
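The same pattern for value recorders, as a hedged sketch written as if inside this package; label.String and the route label are illustrative:
// recordLatency is a hypothetical helper: it records one distribution value
// directly and one through a bound instrument.
func recordLatency(ctx context.Context, latency Float64ValueRecorder, millis float64) {
	latency.Record(ctx, millis, label.String("route", "/users"))

	bound := latency.Bind(label.String("route", "/users"))
	defer bound.Unbind()
	bound.Record(ctx, millis)
}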

View File

@ -1,314 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"context"
"time"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/label"
)
// TracerProvider provides access to instrumentation Tracers.
type TracerProvider interface {
// Tracer creates an implementation of the Tracer interface.
// The instrumentationName must be the name of the library providing
// instrumentation. This name may be the same as the instrumented code
// only if that code provides built-in instrumentation. If the
// instrumentationName is empty, then an implementation-defined default
// name will be used instead.
Tracer(instrumentationName string, opts ...TracerOption) Tracer
}
// TracerConfig is a group of options for a Tracer.
//
// Most users will use the tracer options instead.
type TracerConfig struct {
// InstrumentationVersion is the version of the instrumentation library.
InstrumentationVersion string
}
// NewTracerConfig applies all the options to a returned TracerConfig.
// The default values for all the fields of the returned TracerConfig are the
// zero values of their types. Also, this does not perform any validation
// on the returned TracerConfig (e.g. no uniqueness checking or bounding of
// data). Instead, it is left to the implementations of the SDK to perform this
// action.
func NewTracerConfig(opts ...TracerOption) *TracerConfig {
config := new(TracerConfig)
for _, option := range opts {
option.Apply(config)
}
return config
}
// TracerOption applies an options to a TracerConfig.
type TracerOption interface {
Apply(*TracerConfig)
}
type instVersionTracerOption string
func (o instVersionTracerOption) Apply(c *TracerConfig) { c.InstrumentationVersion = string(o) }
// WithInstrumentationVersion sets the instrumentation version for a Tracer.
func WithInstrumentationVersion(version string) TracerOption {
return instVersionTracerOption(version)
}
type Tracer interface {
// Start a span.
Start(ctx context.Context, spanName string, opts ...SpanOption) (context.Context, Span)
}
// ErrorConfig provides options to set properties of an error
// event at the time it is recorded.
//
// Most users will use the error options instead.
type ErrorConfig struct {
Timestamp time.Time
StatusCode codes.Code
}
// ErrorOption applies changes to ErrorConfig that sets options when an error event is recorded.
type ErrorOption func(*ErrorConfig)
// WithErrorTime sets the time at which the error event should be recorded.
func WithErrorTime(t time.Time) ErrorOption {
return func(c *ErrorConfig) {
c.Timestamp = t
}
}
// WithErrorStatus indicates the span status that should be set when recording an error event.
func WithErrorStatus(s codes.Code) ErrorOption {
return func(c *ErrorConfig) {
c.StatusCode = s
}
}
type Span interface {
// Tracer returns the tracer used to create this span. Tracer cannot be nil.
Tracer() Tracer
// End completes the span. No updates are allowed to a span after it
// ends. The only exception is setting the status of the span.
End(options ...SpanOption)
// AddEvent adds an event to the span.
AddEvent(ctx context.Context, name string, attrs ...label.KeyValue)
// AddEventWithTimestamp adds an event with a custom timestamp
// to the span.
AddEventWithTimestamp(ctx context.Context, timestamp time.Time, name string, attrs ...label.KeyValue)
// IsRecording returns true if the span is active and recording events is enabled.
IsRecording() bool
// RecordError records an error as a span event.
RecordError(ctx context.Context, err error, opts ...ErrorOption)
// SpanContext returns span context of the span. Returned SpanContext is usable
// even after the span ends.
SpanContext() SpanContext
// SetStatus sets the status of the span in the form of a code
// and a message. SetStatus overrides the value of previous
// calls to SetStatus on the Span.
//
// The default span status is OK, so it is not necessary to
// explicitly set an OK status on successful Spans unless it
// is to add an OK message or to override a previous status on the Span.
SetStatus(code codes.Code, msg string)
// SetName sets the name of the span.
SetName(name string)
// SetAttributes sets the attributes of the span.
SetAttributes(kv ...label.KeyValue)
}
// SpanConfig is a group of options for a Span.
//
// Most users will use span options instead.
type SpanConfig struct {
// Attributes describe the associated qualities of a Span.
Attributes []label.KeyValue
// Timestamp is a time in a Span life-cycle.
Timestamp time.Time
// Links are the associations a Span has with other Spans.
Links []Link
// Record is the recording state of a Span.
Record bool
// NewRoot identifies a Span as the root Span for a new trace. This is
// commonly used when an existing trace crosses trust boundaries and the
// remote parent span context should be ignored for security.
NewRoot bool
// SpanKind is the role a Span has in a trace.
SpanKind SpanKind
}
// NewSpanConfig applies all the options to a returned SpanConfig.
// The default values for all the fields of the returned SpanConfig are the
// zero values of their types. Also, this does not perform any validation
// on the returned SpanConfig (e.g. no uniqueness checking or bounding of
// data). Instead, it is left to the implementations of the SDK to perform this
// action.
func NewSpanConfig(opts ...SpanOption) *SpanConfig {
c := new(SpanConfig)
for _, option := range opts {
option.Apply(c)
}
return c
}
// SpanOption applies an option to a SpanConfig.
type SpanOption interface {
Apply(*SpanConfig)
}
type attributeSpanOption []label.KeyValue
func (o attributeSpanOption) Apply(c *SpanConfig) {
c.Attributes = append(c.Attributes, []label.KeyValue(o)...)
}
// WithAttributes adds the attributes to a span. These attributes are meant to
// provide additional information about the work the Span represents. The
// attributes are added to the existing Span attributes, i.e. this does not
// overwrite.
func WithAttributes(attributes ...label.KeyValue) SpanOption {
return attributeSpanOption(attributes)
}
type timestampSpanOption time.Time
func (o timestampSpanOption) Apply(c *SpanConfig) { c.Timestamp = time.Time(o) }
// WithTimestamp sets the time of a Span life-cycle moment (e.g. started or
// stopped).
func WithTimestamp(t time.Time) SpanOption {
return timestampSpanOption(t)
}
type linksSpanOption []Link
func (o linksSpanOption) Apply(c *SpanConfig) { c.Links = append(c.Links, []Link(o)...) }
// WithLinks adds links to a Span. The links are added to the existing Span
// links, i.e. this does not overwrite.
func WithLinks(links ...Link) SpanOption {
return linksSpanOption(links)
}
type recordSpanOption bool
func (o recordSpanOption) Apply(c *SpanConfig) { c.Record = bool(o) }
// WithRecord specifies that the span should be recorded. It is important to
// note that implementations may override this option, i.e. if the span is a
// child of an un-sampled trace.
func WithRecord() SpanOption {
return recordSpanOption(true)
}
type newRootSpanOption bool
func (o newRootSpanOption) Apply(c *SpanConfig) { c.NewRoot = bool(o) }
// WithNewRoot specifies that the Span should be treated as a root Span. Any
// existing parent span context will be ignored when defining the Span's trace
// identifiers.
func WithNewRoot() SpanOption {
return newRootSpanOption(true)
}
type spanKindSpanOption SpanKind
func (o spanKindSpanOption) Apply(c *SpanConfig) { c.SpanKind = SpanKind(o) }
// WithSpanKind sets the SpanKind of a Span.
func WithSpanKind(kind SpanKind) SpanOption {
return spanKindSpanOption(kind)
}
// Link is used to establish a relationship between two spans within the same Trace or
// across different Traces. A few examples of Link usage:
// 1. Batch Processing: A batch of elements may contain elements associated with one
// or more traces/spans. Since there can only be one parent SpanContext, Link is
// used to keep a reference to the SpanContext of all elements in the batch.
// 2. Public Endpoint: A SpanContext in an incoming client request on a public endpoint
// is untrusted from the service provider's perspective. In such a case it is advisable
// to start a new trace with an appropriate sampling decision.
// However, it is desirable to associate the incoming SpanContext with the new trace
// initiated on the service provider side so the two traces (from the client and from
// the service provider) can be correlated.
type Link struct {
SpanContext
Attributes []label.KeyValue
}
// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span
// will be processed and visualized by various backends.
type SpanKind int
const (
// As a convenience, these match the proto definition, see
// opentelemetry/proto/trace/v1/trace.proto
//
// The unspecified value is not a valid `SpanKind`. Use
// `ValidateSpanKind()` to coerce a span kind to a valid
// value.
SpanKindUnspecified SpanKind = 0
SpanKindInternal SpanKind = 1
SpanKindServer SpanKind = 2
SpanKindClient SpanKind = 3
SpanKindProducer SpanKind = 4
SpanKindConsumer SpanKind = 5
)
// ValidateSpanKind returns a valid span kind value. This will coerce
// invalid values into the default value, SpanKindInternal.
func ValidateSpanKind(spanKind SpanKind) SpanKind {
switch spanKind {
case SpanKindInternal,
SpanKindServer,
SpanKindClient,
SpanKindProducer,
SpanKindConsumer:
// valid
return spanKind
default:
return SpanKindInternal
}
}
// String returns the specified name of the SpanKind in lower-case.
func (sk SpanKind) String() string {
switch sk {
case SpanKindInternal:
return "internal"
case SpanKindServer:
return "server"
case SpanKindClient:
return "client"
case SpanKindProducer:
return "producer"
case SpanKindConsumer:
return "consumer"
default:
return "unspecified"
}
}
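A hedged end-to-end sketch of the Tracer and Span surface defined in this file, written as if inside this trace package; the tracer argument stands in for whatever TracerProvider the application configured, and label.String is assumed from the label package:
// handleRequest is a hypothetical helper: it starts a server span with the
// options above, annotates it, and reports an error status when needed.
func handleRequest(ctx context.Context, tracer Tracer, err error) {
	ctx, span := tracer.Start(ctx, "handleRequest",
		WithSpanKind(SpanKindServer),
		WithAttributes(label.String("peer.service", "frontend")),
	)
	defer span.End()

	span.AddEvent(ctx, "request.received")
	if err != nil {
		span.RecordError(ctx, err, WithErrorStatus(codes.Error))
		span.SetStatus(codes.Error, err.Error())
	}
}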

View File

@ -1,55 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"context"
)
type traceContextKeyType int
const (
currentSpanKey traceContextKeyType = iota
remoteContextKey
)
// ContextWithSpan creates a new context with a current span set to
// the passed span.
func ContextWithSpan(ctx context.Context, span Span) context.Context {
return context.WithValue(ctx, currentSpanKey, span)
}
// SpanFromContext returns the current span stored in the context.
func SpanFromContext(ctx context.Context) Span {
if span, has := ctx.Value(currentSpanKey).(Span); has {
return span
}
return noopSpan{}
}
// ContextWithRemoteSpanContext creates a new context with a remote
// span context set to the passed span context.
func ContextWithRemoteSpanContext(ctx context.Context, sc SpanContext) context.Context {
return context.WithValue(ctx, remoteContextKey, sc)
}
// RemoteSpanContextFromContext returns the remote span context stored
// in the context.
func RemoteSpanContextFromContext(ctx context.Context) SpanContext {
if sc, ok := ctx.Value(remoteContextKey).(SpanContext); ok {
return sc
}
return EmptySpanContext()
}
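A short sketch of the two context helpers above, written as if inside this package:
// spanFromIncoming is a hypothetical helper: it stashes a remote SpanContext
// on the context and then reads back whichever span is current, falling back
// to the noop span when none has been set.
func spanFromIncoming(ctx context.Context, remote SpanContext) Span {
	ctx = ContextWithRemoteSpanContext(ctx, remote)
	_ = RemoteSpanContextFromContext(ctx) // the remote parent, if needed later
	return SpanFromContext(ctx)           // noopSpan{} unless a span was set
}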

View File

@ -1,16 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package trace provides tracing support.
package trace // import "go.opentelemetry.io/otel/api/trace"

View File

@ -1,75 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"context"
"time"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/label"
)
type noopSpan struct {
}
var _ Span = noopSpan{}
// SpanContext returns an invalid span context.
func (noopSpan) SpanContext() SpanContext {
return EmptySpanContext()
}
// IsRecording always returns false for NoopSpan.
func (noopSpan) IsRecording() bool {
return false
}
// SetStatus does nothing.
func (noopSpan) SetStatus(status codes.Code, msg string) {
}
// SetError does nothing.
func (noopSpan) SetError(v bool) {
}
// SetAttributes does nothing.
func (noopSpan) SetAttributes(attributes ...label.KeyValue) {
}
// End does nothing.
func (noopSpan) End(options ...SpanOption) {
}
// RecordError does nothing.
func (noopSpan) RecordError(ctx context.Context, err error, opts ...ErrorOption) {
}
// Tracer returns a noop implementation of Tracer.
func (noopSpan) Tracer() Tracer {
return noopTracer{}
}
// AddEvent does nothing.
func (noopSpan) AddEvent(ctx context.Context, name string, attrs ...label.KeyValue) {
}
// AddEventWithTimestamp does nothing.
func (noopSpan) AddEventWithTimestamp(ctx context.Context, timestamp time.Time, name string, attrs ...label.KeyValue) {
}
// SetName does nothing.
func (noopSpan) SetName(name string) {
}

View File

@ -1,29 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"context"
)
type noopTracer struct{}
var _ Tracer = noopTracer{}
// Start starts a noop span.
func (noopTracer) Start(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
span := noopSpan{}
return ContextWithSpan(ctx, span), span
}

View File

@ -1,30 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
type noopTracerProvider struct{}
var _ TracerProvider = noopTracerProvider{}
// Tracer returns a noop implementation of Tracer.
func (p noopTracerProvider) Tracer(_ string, _ ...TracerOption) Tracer {
return noopTracer{}
}
// NoopTracerProvider returns a noop implementation of TracerProvider. The
// Tracer and Spans created from the noop provider will also be noop.
func NoopTracerProvider() TracerProvider {
return noopTracerProvider{}
}

View File

@ -1,197 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"bytes"
"encoding/hex"
"encoding/json"
)
const (
// FlagsSampled is a bitmask with the sampled bit set. A SpanContext
// with the sampling bit set means the span is sampled.
FlagsSampled = byte(0x01)
// FlagsDeferred is a bitmask with the deferred bit set. A SpanContext
// with the deferred bit set means the sampling decision has been
// deferred to the receiver.
FlagsDeferred = byte(0x02)
// FlagsDebug is a bitmask with the debug bit set.
FlagsDebug = byte(0x04)
ErrInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase"
ErrInvalidTraceIDLength errorConst = "hex encoded trace-id must have a length equal to 32"
ErrNilTraceID errorConst = "trace-id can't be all zero"
ErrInvalidSpanIDLength errorConst = "hex encoded span-id must have a length equal to 16"
ErrNilSpanID errorConst = "span-id can't be all zero"
)
type errorConst string
func (e errorConst) Error() string {
return string(e)
}
// ID is a unique identity of a trace.
type ID [16]byte
var nilTraceID ID
var _ json.Marshaler = nilTraceID
// IsValid checks whether the trace ID is valid. A valid trace ID does
// not consist of zeros only.
func (t ID) IsValid() bool {
return !bytes.Equal(t[:], nilTraceID[:])
}
// MarshalJSON implements a custom marshal function to encode TraceID
// as a hex string.
func (t ID) MarshalJSON() ([]byte, error) {
return json.Marshal(t.String())
}
// String returns the hex string representation of a TraceID.
func (t ID) String() string {
return hex.EncodeToString(t[:])
}
// SpanID is a unique identity of a span in a trace.
type SpanID [8]byte
var nilSpanID SpanID
var _ json.Marshaler = nilSpanID
// IsValid checks whether the span ID is valid. A valid span ID does
// not consist of zeros only.
func (s SpanID) IsValid() bool {
return !bytes.Equal(s[:], nilSpanID[:])
}
// MarshalJSON implements a custom marshal function to encode SpanID
// as a hex string.
func (s SpanID) MarshalJSON() ([]byte, error) {
return json.Marshal(s.String())
}
// String returns the hex string representation of a SpanID.
func (s SpanID) String() string {
return hex.EncodeToString(s[:])
}
// IDFromHex returns a TraceID from a hex string if it is compliant
// with the w3c trace-context specification.
// See more at https://www.w3.org/TR/trace-context/#trace-id
func IDFromHex(h string) (ID, error) {
t := ID{}
if len(h) != 32 {
return t, ErrInvalidTraceIDLength
}
if err := decodeHex(h, t[:]); err != nil {
return t, err
}
if !t.IsValid() {
return t, ErrNilTraceID
}
return t, nil
}
// SpanIDFromHex returns a SpanID from a hex string if it is compliant
// with the w3c trace-context specification.
// See more at https://www.w3.org/TR/trace-context/#parent-id
func SpanIDFromHex(h string) (SpanID, error) {
s := SpanID{}
if len(h) != 16 {
return s, ErrInvalidSpanIDLength
}
if err := decodeHex(h, s[:]); err != nil {
return s, err
}
if !s.IsValid() {
return s, ErrNilSpanID
}
return s, nil
}
func decodeHex(h string, b []byte) error {
for _, r := range h {
switch {
case 'a' <= r && r <= 'f':
continue
case '0' <= r && r <= '9':
continue
default:
return ErrInvalidHexID
}
}
decoded, err := hex.DecodeString(h)
if err != nil {
return err
}
copy(b, decoded)
return nil
}
// SpanContext contains basic information about the span - its trace
// ID, span ID and trace flags.
type SpanContext struct {
TraceID ID
SpanID SpanID
TraceFlags byte
}
// EmptySpanContext is meant for internal use to return invalid span
// context during error conditions.
func EmptySpanContext() SpanContext {
return SpanContext{}
}
// IsValid checks if the span context is valid. A valid span context
// has a valid trace ID and a valid span ID.
func (sc SpanContext) IsValid() bool {
return sc.HasTraceID() && sc.HasSpanID()
}
// HasTraceID checks if the span context has a valid trace ID.
func (sc SpanContext) HasTraceID() bool {
return sc.TraceID.IsValid()
}
// HasSpanID checks if the span context has a valid span ID.
func (sc SpanContext) HasSpanID() bool {
return sc.SpanID.IsValid()
}
// IsDeferred returns whether the deferred bit is set in the trace flags.
func (sc SpanContext) IsDeferred() bool {
return sc.TraceFlags&FlagsDeferred == FlagsDeferred
}
// IsDebug returns whether the debug bit is set in the trace flags.
func (sc SpanContext) IsDebug() bool {
return sc.TraceFlags&FlagsDebug == FlagsDebug
}
// IsSampled returns whether the sampling bit is set in the trace flags.
func (sc SpanContext) IsSampled() bool {
return sc.TraceFlags&FlagsSampled == FlagsSampled
}
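A minimal sketch of building a sampled SpanContext from hex identifiers, written as if inside this package; the input strings are expected in the W3C trace-context form validated above:
// parseSpanContext is a hypothetical helper: it parses the two lowercase hex
// identifiers and marks the resulting context as sampled.
func parseSpanContext(traceIDHex, spanIDHex string) (SpanContext, error) {
	traceID, err := IDFromHex(traceIDHex) // 32 hex characters
	if err != nil {
		return EmptySpanContext(), err
	}
	spanID, err := SpanIDFromHex(spanIDHex) // 16 hex characters
	if err != nil {
		return EmptySpanContext(), err
	}
	// Both IDs parsed, so the result satisfies IsValid and IsSampled.
	return SpanContext{TraceID: traceID, SpanID: spanID, TraceFlags: FlagsSampled}, nil
}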

View File

@ -1,67 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otel
import (
"context"
"go.opentelemetry.io/otel/internal/baggage"
"go.opentelemetry.io/otel/label"
)
// Baggage returns a copy of the baggage in ctx.
func Baggage(ctx context.Context) label.Set {
// TODO (MrAlias, #1222): The underlying storage, the Map, shares many of
// the functional elements of the label.Set. These should be unified so
// this conversion is unnecessary and there is no performance hit calling
// this.
m := baggage.MapFromContext(ctx)
values := make([]label.KeyValue, 0, m.Len())
m.Foreach(func(kv label.KeyValue) bool {
values = append(values, kv)
return true
})
return label.NewSet(values...)
}
// BaggageValue returns the value related to key in the baggage of ctx. If no
// value is set, the returned label.Value will be an uninitialized zero-value
// with type INVALID.
func BaggageValue(ctx context.Context, key label.Key) label.Value {
v, _ := baggage.MapFromContext(ctx).Value(key)
return v
}
// ContextWithBaggageValues returns a copy of parent with pairs updated in the baggage.
func ContextWithBaggageValues(parent context.Context, pairs ...label.KeyValue) context.Context {
m := baggage.MapFromContext(parent).Apply(baggage.MapUpdate{
MultiKV: pairs,
})
return baggage.ContextWithMap(parent, m)
}
// ContextWithoutBaggageValues returns a copy of parent in which the values related
// to keys have been removed from the baggage.
func ContextWithoutBaggageValues(parent context.Context, keys ...label.Key) context.Context {
m := baggage.MapFromContext(parent).Apply(baggage.MapUpdate{
DropMultiK: keys,
})
return baggage.ContextWithMap(parent, m)
}
// ContextWithoutBaggage returns a copy of parent without baggage.
func ContextWithoutBaggage(parent context.Context) context.Context {
return baggage.ContextWithNoCorrelationData(parent)
}
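A small sketch of the baggage helpers above, written as if inside this otel package; label.String and label.Key are assumed from the label package, and the tenant.id key is illustrative:
// stampTenant is a hypothetical helper: it attaches a tenant id to the
// context baggage, reads it back, and finally strips all baggage again.
func stampTenant(ctx context.Context, tenant string) context.Context {
	ctx = ContextWithBaggageValues(ctx, label.String("tenant.id", tenant))

	// Read the value back; callers typically forward it on outgoing requests.
	_ = BaggageValue(ctx, label.Key("tenant.id"))

	// Strip everything before handing the context to untrusted code.
	return ContextWithoutBaggage(ctx)
}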

View File

@ -1,99 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package codes defines the canonical error codes used by OpenTelemetry.
//
// It conforms to [the OpenTelemetry
// specification](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#statuscanonicalcode).
package codes // import "go.opentelemetry.io/otel/codes"
import (
"fmt"
"strconv"
)
const (
// Unset is the default status code.
Unset Code = 0
// Error indicates the operation contains an error.
Error Code = 1
// Ok indicates the operation has been validated by an application developer
// or operator to have completed successfully, or to contain no error.
Ok Code = 2
maxCode = 3
)
// Code is a 32-bit representation of a status state.
type Code uint32
var codeToStr = map[Code]string{
Unset: "Unset",
Error: "Error",
Ok: "Ok",
}
var strToCode = map[string]Code{
`"Unset"`: Unset,
`"Error"`: Error,
`"Ok"`: Ok,
}
// String returns the Code as a string.
func (c Code) String() string {
return codeToStr[c]
}
// UnmarshalJSON unmarshals b into the Code.
//
// This is based on the functionality in the gRPC codes package:
// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244
func (c *Code) UnmarshalJSON(b []byte) error {
// From json.Unmarshaler: By convention, to approximate the behavior of
// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
// a no-op.
if string(b) == "null" {
return nil
}
if c == nil {
return fmt.Errorf("nil receiver passed to UnmarshalJSON")
}
if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
if ci >= maxCode {
return fmt.Errorf("invalid code: %q", ci)
}
*c = Code(ci)
return nil
}
if jc, ok := strToCode[string(b)]; ok {
*c = jc
return nil
}
return fmt.Errorf("invalid code: %q", string(b))
}
// MarshalJSON returns the JSON encoding of c.
func (c *Code) MarshalJSON() ([]byte, error) {
if c == nil {
return []byte("null"), nil
}
str, ok := codeToStr[*c]
if !ok {
return nil, fmt.Errorf("invalid code: %d", *c)
}
return []byte(fmt.Sprintf("%q", str)), nil
}
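A short sketch of the JSON round trip defined above, written as if inside this codes package:
// roundTripCode is a hypothetical helper: a Code marshals to its quoted name
// and unmarshals from either that name or the numeric value.
func roundTripCode() (Code, error) {
	c := Error
	quoted, err := c.MarshalJSON() // []byte(`"Error"`)
	if err != nil {
		return Unset, err
	}
	var decoded Code
	if err := decoded.UnmarshalJSON(quoted); err != nil {
		return Unset, err
	}
	if err := decoded.UnmarshalJSON([]byte("2")); err != nil { // numeric form of Ok
		return Unset, err
	}
	return decoded, nil
}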

View File

@ -1,16 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package otel contains OpenTelemetry Go packages.
package otel // import "go.opentelemetry.io/otel"

View File

@ -1,36 +0,0 @@
#!/usr/bin/env bash
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -euo pipefail
top_dir='.'
if [[ $# -gt 0 ]]; then
top_dir="${1}"
fi
p=$(pwd)
mod_dirs=()
mapfile -t mod_dirs < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort)
for mod_dir in "${mod_dirs[@]}"; do
cd "${mod_dir}"
main_dirs=()
mapfile -t main_dirs < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|')
for main_dir in "${main_dirs[@]}"; do
echo ".${main_dir#${p}}"
done
cd "${p}"
done

View File

@ -1,338 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package baggage provides types and functions to manage W3C Baggage.
package baggage
import (
"context"
"go.opentelemetry.io/otel/label"
)
type rawMap map[label.Key]label.Value
type keySet map[label.Key]struct{}
// Map is an immutable storage for correlations.
type Map struct {
m rawMap
}
// MapUpdate contains information about correlation changes to be
// made.
type MapUpdate struct {
// DropSingleK contains a single key to be dropped from
// correlations. Use this to avoid the overhead of a slice
// allocation if there is only one key to drop.
DropSingleK label.Key
// DropMultiK contains all the keys to be dropped from
// correlations.
DropMultiK []label.Key
// SingleKV contains a single key-value pair to be added to
// correlations. Use this to avoid the overhead of a slice
// allocation if there is only one key-value pair to add.
SingleKV label.KeyValue
// MultiKV contains all the key-value pairs to be added to
// correlations.
MultiKV []label.KeyValue
}
func newMap(raw rawMap) Map {
return Map{
m: raw,
}
}
// NewEmptyMap creates an empty correlations map.
func NewEmptyMap() Map {
return newMap(nil)
}
// NewMap creates a map with the contents of the update applied. In
// this function, having an update with DropSingleK or DropMultiK
// makes no sense - those fields are effectively ignored.
func NewMap(update MapUpdate) Map {
return NewEmptyMap().Apply(update)
}
// Apply creates a copy of the map with the contents of the update
// applied. Apply will first drop the keys from DropSingleK and
// DropMultiK, then add key-value pairs from SingleKV and MultiKV.
func (m Map) Apply(update MapUpdate) Map {
delSet, addSet := getModificationSets(update)
mapSize := getNewMapSize(m.m, delSet, addSet)
r := make(rawMap, mapSize)
for k, v := range m.m {
// do not copy items we want to drop
if _, ok := delSet[k]; ok {
continue
}
// do not copy items we would overwrite
if _, ok := addSet[k]; ok {
continue
}
r[k] = v
}
if update.SingleKV.Key.Defined() {
r[update.SingleKV.Key] = update.SingleKV.Value
}
for _, kv := range update.MultiKV {
r[kv.Key] = kv.Value
}
if len(r) == 0 {
r = nil
}
return newMap(r)
}
func getModificationSets(update MapUpdate) (delSet, addSet keySet) {
deletionsCount := len(update.DropMultiK)
if update.DropSingleK.Defined() {
deletionsCount++
}
if deletionsCount > 0 {
delSet = make(map[label.Key]struct{}, deletionsCount)
for _, k := range update.DropMultiK {
delSet[k] = struct{}{}
}
if update.DropSingleK.Defined() {
delSet[update.DropSingleK] = struct{}{}
}
}
additionsCount := len(update.MultiKV)
if update.SingleKV.Key.Defined() {
additionsCount++
}
if additionsCount > 0 {
addSet = make(map[label.Key]struct{}, additionsCount)
for _, k := range update.MultiKV {
addSet[k.Key] = struct{}{}
}
if update.SingleKV.Key.Defined() {
addSet[update.SingleKV.Key] = struct{}{}
}
}
return
}
func getNewMapSize(m rawMap, delSet, addSet keySet) int {
mapSizeDiff := 0
for k := range addSet {
if _, ok := m[k]; !ok {
mapSizeDiff++
}
}
for k := range delSet {
if _, ok := m[k]; ok {
if _, inAddSet := addSet[k]; !inAddSet {
mapSizeDiff--
}
}
}
return len(m) + mapSizeDiff
}
// Value gets a value from the correlations map and returns a boolean
// value indicating whether the key exists in the map.
func (m Map) Value(k label.Key) (label.Value, bool) {
value, ok := m.m[k]
return value, ok
}
// HasValue returns a boolean value indicating whether the key exists
// in the map.
func (m Map) HasValue(k label.Key) bool {
_, has := m.Value(k)
return has
}
// Len returns the length of the map.
func (m Map) Len() int {
return len(m.m)
}
// Foreach calls the passed callback once on each key-value pair until
// all the key-value pairs of the map have been iterated or the callback
// returns false, whichever happens first.
func (m Map) Foreach(f func(label.KeyValue) bool) {
for k, v := range m.m {
if !f(label.KeyValue{
Key: k,
Value: v,
}) {
return
}
}
}
type correlationsType struct{}
// SetHookFunc describes a type of a callback that is called when
// storing baggage in the context.
type SetHookFunc func(context.Context) context.Context
// GetHookFunc describes a type of a callback that is called when
// getting baggage from the context.
type GetHookFunc func(context.Context, Map) Map
// value under this key is either of type Map or correlationsData
var correlationsKey = &correlationsType{}
type correlationsData struct {
m Map
setHook SetHookFunc
getHook GetHookFunc
}
func (d correlationsData) isHookless() bool {
return d.setHook == nil && d.getHook == nil
}
type hookKind int
const (
hookKindSet hookKind = iota
hookKindGet
)
func (d *correlationsData) overrideHook(kind hookKind, setHook SetHookFunc, getHook GetHookFunc) {
switch kind {
case hookKindSet:
d.setHook = setHook
case hookKindGet:
d.getHook = getHook
}
}
// ContextWithSetHook installs a hook function that will be invoked
// every time ContextWithMap is called. To avoid unnecessary callback
// invocations (recursive or not), the callback can temporarily clear
// the hooks from the context with the ContextWithNoHooks function.
//
// Note that NewContext also calls ContextWithMap, so the hook will be
// invoked.
//
// Passing nil SetHookFunc creates a context with no set hook to call.
//
// This function should not be used by applications or libraries. It
// is mostly for interoperation with other observability APIs.
func ContextWithSetHook(ctx context.Context, hook SetHookFunc) context.Context {
return contextWithHook(ctx, hookKindSet, hook, nil)
}
// ContextWithGetHook installs a hook function that will be invoked
// every time MapFromContext is called. To avoid unnecessary callback
// invocations (recursive or not), the callback can temporarily clear
// the hooks from the context with the ContextWithNoHooks function.
//
// Note that NewContext also calls MapFromContext, so the hook will be
// invoked.
//
// Passing nil GetHookFunc creates a context with no get hook to call.
//
// This function should not be used by applications or libraries. It
// is mostly for interoperation with other observability APIs.
func ContextWithGetHook(ctx context.Context, hook GetHookFunc) context.Context {
return contextWithHook(ctx, hookKindGet, nil, hook)
}
func contextWithHook(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc) context.Context {
switch v := ctx.Value(correlationsKey).(type) {
case correlationsData:
v.overrideHook(kind, setHook, getHook)
if v.isHookless() {
return context.WithValue(ctx, correlationsKey, v.m)
}
return context.WithValue(ctx, correlationsKey, v)
case Map:
return contextWithOneHookAndMap(ctx, kind, setHook, getHook, v)
default:
m := NewEmptyMap()
return contextWithOneHookAndMap(ctx, kind, setHook, getHook, m)
}
}
func contextWithOneHookAndMap(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc, m Map) context.Context {
d := correlationsData{m: m}
d.overrideHook(kind, setHook, getHook)
if d.isHookless() {
return ctx
}
return context.WithValue(ctx, correlationsKey, d)
}
// ContextWithNoHooks creates a context with all the hooks
// disabled. Also returns old set and get hooks. This function can be
// used to temporarily clear the context from hooks and then reinstate
// them by calling ContextWithSetHook and ContextWithGetHook functions
// passing the hooks returned by this function.
//
// This function should not be used by applications or libraries. It
// is mostly for interoperation with other observability APIs.
func ContextWithNoHooks(ctx context.Context) (context.Context, SetHookFunc, GetHookFunc) {
switch v := ctx.Value(correlationsKey).(type) {
case correlationsData:
return context.WithValue(ctx, correlationsKey, v.m), v.setHook, v.getHook
default:
return ctx, nil, nil
}
}
// ContextWithMap returns a context with the Map entered into it.
func ContextWithMap(ctx context.Context, m Map) context.Context {
switch v := ctx.Value(correlationsKey).(type) {
case correlationsData:
v.m = m
ctx = context.WithValue(ctx, correlationsKey, v)
if v.setHook != nil {
ctx = v.setHook(ctx)
}
return ctx
default:
return context.WithValue(ctx, correlationsKey, m)
}
}
// ContextWithNoCorrelationData returns a context stripped of correlation
// data.
func ContextWithNoCorrelationData(ctx context.Context) context.Context {
return context.WithValue(ctx, correlationsKey, nil)
}
// NewContext returns a context with the map from passed context
// updated with the passed key-value pairs.
func NewContext(ctx context.Context, keyvalues ...label.KeyValue) context.Context {
return ContextWithMap(ctx, MapFromContext(ctx).Apply(MapUpdate{
MultiKV: keyvalues,
}))
}
// MapFromContext gets the current Map from a Context.
func MapFromContext(ctx context.Context) Map {
switch v := ctx.Value(correlationsKey).(type) {
case correlationsData:
if v.getHook != nil {
return v.getHook(ctx, v.m)
}
return v.m
case Map:
return v
default:
return NewEmptyMap()
}
}
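A compact sketch of Map updates, written as if inside this baggage package; label.String and label.Key are assumed from the label package:
// applyUpdate is a hypothetical helper: it builds a Map, applies a combined
// drop/add update, and reads one value back.
func applyUpdate() (label.Value, bool) {
	m := NewMap(MapUpdate{MultiKV: []label.KeyValue{
		label.String("user", "alice"),
		label.String("region", "eu"),
	}})
	m = m.Apply(MapUpdate{
		DropSingleK: label.Key("region"),
		SingleKV:    label.String("tier", "gold"),
	})
	return m.Value(label.Key("tier")) // ("gold", true); "region" has been dropped
}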

View File

@ -1,91 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"math"
"unsafe"
)
func BoolToRaw(b bool) uint64 {
if b {
return 1
}
return 0
}
func RawToBool(r uint64) bool {
return r != 0
}
func Int64ToRaw(i int64) uint64 {
return uint64(i)
}
func RawToInt64(r uint64) int64 {
return int64(r)
}
func Uint64ToRaw(u uint64) uint64 {
return u
}
func RawToUint64(r uint64) uint64 {
return r
}
func Float64ToRaw(f float64) uint64 {
return math.Float64bits(f)
}
func RawToFloat64(r uint64) float64 {
return math.Float64frombits(r)
}
func Int32ToRaw(i int32) uint64 {
return uint64(i)
}
func RawToInt32(r uint64) int32 {
return int32(r)
}
func Uint32ToRaw(u uint32) uint64 {
return uint64(u)
}
func RawToUint32(r uint64) uint32 {
return uint32(r)
}
func Float32ToRaw(f float32) uint64 {
return Uint32ToRaw(math.Float32bits(f))
}
func RawToFloat32(r uint64) float32 {
return math.Float32frombits(RawToUint32(r))
}
func RawPtrToFloat64Ptr(r *uint64) *float64 {
return (*float64)(unsafe.Pointer(r))
}
func RawPtrToInt64Ptr(r *uint64) *int64 {
return (*int64)(unsafe.Pointer(r))
}
func RawPtrToUint64Ptr(r *uint64) *uint64 {
return r
}
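A tiny sketch of the raw encoding round trip, written as if inside this internal package:
// rawRoundTrip is a hypothetical helper: metric numbers are carried as raw
// uint64 bit patterns, so any non-NaN float64 survives the round trip.
func rawRoundTrip(f float64) bool {
	return RawToFloat64(Float64ToRaw(f)) == f
}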

View File

@ -1,35 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package noop provides noop tracing implementations for tracer and span.
package noop
import (
"context"
"go.opentelemetry.io/otel/api/trace"
)
var (
// Tracer is a noop tracer that starts noop spans.
Tracer trace.Tracer
// Span is a noop Span.
Span trace.Span
)
func init() {
Tracer = trace.NoopTracerProvider().Tracer("")
_, Span = Tracer.Start(context.Background(), "")
}

View File

@ -1,16 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package label provides key and value labels.
package label // import "go.opentelemetry.io/otel/label"

Some files were not shown because too many files have changed in this diff.