mirror of
https://git.ptzo.gdn/feditools/relay.git
synced 2024-09-21 09:37:11 +00:00
Bump github.com/uptrace/uptrace-go from 1.9.0 to 1.11.4 (#149)
Bumps [github.com/uptrace/uptrace-go](https://github.com/uptrace/uptrace-go) from 1.9.0 to 1.11.4. - [Release notes](https://github.com/uptrace/uptrace-go/releases) - [Changelog](https://github.com/uptrace/uptrace-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/uptrace/uptrace-go/compare/v1.9.0...v1.11.4) --- updated-dependencies: - dependency-name: github.com/uptrace/uptrace-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] <support@github.com> Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
This commit is contained in:
parent
547c58b1cf
commit
6037670c2e
47
go.mod
47
go.mod
@ -35,13 +35,13 @@ require (
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.1.8
|
||||
github.com/uptrace/bun/extra/bundebug v1.1.8
|
||||
github.com/uptrace/bun/extra/bunotel v1.1.8
|
||||
github.com/uptrace/uptrace-go v1.9.0
|
||||
github.com/uptrace/uptrace-go v1.11.4
|
||||
go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.36.1
|
||||
go.opentelemetry.io/otel v1.10.0
|
||||
go.opentelemetry.io/otel/trace v1.10.0
|
||||
golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b
|
||||
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29
|
||||
golang.org/x/text v0.3.7
|
||||
go.opentelemetry.io/otel v1.11.1
|
||||
go.opentelemetry.io/otel/trace v1.11.1
|
||||
golang.org/x/net v0.1.0
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
|
||||
golang.org/x/text v0.4.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
modernc.org/sqlite v1.17.1
|
||||
)
|
||||
@ -62,7 +62,7 @@ require (
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/gorilla/securecookie v1.1.1 // indirect
|
||||
github.com/gorilla/websocket v1.4.2 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
||||
@ -99,24 +99,23 @@ require (
|
||||
github.com/valyala/fasthttp v1.40.0 // indirect
|
||||
github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.34.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.9.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.31.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.31.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.9.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.9.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.9.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.31.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.9.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v0.31.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v0.18.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.36.4 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.1 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.11.1 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.33.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.11.1 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v0.33.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
|
||||
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64 // indirect
|
||||
golang.org/x/tools v0.1.10 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
|
||||
google.golang.org/genproto v0.0.0-20220802133213-ce4fa296bf78 // indirect
|
||||
google.golang.org/grpc v1.48.0 // indirect
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
|
||||
golang.org/x/sys v0.1.0 // indirect
|
||||
golang.org/x/tools v0.1.12 // indirect
|
||||
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c // indirect
|
||||
google.golang.org/grpc v1.50.1 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
|
102
go.sum
102
go.sum
@ -50,7 +50,6 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
|
||||
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||
github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
@ -70,7 +69,6 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
@ -101,7 +99,6 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
@ -200,7 +197,7 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
@ -236,8 +233,8 @@ github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0U
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 h1:/sDbPb60SusIXjiJGYLUoS/rAQurQmvGWmwn2bBPM9c=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 h1:kr3j8iIMR4ywO/O0rvksXaJvauGGCMg2zAZIiNZ9uIQ=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0/go.mod h1:ummNFgdgLhhX7aIiy35vVmQNS0rWXknfPE0qe6fmFXg=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
||||
@ -469,8 +466,8 @@ github.com/uptrace/bun/extra/bunotel v1.1.8 h1:Sg7XcLLI+KTalKg36aRpOJovYF9UUVuN5
|
||||
github.com/uptrace/bun/extra/bunotel v1.1.8/go.mod h1:1chzcC5wkd7FP+/SRUbmmqF0DL1ed+NgdHlGE+GIkPs=
|
||||
github.com/uptrace/opentelemetry-go-extra/otelsql v0.1.15 h1:s6BZwhj/2oZ9GSkfcTH8YRHjxj3MGo1j2Pg83Pc1xjw=
|
||||
github.com/uptrace/opentelemetry-go-extra/otelsql v0.1.15/go.mod h1:aZXwJzbTHnhh8vpd1bPjK68iTuNEtfvpHcJq73FdmhQ=
|
||||
github.com/uptrace/uptrace-go v1.9.0 h1:GlSrbkUgirS99mVUkeSSXz9LQyknv2FTtDSxqv2u5lE=
|
||||
github.com/uptrace/uptrace-go v1.9.0/go.mod h1:EJpFb3syIxnGa6cuvIxAMGzySSNn+CFOI7BczxLA52k=
|
||||
github.com/uptrace/uptrace-go v1.11.4 h1:BZWpzr2yhwFvmOLsRG97hgs60xYwaTnGFx2zPl///Gs=
|
||||
github.com/uptrace/uptrace-go v1.11.4/go.mod h1:AwD14IBYEip4q21cxks+CsNkEINWsKpsLd8AS8HFdMs=
|
||||
github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
@ -494,44 +491,44 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.36.1 h1:rwxTUxEo8rKQvQgi+9kuGJYy1eEBkAMCehLyc0R3h54=
|
||||
go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.36.1/go.mod h1:XU1EB47dq4JIrePPJWy6DrqTZcMSA93K+NWaEIx0qYU=
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.34.0 h1:zt4RDodWkgiHk8tyUmFOjFoOOfyGH7vwIbUzKP6CCh8=
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.34.0/go.mod h1:5wIoZE96WbcQVU3D6UF/ukRfFQXbB6OYgeWi9CjHa90=
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.36.4 h1:7AY5NdRzyU5s1ek3E4VK3FBnPtQ6La1i7sIn9hNgjsk=
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.36.4/go.mod h1:yFSLOnffweT7Es+IzY1DF5KP0xa2Wl15SJfKqAyDXq8=
|
||||
go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY=
|
||||
go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4=
|
||||
go.opentelemetry.io/otel v1.5.0/go.mod h1:Jm/m+rNp/z0eqJc74H7LPwQ3G87qkU/AnnAydAjSAHk=
|
||||
go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4=
|
||||
go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.9.0 h1:ggqApEjDKczicksfvZUCxuvoyDmR6Sbm56LwiK8DVR0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.9.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.31.0 h1:H0+xwv4shKw0gfj/ZqR13qO2N/dBQogB1OcRjJjV39Y=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.31.0/go.mod h1:nkenGD8vcvs0uN6WhR90ZVHQlgDsRmXicnNadMnk+XQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.31.0 h1:BaQ2xM5cPmldVCMvbLoy5tcLUhXCtIhItDYBNw83B7Y=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.31.0/go.mod h1:VRr8tlXQEsTdesDCh0qBe2iKDWhpi3ZqDYw6VlZ8MhI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.9.0 h1:NN90Cuna0CnBg8YNu1Q0V35i2E8LDByFOwHRCq/ZP9I=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.9.0/go.mod h1:0EsCXjZAiiZGnLdEUXM9YjCKuuLZMYyglh2QDXcYKVA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.9.0 h1:M0/hqGuJBLeIEu20f89H74RGtqV2dn+SFWEz9ATAAwY=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.9.0/go.mod h1:K5G92gbtCrYJ0mn6zj9Pst7YFsDFuvSYEhYKRMcufnM=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.9.0 h1:0uV0qzHk48i1SF8qRI8odMYiwPOLh9gBhiJFpj8H6JY=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.9.0/go.mod h1:Fl1iS5ZhWgXXXTdJMuBSVsS5nkL5XluHbg97kjOuYU4=
|
||||
go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs=
|
||||
go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A=
|
||||
go.opentelemetry.io/otel v1.11.1 h1:4WLLAmcfkmDk2ukNXJyq3/kiz/3UzCaYq6PskJsaou4=
|
||||
go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1 h1:X2GndnMCsUPh6CiY2a+frAbNsXaPLbB0soHRYhAZ5Ig=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1/go.mod h1:i8vjiSzbiUC7wOQplijSXMYUpNM93DtlS5CbUT+C6oQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.33.0 h1:OT/UjHcjog4A1s1UMCtyehIKS+vpjM5Du0r7KGsH6TE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.33.0/go.mod h1:0XctNDHEWmiSDIU8NPbJElrK05gBJFcYlGP4FMGo4g4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.33.0 h1:1SVtGtRsNyGgv1fRfNXfh+sJowIwzF0gkf+61lvTgdg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.33.0/go.mod h1:ryB27ubOBXsiqfh6MwtSdx5knzbSZtjvPnMMmt3AykQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1 h1:MEQNafcNCB0uQIti/oHgU7CZpUMYQ7qigBwMVKycHvc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1/go.mod h1:19O5I2U5iys38SsmT2uDJja/300woyzE1KPIQxEUBUc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.1 h1:LYyG/f1W/jzAix16jbksJfMQFpOH/Ma6T639pVPMgfI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.1/go.mod h1:QrRRQiY3kzAoYPNLP0W/Ikg0gR6V3LMc+ODSxr7yyvg=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.11.1 h1:3Yvzs7lgOw8MmbxmLRsQGwYdCubFmUHSooKaEhQunFQ=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.11.1/go.mod h1:pyHDt0YlyuENkD2VwHsiRDf+5DfI3EH7pfhUYW6sQUE=
|
||||
go.opentelemetry.io/otel/metric v0.33.0 h1:xQAyl7uGEYvrLAiV/09iTJlp1pZnQ9Wl793qbVvED1E=
|
||||
go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2f/r5c7Fd5FYaI=
|
||||
go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE=
|
||||
go.opentelemetry.io/otel/sdk v1.9.0 h1:LNXp1vrr83fNXTHgU8eO89mhzxb/bbWAsHG6fNf3qWo=
|
||||
go.opentelemetry.io/otel/sdk v1.9.0/go.mod h1:AEZc8nt5bd2F7BC24J5R0mrjYnpEgYHyTcM/vrSple4=
|
||||
go.opentelemetry.io/otel/sdk/metric v0.31.0 h1:2sZx4R43ZMhJdteKAlKoHvRgrMp53V1aRxvEf5lCq8Q=
|
||||
go.opentelemetry.io/otel/sdk/metric v0.31.0/go.mod h1:fl0SmNnX9mN9xgU6OLYLMBMrNAsaZQi7qBwprwO3abk=
|
||||
go.opentelemetry.io/otel/sdk v1.11.1 h1:F7KmQgoHljhUuJyA+9BiU+EkJfyX5nVVF4wyzWZpKxs=
|
||||
go.opentelemetry.io/otel/sdk v1.11.1/go.mod h1:/l3FE4SupHJ12TduVjUkZtlfFqDCQJlOlithYrdktys=
|
||||
go.opentelemetry.io/otel/sdk/metric v0.33.0 h1:oTqyWfksgKoJmbrs2q7O7ahkJzt+Ipekihf8vhpa9qo=
|
||||
go.opentelemetry.io/otel/sdk/metric v0.33.0/go.mod h1:xdypMeA21JBOvjjzDUtD0kzIcHO/SPez+a8HOzJPGp0=
|
||||
go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc=
|
||||
go.opentelemetry.io/otel/trace v1.5.0/go.mod h1:sq55kfhjXYr1zVSyexg0w1mpa03AYXR5eyTkB9NPPdE=
|
||||
go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E=
|
||||
go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM=
|
||||
go.opentelemetry.io/otel/trace v1.11.1 h1:ofxdnzsNrGBYXbP7t7zpUK281+go5rF7dvdIZXF8gdQ=
|
||||
go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opentelemetry.io/proto/otlp v0.18.0 h1:W5hyXNComRa23tGpKwG+FRAc4rfF6ZUg1JReK+QHS80=
|
||||
go.opentelemetry.io/proto/otlp v0.18.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
|
||||
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
@ -589,8 +586,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -632,8 +629,8 @@ golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qx
|
||||
golang.org/x/net v0.0.0-20211101193420-4a448f8816b3/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b h1:3ogNYyK4oIQdIKzTu68hQrr4iuVxF3AxKl9Aj/eDrw0=
|
||||
golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@ -654,8 +651,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4=
|
||||
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180525142821-c11f84a56e43/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -718,12 +715,12 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64 h1:UiNENfZ8gDvpiWw7IpOMQ27spWmThO1RwwdQVbJahJM=
|
||||
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@ -732,8 +729,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@ -791,16 +789,14 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
@ -865,8 +861,8 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D
|
||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220802133213-ce4fa296bf78 h1:QntLWYqZeuBtJkth3m/6DLznnI0AHJr+AgJXvVh/izw=
|
||||
google.golang.org/genproto v0.0.0-20220802133213-ce4fa296bf78/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
|
||||
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo=
|
||||
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
@ -887,8 +883,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
|
||||
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
|
||||
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
11
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
generated
vendored
11
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
generated
vendored
@ -275,11 +275,12 @@ func (p *parser) accept(term termType) (string, error) {
|
||||
// expectPChars determines if "t" consists of only pchars defined in RFC3986.
|
||||
//
|
||||
// https://www.ietf.org/rfc/rfc3986.txt, P.49
|
||||
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
|
||||
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
|
||||
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
|
||||
// / "*" / "+" / "," / ";" / "="
|
||||
// pct-encoded = "%" HEXDIG HEXDIG
|
||||
//
|
||||
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
|
||||
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
|
||||
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
|
||||
// / "*" / "+" / "," / ";" / "="
|
||||
// pct-encoded = "%" HEXDIG HEXDIG
|
||||
func expectPChars(t string) error {
|
||||
const (
|
||||
init = iota
|
||||
|
9
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
generated
vendored
9
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
generated
vendored
@ -162,10 +162,11 @@ func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
|
||||
|
||||
// DefaultRoutingErrorHandler is our default handler for routing errors.
|
||||
// By default http error codes mapped on the following error codes:
|
||||
// NotFound -> grpc.NotFound
|
||||
// StatusBadRequest -> grpc.InvalidArgument
|
||||
// MethodNotAllowed -> grpc.Unimplemented
|
||||
// Other -> grpc.Internal, method is not expecting to be called for anything else
|
||||
//
|
||||
// NotFound -> grpc.NotFound
|
||||
// StatusBadRequest -> grpc.InvalidArgument
|
||||
// MethodNotAllowed -> grpc.Unimplemented
|
||||
// Other -> grpc.Internal, method is not expecting to be called for anything else
|
||||
func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
|
||||
sterr := status.Error(codes.Internal, "Unexpected routing error")
|
||||
switch httpStatus {
|
||||
|
23
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
generated
vendored
23
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
generated
vendored
@ -343,8 +343,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
// Verb out here is to memoize for the fallback case below
|
||||
var verb string
|
||||
lastComponent := components[len(components)-1]
|
||||
|
||||
for _, h := range s.handlers[r.Method] {
|
||||
// If the pattern has a verb, explicitly look for a suffix in the last
|
||||
@ -355,10 +354,11 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// parser because we know what verb we're looking for, however, there
|
||||
// are still some cases that the parser itself cannot disambiguate. See
|
||||
// the comment there if interested.
|
||||
|
||||
var verb string
|
||||
patVerb := h.pat.Verb()
|
||||
l := len(components)
|
||||
lastComponent := components[l-1]
|
||||
var idx int = -1
|
||||
|
||||
idx := -1
|
||||
if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) {
|
||||
idx = len(lastComponent) - len(patVerb) - 1
|
||||
}
|
||||
@ -368,7 +368,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
if idx > 0 {
|
||||
components[l-1], verb = lastComponent[:idx], lastComponent[idx+1:]
|
||||
components[len(components)-1], verb = lastComponent[:idx], lastComponent[idx+1:]
|
||||
}
|
||||
|
||||
pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
|
||||
@ -394,6 +394,17 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
continue
|
||||
}
|
||||
for _, h := range handlers {
|
||||
var verb string
|
||||
patVerb := h.pat.Verb()
|
||||
|
||||
idx := -1
|
||||
if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) {
|
||||
idx = len(lastComponent) - len(patVerb) - 1
|
||||
}
|
||||
if idx > 0 {
|
||||
components[len(components)-1], verb = lastComponent[:idx], lastComponent[idx+1:]
|
||||
}
|
||||
|
||||
pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
|
||||
if err != nil {
|
||||
var mse MalformedSequenceError
|
||||
|
17
vendor/github.com/uptrace/uptrace-go/uptrace/client.go
generated
vendored
17
vendor/github.com/uptrace/uptrace-go/uptrace/client.go
generated
vendored
@ -6,7 +6,7 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
@ -18,8 +18,8 @@ type client struct {
|
||||
dsn *DSN
|
||||
tracer trace.Tracer
|
||||
|
||||
tp *sdktrace.TracerProvider
|
||||
ctrl *controller.Controller
|
||||
tp *sdktrace.TracerProvider
|
||||
mp *metric.MeterProvider
|
||||
}
|
||||
|
||||
func newClient(dsn *DSN) *client {
|
||||
@ -36,11 +36,11 @@ func (c *client) Shutdown(ctx context.Context) (lastErr error) {
|
||||
}
|
||||
c.tp = nil
|
||||
}
|
||||
if c.ctrl != nil {
|
||||
if err := c.ctrl.Stop(ctx); err != nil {
|
||||
if c.mp != nil {
|
||||
if err := c.mp.Shutdown(ctx); err != nil {
|
||||
lastErr = err
|
||||
}
|
||||
c.ctrl = nil
|
||||
c.mp = nil
|
||||
}
|
||||
return lastErr
|
||||
}
|
||||
@ -51,6 +51,11 @@ func (c *client) ForceFlush(ctx context.Context) (lastErr error) {
|
||||
lastErr = err
|
||||
}
|
||||
}
|
||||
if c.mp != nil {
|
||||
if err := c.mp.ForceFlush(ctx); err != nil {
|
||||
lastErr = err
|
||||
}
|
||||
}
|
||||
return lastErr
|
||||
}
|
||||
|
||||
|
115
vendor/github.com/uptrace/uptrace-go/uptrace/config.go
generated
vendored
115
vendor/github.com/uptrace/uptrace-go/uptrace/config.go
generated
vendored
@ -2,6 +2,7 @@ package uptrace
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"os"
|
||||
|
||||
"github.com/uptrace/uptrace-go/internal"
|
||||
@ -23,6 +24,8 @@ type config struct {
|
||||
resourceDetectors []resource.Detector
|
||||
resource *resource.Resource
|
||||
|
||||
tlsConf *tls.Config
|
||||
|
||||
// Tracing options
|
||||
|
||||
tracingEnabled bool
|
||||
@ -38,33 +41,33 @@ type config struct {
|
||||
}
|
||||
|
||||
func newConfig(opts []Option) *config {
|
||||
cfg := &config{
|
||||
conf := &config{
|
||||
tracingEnabled: true,
|
||||
metricsEnabled: true,
|
||||
}
|
||||
|
||||
if dsn, ok := os.LookupEnv("UPTRACE_DSN"); ok {
|
||||
cfg.dsn = dsn
|
||||
conf.dsn = dsn
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt.apply(cfg)
|
||||
opt.apply(conf)
|
||||
}
|
||||
|
||||
return cfg
|
||||
return conf
|
||||
}
|
||||
|
||||
func (cfg *config) newResource() *resource.Resource {
|
||||
if cfg.resource != nil {
|
||||
if len(cfg.resourceAttributes) > 0 {
|
||||
func (conf *config) newResource() *resource.Resource {
|
||||
if conf.resource != nil {
|
||||
if len(conf.resourceAttributes) > 0 {
|
||||
internal.Logger.Printf("WithResource overrides WithResourceAttributes (discarding %v)",
|
||||
cfg.resourceAttributes)
|
||||
conf.resourceAttributes)
|
||||
}
|
||||
if len(cfg.resourceDetectors) > 0 {
|
||||
if len(conf.resourceDetectors) > 0 {
|
||||
internal.Logger.Printf("WithResource overrides WithResourceDetectors (discarding %v)",
|
||||
cfg.resourceDetectors)
|
||||
conf.resourceDetectors)
|
||||
}
|
||||
return cfg.resource
|
||||
return conf.resource
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
@ -73,8 +76,8 @@ func (cfg *config) newResource() *resource.Resource {
|
||||
resource.WithFromEnv(),
|
||||
resource.WithTelemetrySDK(),
|
||||
resource.WithHost(),
|
||||
resource.WithDetectors(cfg.resourceDetectors...),
|
||||
resource.WithAttributes(cfg.resourceAttributes...))
|
||||
resource.WithDetectors(conf.resourceDetectors...),
|
||||
resource.WithAttributes(conf.resourceAttributes...))
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
return resource.Environment()
|
||||
@ -85,47 +88,47 @@ func (cfg *config) newResource() *resource.Resource {
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
type Option interface {
|
||||
apply(cfg *config)
|
||||
apply(conf *config)
|
||||
}
|
||||
|
||||
type option func(cfg *config)
|
||||
type option func(conf *config)
|
||||
|
||||
func (fn option) apply(cfg *config) {
|
||||
fn(cfg)
|
||||
func (fn option) apply(conf *config) {
|
||||
fn(conf)
|
||||
}
|
||||
|
||||
// WithDSN configures a data source name that is used to connect to Uptrace, for example,
|
||||
// `https://<token>@api.uptrace.dev/<project_id>`.
|
||||
// `https://<token>@uptrace.dev/<project_id>`.
|
||||
//
|
||||
// The default is to use UPTRACE_DSN environment variable.
|
||||
func WithDSN(dsn string) Option {
|
||||
return option(func(cfg *config) {
|
||||
cfg.dsn = dsn
|
||||
return option(func(conf *config) {
|
||||
conf.dsn = dsn
|
||||
})
|
||||
}
|
||||
|
||||
// WithServiceVersion configures `service.name` resource attribute.
|
||||
func WithServiceName(serviceName string) Option {
|
||||
return option(func(cfg *config) {
|
||||
return option(func(conf *config) {
|
||||
attr := semconv.ServiceNameKey.String(serviceName)
|
||||
cfg.resourceAttributes = append(cfg.resourceAttributes, attr)
|
||||
conf.resourceAttributes = append(conf.resourceAttributes, attr)
|
||||
})
|
||||
}
|
||||
|
||||
// WithServiceVersion configures `service.version` resource attribute, for example, `1.0.0`.
|
||||
func WithServiceVersion(serviceVersion string) Option {
|
||||
return option(func(cfg *config) {
|
||||
return option(func(conf *config) {
|
||||
attr := semconv.ServiceVersionKey.String(serviceVersion)
|
||||
cfg.resourceAttributes = append(cfg.resourceAttributes, attr)
|
||||
conf.resourceAttributes = append(conf.resourceAttributes, attr)
|
||||
})
|
||||
}
|
||||
|
||||
// WithDeploymentEnvironment configures `deployment.environment` resource attribute,
|
||||
// for example, `production`.
|
||||
func WithDeploymentEnvironment(env string) Option {
|
||||
return option(func(cfg *config) {
|
||||
return option(func(conf *config) {
|
||||
attr := semconv.DeploymentEnvironmentKey.String(env)
|
||||
cfg.resourceAttributes = append(cfg.resourceAttributes, attr)
|
||||
conf.resourceAttributes = append(conf.resourceAttributes, attr)
|
||||
})
|
||||
}
|
||||
|
||||
@ -134,16 +137,16 @@ func WithDeploymentEnvironment(env string) Option {
|
||||
//
|
||||
// The default is to use `OTEL_RESOURCE_ATTRIBUTES` env var, for example,
|
||||
// `OTEL_RESOURCE_ATTRIBUTES=service.name=myservice,service.version=1.0.0`.
|
||||
func WithResourceAttributes(attrs []attribute.KeyValue) Option {
|
||||
return option(func(cfg *config) {
|
||||
cfg.resourceAttributes = append(cfg.resourceAttributes, attrs...)
|
||||
func WithResourceAttributes(attrs ...attribute.KeyValue) Option {
|
||||
return option(func(conf *config) {
|
||||
conf.resourceAttributes = append(conf.resourceAttributes, attrs...)
|
||||
})
|
||||
}
|
||||
|
||||
// WithResourceDetectors adds detectors to be evaluated for the configured resource.
|
||||
func WithResourceDetectors(detectors ...resource.Detector) Option {
|
||||
return option(func(cfg *config) {
|
||||
cfg.resourceDetectors = append(cfg.resourceDetectors, detectors...)
|
||||
return option(func(conf *config) {
|
||||
conf.resourceDetectors = append(conf.resourceDetectors, detectors...)
|
||||
})
|
||||
}
|
||||
|
||||
@ -153,8 +156,14 @@ func WithResourceDetectors(detectors ...resource.Detector) Option {
|
||||
//
|
||||
// WithResource overrides and replaces any other resource attributes.
|
||||
func WithResource(resource *resource.Resource) Option {
|
||||
return option(func(cfg *config) {
|
||||
cfg.resource = resource
|
||||
return option(func(conf *config) {
|
||||
conf.resource = resource
|
||||
})
|
||||
}
|
||||
|
||||
func WithTLSConfig(tlsConf *tls.Config) Option {
|
||||
return option(func(conf *config) {
|
||||
conf.tlsConf = tlsConf
|
||||
})
|
||||
}
|
||||
|
||||
@ -165,20 +174,20 @@ type TracingOption interface {
|
||||
tracing()
|
||||
}
|
||||
|
||||
type tracingOption func(cfg *config)
|
||||
type tracingOption func(conf *config)
|
||||
|
||||
var _ TracingOption = (*tracingOption)(nil)
|
||||
|
||||
func (fn tracingOption) apply(cfg *config) {
|
||||
fn(cfg)
|
||||
func (fn tracingOption) apply(conf *config) {
|
||||
fn(conf)
|
||||
}
|
||||
|
||||
func (fn tracingOption) tracing() {}
|
||||
|
||||
// WithTracingEnabled can be used to enable/disable tracing.
|
||||
func WithTracingEnabled(on bool) TracingOption {
|
||||
return tracingOption(func(cfg *config) {
|
||||
cfg.tracingEnabled = on
|
||||
return tracingOption(func(conf *config) {
|
||||
conf.tracingEnabled = on
|
||||
})
|
||||
}
|
||||
|
||||
@ -190,23 +199,23 @@ func WithTracingDisabled() TracingOption {
|
||||
// TracerProvider overwrites the default Uptrace tracer provider.
|
||||
// You can use it to configure Uptrace distro to use OTLP exporter.
|
||||
func WithTracerProvider(provider *sdktrace.TracerProvider) TracingOption {
|
||||
return tracingOption(func(cfg *config) {
|
||||
cfg.tracerProvider = provider
|
||||
return tracingOption(func(conf *config) {
|
||||
conf.tracerProvider = provider
|
||||
})
|
||||
}
|
||||
|
||||
// WithTraceSampler configures a span sampler.
|
||||
func WithTraceSampler(sampler sdktrace.Sampler) TracingOption {
|
||||
return tracingOption(func(cfg *config) {
|
||||
cfg.traceSampler = sampler
|
||||
return tracingOption(func(conf *config) {
|
||||
conf.traceSampler = sampler
|
||||
})
|
||||
}
|
||||
|
||||
// WithPropagator sets the global TextMapPropagator used by OpenTelemetry.
|
||||
// The default is propagation.TraceContext and propagation.Baggage.
|
||||
func WithPropagator(propagator propagation.TextMapPropagator) TracingOption {
|
||||
return tracingOption(func(cfg *config) {
|
||||
cfg.textMapPropagator = propagator
|
||||
return tracingOption(func(conf *config) {
|
||||
conf.textMapPropagator = propagator
|
||||
})
|
||||
}
|
||||
|
||||
@ -218,15 +227,15 @@ func WithTextMapPropagator(propagator propagation.TextMapPropagator) TracingOpti
|
||||
// WithPrettyPrintSpanExporter adds a span exproter that prints spans to stdout.
|
||||
// It is useful for debugging or demonstration purposes.
|
||||
func WithPrettyPrintSpanExporter() TracingOption {
|
||||
return tracingOption(func(cfg *config) {
|
||||
cfg.prettyPrint = true
|
||||
return tracingOption(func(conf *config) {
|
||||
conf.prettyPrint = true
|
||||
})
|
||||
}
|
||||
|
||||
// WithBatchSpanProcessorOption specifies options used to created BatchSpanProcessor.
|
||||
func WithBatchSpanProcessorOption(opts ...sdktrace.BatchSpanProcessorOption) TracingOption {
|
||||
return tracingOption(func(cfg *config) {
|
||||
cfg.bspOptions = append(cfg.bspOptions, opts...)
|
||||
return tracingOption(func(conf *config) {
|
||||
conf.bspOptions = append(conf.bspOptions, opts...)
|
||||
})
|
||||
}
|
||||
|
||||
@ -237,20 +246,20 @@ type MetricsOption interface {
|
||||
metrics()
|
||||
}
|
||||
|
||||
type metricsOption func(cfg *config)
|
||||
type metricsOption func(conf *config)
|
||||
|
||||
var _ MetricsOption = (*metricsOption)(nil)
|
||||
|
||||
func (fn metricsOption) apply(cfg *config) {
|
||||
fn(cfg)
|
||||
func (fn metricsOption) apply(conf *config) {
|
||||
fn(conf)
|
||||
}
|
||||
|
||||
func (fn metricsOption) metrics() {}
|
||||
|
||||
// WithMetricsEnabled can be used to enable/disable metrics.
|
||||
func WithMetricsEnabled(on bool) MetricsOption {
|
||||
return metricsOption(func(cfg *config) {
|
||||
cfg.metricsEnabled = on
|
||||
return metricsOption(func(conf *config) {
|
||||
conf.metricsEnabled = on
|
||||
})
|
||||
}
|
||||
|
||||
|
54
vendor/github.com/uptrace/uptrace-go/uptrace/metrics.go
generated
vendored
54
vendor/github.com/uptrace/uptrace-go/uptrace/metrics.go
generated
vendored
@ -5,13 +5,11 @@ import (
|
||||
"time"
|
||||
|
||||
runtimemetrics "go.opentelemetry.io/contrib/instrumentation/runtime"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
"go.opentelemetry.io/otel/metric/global"
|
||||
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||
selector "go.opentelemetry.io/otel/sdk/metric/selector/simple"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/metric/view"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/encoding/gzip"
|
||||
|
||||
@ -19,39 +17,31 @@ import (
|
||||
)
|
||||
|
||||
func configureMetrics(ctx context.Context, client *client, cfg *config) {
|
||||
exportKindSelector := aggregation.StatelessTemporalitySelector()
|
||||
|
||||
exp, err := otlpmetric.New(ctx, otlpmetricClient(client.dsn),
|
||||
otlpmetric.WithMetricAggregationTemporalitySelector(exportKindSelector))
|
||||
exp, err := otlpmetricClient(ctx, client.dsn)
|
||||
if err != nil {
|
||||
internal.Logger.Printf("otlpmetric.New failed: %s", err)
|
||||
internal.Logger.Printf("otlpmetricClient failed: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
ctrl := controller.New(
|
||||
processor.NewFactory(
|
||||
selector.NewWithHistogramDistribution(),
|
||||
exportKindSelector,
|
||||
),
|
||||
controller.WithExporter(exp),
|
||||
controller.WithCollectPeriod(10*time.Second), // same as default
|
||||
controller.WithResource(cfg.newResource()),
|
||||
reader := metric.NewPeriodicReader(
|
||||
exp,
|
||||
metric.WithInterval(60*time.Second),
|
||||
metric.WithTemporalitySelector(statelessTemporalitySelector),
|
||||
)
|
||||
provider := metric.NewMeterProvider(
|
||||
metric.WithReader(reader),
|
||||
metric.WithResource(cfg.newResource()),
|
||||
)
|
||||
|
||||
if err := ctrl.Start(ctx); err != nil {
|
||||
internal.Logger.Printf("ctrl.Start failed: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
global.SetMeterProvider(ctrl)
|
||||
client.ctrl = ctrl
|
||||
global.SetMeterProvider(provider)
|
||||
client.mp = provider
|
||||
|
||||
if err := runtimemetrics.Start(); err != nil {
|
||||
internal.Logger.Printf("runtimemetrics.Start failed: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func otlpmetricClient(dsn *DSN) otlpmetric.Client {
|
||||
func otlpmetricClient(ctx context.Context, dsn *DSN) (metric.Exporter, error) {
|
||||
options := []otlpmetricgrpc.Option{
|
||||
otlpmetricgrpc.WithEndpoint(dsn.OTLPHost()),
|
||||
otlpmetricgrpc.WithHeaders(map[string]string{
|
||||
@ -69,5 +59,15 @@ func otlpmetricClient(dsn *DSN) otlpmetric.Client {
|
||||
options = append(options, otlpmetricgrpc.WithInsecure())
|
||||
}
|
||||
|
||||
return otlpmetricgrpc.NewClient(options...)
|
||||
return otlpmetricgrpc.New(ctx, options...)
|
||||
}
|
||||
|
||||
func statelessTemporalitySelector(kind view.InstrumentKind) metricdata.Temporality {
|
||||
return metricdata.CumulativeTemporality
|
||||
// switch kind {
|
||||
// case view.SyncCounter, view.AsyncCounter, view.SyncHistogram:
|
||||
// return metricdata.DeltaTemporality
|
||||
// default:
|
||||
// return metricdata.CumulativeTemporality
|
||||
// }
|
||||
}
|
||||
|
83
vendor/github.com/uptrace/uptrace-go/uptrace/tracing.go
generated
vendored
83
vendor/github.com/uptrace/uptrace-go/uptrace/tracing.go
generated
vendored
@ -2,7 +2,11 @@ package uptrace
|
||||
|
||||
import (
|
||||
"context"
|
||||
cryptorand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
@ -10,29 +14,31 @@ import (
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||
"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
|
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/encoding/gzip"
|
||||
|
||||
"github.com/uptrace/uptrace-go/internal"
|
||||
)
|
||||
|
||||
func configureTracing(ctx context.Context, client *client, cfg *config) {
|
||||
provider := cfg.tracerProvider
|
||||
func configureTracing(ctx context.Context, client *client, conf *config) {
|
||||
provider := conf.tracerProvider
|
||||
if provider == nil {
|
||||
var opts []sdktrace.TracerProviderOption
|
||||
|
||||
if res := cfg.newResource(); res != nil {
|
||||
opts = append(opts, sdktrace.WithIDGenerator(defaultIDGenerator()))
|
||||
if res := conf.newResource(); res != nil {
|
||||
opts = append(opts, sdktrace.WithResource(res))
|
||||
}
|
||||
if cfg.traceSampler != nil {
|
||||
opts = append(opts, sdktrace.WithSampler(cfg.traceSampler))
|
||||
if conf.traceSampler != nil {
|
||||
opts = append(opts, sdktrace.WithSampler(conf.traceSampler))
|
||||
}
|
||||
|
||||
provider = sdktrace.NewTracerProvider(opts...)
|
||||
otel.SetTracerProvider(provider)
|
||||
}
|
||||
|
||||
exp, err := otlptrace.New(ctx, otlpTraceClient(client.dsn))
|
||||
exp, err := otlptrace.New(ctx, otlpTraceClient(conf, client.dsn))
|
||||
if err != nil {
|
||||
internal.Logger.Printf("otlptrace.New failed: %s", err)
|
||||
return
|
||||
@ -44,12 +50,12 @@ func configureTracing(ctx context.Context, client *client, cfg *config) {
|
||||
sdktrace.WithMaxExportBatchSize(queueSize),
|
||||
sdktrace.WithBatchTimeout(10 * time.Second),
|
||||
}
|
||||
bspOptions = append(bspOptions, cfg.bspOptions...)
|
||||
bspOptions = append(bspOptions, conf.bspOptions...)
|
||||
|
||||
bsp := sdktrace.NewBatchSpanProcessor(exp, bspOptions...)
|
||||
provider.RegisterSpanProcessor(bsp)
|
||||
|
||||
if cfg.prettyPrint {
|
||||
if conf.prettyPrint {
|
||||
exporter, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
|
||||
if err != nil {
|
||||
internal.Logger.Printf(err.Error())
|
||||
@ -61,7 +67,7 @@ func configureTracing(ctx context.Context, client *client, cfg *config) {
|
||||
client.tp = provider
|
||||
}
|
||||
|
||||
func otlpTraceClient(dsn *DSN) otlptrace.Client {
|
||||
func otlpTraceClient(conf *config, dsn *DSN) otlptrace.Client {
|
||||
options := []otlptracegrpc.Option{
|
||||
otlptracegrpc.WithEndpoint(dsn.OTLPHost()),
|
||||
otlptracegrpc.WithHeaders(map[string]string{
|
||||
@ -71,7 +77,10 @@ func otlpTraceClient(dsn *DSN) otlptrace.Client {
|
||||
otlptracegrpc.WithCompressor(gzip.Name),
|
||||
}
|
||||
|
||||
if dsn.Scheme == "https" {
|
||||
if conf.tlsConf != nil {
|
||||
creds := credentials.NewTLS(conf.tlsConf)
|
||||
options = append(options, otlptracegrpc.WithTLSCredentials(creds))
|
||||
} else if dsn.Scheme == "https" {
|
||||
// Create credentials using system certificates.
|
||||
creds := credentials.NewClientTLSFromCert(nil, "")
|
||||
options = append(options, otlptracegrpc.WithTLSCredentials(creds))
|
||||
@ -95,3 +104,57 @@ func queueSize() int {
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
const (
|
||||
spanIDPrec = int64(time.Millisecond)
|
||||
traceIDPrec = int64(time.Microsecond)
|
||||
)
|
||||
|
||||
type idGenerator struct {
|
||||
sync.Mutex
|
||||
randSource *rand.Rand
|
||||
}
|
||||
|
||||
var _ sdktrace.IDGenerator = (*idGenerator)(nil)
|
||||
|
||||
// NewIDs returns a new trace and span ID.
|
||||
func (gen *idGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
|
||||
unixNano := time.Now().UnixNano()
|
||||
|
||||
gen.Lock()
|
||||
defer gen.Unlock()
|
||||
|
||||
tid := trace.TraceID{}
|
||||
binary.LittleEndian.PutUint64(tid[:8], uint64(unixNano/traceIDPrec))
|
||||
_, _ = gen.randSource.Read(tid[8:])
|
||||
|
||||
sid := trace.SpanID{}
|
||||
_, _ = gen.randSource.Read(sid[:4])
|
||||
binary.LittleEndian.PutUint32(sid[4:], uint32(unixNano/spanIDPrec))
|
||||
|
||||
return tid, sid
|
||||
}
|
||||
|
||||
// NewSpanID returns a ID for a new span in the trace with traceID.
|
||||
func (gen *idGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
|
||||
unixNano := time.Now().UnixNano()
|
||||
|
||||
gen.Lock()
|
||||
defer gen.Unlock()
|
||||
|
||||
sid := trace.SpanID{}
|
||||
_, _ = gen.randSource.Read(sid[:4])
|
||||
binary.LittleEndian.PutUint32(sid[4:], uint32(unixNano/spanIDPrec))
|
||||
|
||||
return sid
|
||||
}
|
||||
|
||||
func defaultIDGenerator() *idGenerator {
|
||||
gen := &idGenerator{}
|
||||
var rngSeed int64
|
||||
_ = binary.Read(cryptorand.Reader, binary.LittleEndian, &rngSeed)
|
||||
gen.randSource = rand.New(rand.NewSource(rngSeed))
|
||||
return gen
|
||||
}
|
||||
|
34
vendor/github.com/uptrace/uptrace-go/uptrace/uptrace.go
generated
vendored
34
vendor/github.com/uptrace/uptrace-go/uptrace/uptrace.go
generated
vendored
@ -3,6 +3,7 @@ package uptrace
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/uptrace/uptrace-go/internal"
|
||||
@ -26,33 +27,42 @@ func ConfigureOpentelemetry(opts ...Option) {
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
cfg := newConfig(opts)
|
||||
conf := newConfig(opts)
|
||||
|
||||
if !cfg.tracingEnabled && !cfg.metricsEnabled {
|
||||
if !conf.tracingEnabled && !conf.metricsEnabled {
|
||||
return
|
||||
}
|
||||
|
||||
dsn, err := ParseDSN(cfg.dsn)
|
||||
dsn, err := ParseDSN(conf.dsn)
|
||||
if err != nil {
|
||||
internal.Logger.Printf("uptrace is disabled: %s", err)
|
||||
internal.Logger.Printf("invalid Uptrace DSN: %s (Uptrace is disabled)", err)
|
||||
return
|
||||
}
|
||||
|
||||
if dsn.ProjectID == "<project_id>" || dsn.Token == "<token>" {
|
||||
internal.Logger.Printf("dummy Uptrace DSN detected: %q (Uptrace is disabled)", conf.dsn)
|
||||
return
|
||||
}
|
||||
|
||||
if strings.HasSuffix(dsn.Host, ":14318") {
|
||||
internal.Logger.Printf("uptrace-go uses OTLP/gRPC exporter, but got host %q", dsn.Host)
|
||||
}
|
||||
|
||||
client := newClient(dsn)
|
||||
|
||||
configurePropagator(cfg)
|
||||
if cfg.tracingEnabled {
|
||||
configureTracing(ctx, client, cfg)
|
||||
configurePropagator(conf)
|
||||
if conf.tracingEnabled {
|
||||
configureTracing(ctx, client, conf)
|
||||
}
|
||||
if cfg.metricsEnabled {
|
||||
configureMetrics(ctx, client, cfg)
|
||||
if conf.metricsEnabled {
|
||||
configureMetrics(ctx, client, conf)
|
||||
}
|
||||
|
||||
atomicClient.Store(client)
|
||||
}
|
||||
|
||||
func configurePropagator(cfg *config) {
|
||||
textMapPropagator := cfg.textMapPropagator
|
||||
func configurePropagator(conf *config) {
|
||||
textMapPropagator := conf.textMapPropagator
|
||||
if textMapPropagator == nil {
|
||||
textMapPropagator = propagation.NewCompositeTextMapPropagator(
|
||||
propagation.TraceContext{},
|
||||
@ -70,7 +80,7 @@ var (
|
||||
Token: "<token>",
|
||||
|
||||
Scheme: "https",
|
||||
Host: "api.uptrace.dev",
|
||||
Host: "uptrace.dev",
|
||||
})
|
||||
atomicClient atomic.Value
|
||||
)
|
||||
|
2
vendor/github.com/uptrace/uptrace-go/uptrace/version.go
generated
vendored
2
vendor/github.com/uptrace/uptrace-go/uptrace/version.go
generated
vendored
@ -2,5 +2,5 @@ package uptrace
|
||||
|
||||
// Version is the current release version.
|
||||
func Version() string {
|
||||
return "1.9.0"
|
||||
return "1.11.4"
|
||||
}
|
||||
|
29
vendor/go.opentelemetry.io/contrib/instrumentation/runtime/doc.go
generated
vendored
29
vendor/go.opentelemetry.io/contrib/instrumentation/runtime/doc.go
generated
vendored
@ -15,18 +15,19 @@
|
||||
// Package runtime implements the conventional runtime metrics specified by OpenTelemetry.
|
||||
//
|
||||
// The metric events produced are:
|
||||
// runtime.go.cgo.calls - Number of cgo calls made by the current process
|
||||
// runtime.go.gc.count - Number of completed garbage collection cycles
|
||||
// runtime.go.gc.pause_ns (ns) Amount of nanoseconds in GC stop-the-world pauses
|
||||
// runtime.go.gc.pause_total_ns (ns) Cumulative nanoseconds in GC stop-the-world pauses since the program started
|
||||
// runtime.go.goroutines - Number of goroutines that currently exist
|
||||
// runtime.go.lookups - Number of pointer lookups performed by the runtime
|
||||
// runtime.go.mem.heap_alloc (bytes) Bytes of allocated heap objects
|
||||
// runtime.go.mem.heap_idle (bytes) Bytes in idle (unused) spans
|
||||
// runtime.go.mem.heap_inuse (bytes) Bytes in in-use spans
|
||||
// runtime.go.mem.heap_objects - Number of allocated heap objects
|
||||
// runtime.go.mem.heap_released (bytes) Bytes of idle spans whose physical memory has been returned to the OS
|
||||
// runtime.go.mem.heap_sys (bytes) Bytes of heap memory obtained from the OS
|
||||
// runtime.go.mem.live_objects - Number of live objects is the number of cumulative Mallocs - Frees
|
||||
// runtime.uptime (ms) Milliseconds since application was initialized
|
||||
//
|
||||
// runtime.go.cgo.calls - Number of cgo calls made by the current process
|
||||
// runtime.go.gc.count - Number of completed garbage collection cycles
|
||||
// runtime.go.gc.pause_ns (ns) Amount of nanoseconds in GC stop-the-world pauses
|
||||
// runtime.go.gc.pause_total_ns (ns) Cumulative nanoseconds in GC stop-the-world pauses since the program started
|
||||
// runtime.go.goroutines - Number of goroutines that currently exist
|
||||
// runtime.go.lookups - Number of pointer lookups performed by the runtime
|
||||
// runtime.go.mem.heap_alloc (bytes) Bytes of allocated heap objects
|
||||
// runtime.go.mem.heap_idle (bytes) Bytes in idle (unused) spans
|
||||
// runtime.go.mem.heap_inuse (bytes) Bytes in in-use spans
|
||||
// runtime.go.mem.heap_objects - Number of allocated heap objects
|
||||
// runtime.go.mem.heap_released (bytes) Bytes of idle spans whose physical memory has been returned to the OS
|
||||
// runtime.go.mem.heap_sys (bytes) Bytes of heap memory obtained from the OS
|
||||
// runtime.go.mem.live_objects - Number of live objects is the number of cumulative Mallocs - Frees
|
||||
// runtime.uptime (ms) Milliseconds since application was initialized
|
||||
package runtime // import "go.opentelemetry.io/contrib/instrumentation/runtime"
|
||||
|
2
vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go
generated
vendored
2
vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go
generated
vendored
@ -16,7 +16,7 @@ package runtime // import "go.opentelemetry.io/contrib/instrumentation/runtime"
|
||||
|
||||
// Version is the current release version of the runtime instrumentation.
|
||||
func Version() string {
|
||||
return "0.34.0"
|
||||
return "0.36.4"
|
||||
// This string is updated by the pre_release.sh script during release
|
||||
}
|
||||
|
||||
|
129
vendor/go.opentelemetry.io/otel/CHANGELOG.md
generated
vendored
129
vendor/go.opentelemetry.io/otel/CHANGELOG.md
generated
vendored
@ -8,6 +8,126 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [1.11.1/0.33.0] 2022-10-19
|
||||
|
||||
### Added
|
||||
|
||||
- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
|
||||
By default, it will register with the default Prometheus registerer.
|
||||
A non-default registerer can be used by passing the `WithRegisterer` option. (#3239)
|
||||
- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
|
||||
- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
|
||||
|
||||
### Changed
|
||||
|
||||
- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
|
||||
It will return an error if the exporter fails to register with Prometheus. (#3239)
|
||||
|
||||
### Fixed
|
||||
|
||||
- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
|
||||
- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
|
||||
This fixes the implementation to be compliant with the W3C specification. (#3226)
|
||||
- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252)
|
||||
- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268)
|
||||
- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
|
||||
- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
|
||||
- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
|
||||
- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
|
||||
- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
|
||||
Instead the exporter is defined as an "unchecked" collector for Prometheus.
|
||||
This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342)
|
||||
- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
|
||||
- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
|
||||
This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
|
||||
|
||||
## [1.11.0/0.32.3] 2022-10-12
|
||||
|
||||
### Added
|
||||
|
||||
- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)
|
||||
|
||||
### Changed
|
||||
|
||||
- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
|
||||
- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
|
||||
This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
|
||||
|
||||
## [0.32.2] Metric SDK (Alpha) - 2022-10-11
|
||||
|
||||
### Added
|
||||
|
||||
- Added an example of using metric views to customize instruments. (#3177)
|
||||
- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)
|
||||
|
||||
### Changed
|
||||
|
||||
- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220)
|
||||
- Update histogram default bounds to match the requirements of the latest specification. (#3222)
|
||||
- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Use default view if instrument does not match any registered view of a reader. (#3224, #3237)
|
||||
- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
|
||||
- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
|
||||
- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251)
|
||||
- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
|
||||
|
||||
## [0.32.1] Metric SDK (Alpha) - 2022-09-22
|
||||
|
||||
### Changed
|
||||
|
||||
- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
|
||||
Invalid characters are replaced with `_`. (#3212)
|
||||
|
||||
### Added
|
||||
|
||||
- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
|
||||
- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Updated go.mods to point to valid versions of the sdk. (#3216)
|
||||
- Set the `MeterProvider` resource on all exported metric data. (#3218)
|
||||
|
||||
## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
|
||||
|
||||
### Changed
|
||||
|
||||
- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
|
||||
Please see the package documentation for how the new SDK is initialized and configured. (#3175)
|
||||
- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179)
|
||||
|
||||
### Removed
|
||||
|
||||
- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
|
||||
A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
|
||||
A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
|
||||
- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
|
||||
- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
|
||||
- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175)
|
||||
- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175)
|
||||
- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
|
||||
|
||||
## [1.10.0] - 2022-09-09
|
||||
|
||||
### Added
|
||||
@ -191,7 +311,7 @@ Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be mod
|
||||
- `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
|
||||
- `OTEL_SPAN_LINK_COUNT_LIMIT`
|
||||
- `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
|
||||
|
||||
|
||||
If the provided environment variables are invalid (negative), the default values would be used.
|
||||
- Rename the `gc` runtime name to `go` (#2560)
|
||||
- Add resource container ID detection. (#2418)
|
||||
@ -1907,7 +2027,12 @@ It contains api and sdk for trace and meter.
|
||||
- CircleCI build CI manifest files.
|
||||
- CODEOWNERS file to track owners of this project.
|
||||
|
||||
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.10.0...HEAD
|
||||
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.1...HEAD
|
||||
[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1
|
||||
[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0
|
||||
[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2
|
||||
[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1
|
||||
[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0
|
||||
[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0
|
||||
[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0
|
||||
[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0
|
||||
|
2
vendor/go.opentelemetry.io/otel/Makefile
generated
vendored
2
vendor/go.opentelemetry.io/otel/Makefile
generated
vendored
@ -156,7 +156,7 @@ go-mod-tidy/%: DIR=$*
|
||||
go-mod-tidy/%: | crosslink
|
||||
@echo "$(GO) mod tidy in $(DIR)" \
|
||||
&& cd $(DIR) \
|
||||
&& $(GO) mod tidy -compat=1.17
|
||||
&& $(GO) mod tidy -compat=1.18
|
||||
|
||||
.PHONY: lint-modules
|
||||
lint-modules: go-mod-tidy
|
||||
|
5
vendor/go.opentelemetry.io/otel/README.md
generated
vendored
5
vendor/go.opentelemetry.io/otel/README.md
generated
vendored
@ -52,19 +52,14 @@ Currently, this project supports the following environments.
|
||||
| ------- | ---------- | ------------ |
|
||||
| Ubuntu | 1.19 | amd64 |
|
||||
| Ubuntu | 1.18 | amd64 |
|
||||
| Ubuntu | 1.17 | amd64 |
|
||||
| Ubuntu | 1.19 | 386 |
|
||||
| Ubuntu | 1.18 | 386 |
|
||||
| Ubuntu | 1.17 | 386 |
|
||||
| MacOS | 1.19 | amd64 |
|
||||
| MacOS | 1.18 | amd64 |
|
||||
| MacOS | 1.17 | amd64 |
|
||||
| Windows | 1.19 | amd64 |
|
||||
| Windows | 1.18 | amd64 |
|
||||
| Windows | 1.17 | amd64 |
|
||||
| Windows | 1.19 | 386 |
|
||||
| Windows | 1.18 | 386 |
|
||||
| Windows | 1.17 | 386 |
|
||||
|
||||
While this project should work for other systems, no compatibility guarantees
|
||||
are made for those systems currently.
|
||||
|
67
vendor/go.opentelemetry.io/otel/attribute/value.go
generated
vendored
67
vendor/go.opentelemetry.io/otel/attribute/value.go
generated
vendored
@ -17,9 +17,11 @@ package attribute // import "go.opentelemetry.io/otel/attribute"
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"go.opentelemetry.io/otel/internal"
|
||||
"go.opentelemetry.io/otel/internal/attribute"
|
||||
)
|
||||
|
||||
//go:generate stringer -type=Type
|
||||
@ -66,12 +68,7 @@ func BoolValue(v bool) Value {
|
||||
|
||||
// BoolSliceValue creates a BOOLSLICE Value.
|
||||
func BoolSliceValue(v []bool) Value {
|
||||
cp := make([]bool, len(v))
|
||||
copy(cp, v)
|
||||
return Value{
|
||||
vtype: BOOLSLICE,
|
||||
slice: &cp,
|
||||
}
|
||||
return Value{vtype: BOOLSLICE, slice: attribute.SliceValue(v)}
|
||||
}
|
||||
|
||||
// IntValue creates an INT64 Value.
|
||||
@ -81,13 +78,14 @@ func IntValue(v int) Value {
|
||||
|
||||
// IntSliceValue creates an INTSLICE Value.
|
||||
func IntSliceValue(v []int) Value {
|
||||
cp := make([]int64, 0, len(v))
|
||||
for _, i := range v {
|
||||
cp = append(cp, int64(i))
|
||||
var int64Val int64
|
||||
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
|
||||
for i, val := range v {
|
||||
cp.Elem().Index(i).SetInt(int64(val))
|
||||
}
|
||||
return Value{
|
||||
vtype: INT64SLICE,
|
||||
slice: &cp,
|
||||
slice: cp.Elem().Interface(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -101,12 +99,7 @@ func Int64Value(v int64) Value {
|
||||
|
||||
// Int64SliceValue creates an INT64SLICE Value.
|
||||
func Int64SliceValue(v []int64) Value {
|
||||
cp := make([]int64, len(v))
|
||||
copy(cp, v)
|
||||
return Value{
|
||||
vtype: INT64SLICE,
|
||||
slice: &cp,
|
||||
}
|
||||
return Value{vtype: INT64SLICE, slice: attribute.SliceValue(v)}
|
||||
}
|
||||
|
||||
// Float64Value creates a FLOAT64 Value.
|
||||
@ -119,12 +112,7 @@ func Float64Value(v float64) Value {
|
||||
|
||||
// Float64SliceValue creates a FLOAT64SLICE Value.
|
||||
func Float64SliceValue(v []float64) Value {
|
||||
cp := make([]float64, len(v))
|
||||
copy(cp, v)
|
||||
return Value{
|
||||
vtype: FLOAT64SLICE,
|
||||
slice: &cp,
|
||||
}
|
||||
return Value{vtype: FLOAT64SLICE, slice: attribute.SliceValue(v)}
|
||||
}
|
||||
|
||||
// StringValue creates a STRING Value.
|
||||
@ -137,12 +125,7 @@ func StringValue(v string) Value {
|
||||
|
||||
// StringSliceValue creates a STRINGSLICE Value.
|
||||
func StringSliceValue(v []string) Value {
|
||||
cp := make([]string, len(v))
|
||||
copy(cp, v)
|
||||
return Value{
|
||||
vtype: STRINGSLICE,
|
||||
slice: &cp,
|
||||
}
|
||||
return Value{vtype: STRINGSLICE, slice: attribute.SliceValue(v)}
|
||||
}
|
||||
|
||||
// Type returns a type of the Value.
|
||||
@ -159,10 +142,7 @@ func (v Value) AsBool() bool {
|
||||
// AsBoolSlice returns the []bool value. Make sure that the Value's type is
|
||||
// BOOLSLICE.
|
||||
func (v Value) AsBoolSlice() []bool {
|
||||
if s, ok := v.slice.(*[]bool); ok {
|
||||
return *s
|
||||
}
|
||||
return nil
|
||||
return attribute.AsSlice[bool](v.slice)
|
||||
}
|
||||
|
||||
// AsInt64 returns the int64 value. Make sure that the Value's type is
|
||||
@ -174,10 +154,7 @@ func (v Value) AsInt64() int64 {
|
||||
// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
|
||||
// INT64SLICE.
|
||||
func (v Value) AsInt64Slice() []int64 {
|
||||
if s, ok := v.slice.(*[]int64); ok {
|
||||
return *s
|
||||
}
|
||||
return nil
|
||||
return attribute.AsSlice[int64](v.slice)
|
||||
}
|
||||
|
||||
// AsFloat64 returns the float64 value. Make sure that the Value's
|
||||
@ -189,10 +166,7 @@ func (v Value) AsFloat64() float64 {
|
||||
// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
|
||||
// FLOAT64SLICE.
|
||||
func (v Value) AsFloat64Slice() []float64 {
|
||||
if s, ok := v.slice.(*[]float64); ok {
|
||||
return *s
|
||||
}
|
||||
return nil
|
||||
return attribute.AsSlice[float64](v.slice)
|
||||
}
|
||||
|
||||
// AsString returns the string value. Make sure that the Value's type
|
||||
@ -204,10 +178,7 @@ func (v Value) AsString() string {
|
||||
// AsStringSlice returns the []string value. Make sure that the Value's type is
|
||||
// STRINGSLICE.
|
||||
func (v Value) AsStringSlice() []string {
|
||||
if s, ok := v.slice.(*[]string); ok {
|
||||
return *s
|
||||
}
|
||||
return nil
|
||||
return attribute.AsSlice[string](v.slice)
|
||||
}
|
||||
|
||||
type unknownValueType struct{}
|
||||
@ -239,19 +210,19 @@ func (v Value) AsInterface() interface{} {
|
||||
func (v Value) Emit() string {
|
||||
switch v.Type() {
|
||||
case BOOLSLICE:
|
||||
return fmt.Sprint(*(v.slice.(*[]bool)))
|
||||
return fmt.Sprint(v.AsBoolSlice())
|
||||
case BOOL:
|
||||
return strconv.FormatBool(v.AsBool())
|
||||
case INT64SLICE:
|
||||
return fmt.Sprint(*(v.slice.(*[]int64)))
|
||||
return fmt.Sprint(v.AsInt64Slice())
|
||||
case INT64:
|
||||
return strconv.FormatInt(v.AsInt64(), 10)
|
||||
case FLOAT64SLICE:
|
||||
return fmt.Sprint(*(v.slice.(*[]float64)))
|
||||
return fmt.Sprint(v.AsFloat64Slice())
|
||||
case FLOAT64:
|
||||
return fmt.Sprint(v.AsFloat64())
|
||||
case STRINGSLICE:
|
||||
return fmt.Sprint(*(v.slice.(*[]string)))
|
||||
return fmt.Sprint(v.AsStringSlice())
|
||||
case STRING:
|
||||
return v.stringly
|
||||
default:
|
||||
|
18
vendor/go.opentelemetry.io/otel/baggage/baggage.go
generated
vendored
18
vendor/go.opentelemetry.io/otel/baggage/baggage.go
generated
vendored
@ -250,8 +250,9 @@ type Member struct {
|
||||
hasData bool
|
||||
}
|
||||
|
||||
// NewMember returns a new Member from the passed arguments. An error is
|
||||
// returned if the created Member would be invalid according to the W3C
|
||||
// NewMember returns a new Member from the passed arguments. The key will be
|
||||
// used directly while the value will be url decoded after validation. An error
|
||||
// is returned if the created Member would be invalid according to the W3C
|
||||
// Baggage specification.
|
||||
func NewMember(key, value string, props ...Property) (Member, error) {
|
||||
m := Member{
|
||||
@ -263,7 +264,11 @@ func NewMember(key, value string, props ...Property) (Member, error) {
|
||||
if err := m.validate(); err != nil {
|
||||
return newInvalidMember(), err
|
||||
}
|
||||
|
||||
decodedValue, err := url.QueryUnescape(value)
|
||||
if err != nil {
|
||||
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
|
||||
}
|
||||
m.value = decodedValue
|
||||
return m, nil
|
||||
}
|
||||
|
||||
@ -328,8 +333,9 @@ func parseMember(member string) (Member, error) {
|
||||
return Member{key: key, value: value, properties: props, hasData: true}, nil
|
||||
}
|
||||
|
||||
// validate ensures m conforms to the W3C Baggage specification, returning an
|
||||
// error otherwise.
|
||||
// validate ensures m conforms to the W3C Baggage specification.
|
||||
// A key is just an ASCII string, but a value must be URL encoded UTF-8,
|
||||
// returning an error otherwise.
|
||||
func (m Member) validate() error {
|
||||
if !m.hasData {
|
||||
return fmt.Errorf("%w: %q", errInvalidMember, m)
|
||||
@ -465,6 +471,7 @@ func (b Baggage) Member(key string) Member {
|
||||
key: key,
|
||||
value: v.Value,
|
||||
properties: fromInternalProperties(v.Properties),
|
||||
hasData: true,
|
||||
}
|
||||
}
|
||||
|
||||
@ -484,6 +491,7 @@ func (b Baggage) Members() []Member {
|
||||
key: k,
|
||||
value: v.Value,
|
||||
properties: fromInternalProperties(v.Properties),
|
||||
hasData: true,
|
||||
})
|
||||
}
|
||||
return members
|
||||
|
24
vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go
generated
vendored
Normal file
24
vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package internal contains common functionality for all OTLP exporters.
|
||||
package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
|
||||
|
||||
import "go.opentelemetry.io/otel"
|
||||
|
||||
// GetUserAgentHeader return an OTLP header value form "OTel OTLP Exporter Go/{{ .Version }}"
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#user-agent
|
||||
func GetUserAgentHeader() string {
|
||||
return "OTel OTLP Exporter Go/" + otel.Version()
|
||||
}
|
49
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/client.go
generated
vendored
Normal file
49
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/client.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
// Client handles the transmission of OTLP data to an OTLP receiving endpoint.
|
||||
type Client interface {
|
||||
// UploadMetrics transmits metric data to an OTLP receiver.
|
||||
//
|
||||
// All retry logic must be handled by UploadMetrics alone, the Exporter
|
||||
// does not implement any retry logic. All returned errors are considered
|
||||
// unrecoverable.
|
||||
UploadMetrics(context.Context, *mpb.ResourceMetrics) error
|
||||
|
||||
// ForceFlush flushes any metric data held by an Client.
|
||||
//
|
||||
// The deadline or cancellation of the passed context must be honored. An
|
||||
// appropriate error should be returned in these situations.
|
||||
ForceFlush(context.Context) error
|
||||
|
||||
// Shutdown flushes all metric data held by a Client and closes any
|
||||
// connections it holds open.
|
||||
//
|
||||
// The deadline or cancellation of the passed context must be honored. An
|
||||
// appropriate error should be returned in these situations.
|
||||
//
|
||||
// Shutdown will only be called once by the Exporter. Once a return value
|
||||
// is received by the Exporter from Shutdown the Client will not be used
|
||||
// anymore. Therefore all computational resources need to be released
|
||||
// after this is called so the Client can be garbage collected.
|
||||
Shutdown(context.Context) error
|
||||
}
|
43
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/clients.go
generated
vendored
43
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/clients.go
generated
vendored
@ -1,43 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
// Client manages connections to the collector, handles the
|
||||
// transformation of data into wire format, and the transmission of that
|
||||
// data to the collector.
|
||||
type Client interface {
|
||||
// Start should establish connection(s) to endpoint(s). It is
|
||||
// called just once by the exporter, so the implementation
|
||||
// does not need to worry about idempotence and locking.
|
||||
Start(ctx context.Context) error
|
||||
// Stop should close the connections. The function is called
|
||||
// only once by the exporter, so the implementation does not
|
||||
// need to worry about idempotence, but it may be called
|
||||
// concurrently with UploadMetrics, so proper
|
||||
// locking is required. The function serves as a
|
||||
// synchronization point - after the function returns, the
|
||||
// process of closing connections is assumed to be finished.
|
||||
Stop(ctx context.Context) error
|
||||
// UploadMetrics should transform the passed metrics to the
|
||||
// wire format and send it to the collector. May be called
|
||||
// concurrently.
|
||||
UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error
|
||||
}
|
@ -12,13 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package registry provides a non-standalone implementation of
|
||||
MeterProvider that adds uniqueness checking for instrument descriptors
|
||||
on top of other MeterProvider it wraps.
|
||||
|
||||
This package is currently in a pre-GA phase. Backwards incompatible changes
|
||||
may be introduced in subsequent minor version releases as we work to track the
|
||||
evolving OpenTelemetry specification and user feedback.
|
||||
*/
|
||||
package registry // import "go.opentelemetry.io/otel/sdk/metric/registry"
|
||||
// Package otlpmetric provides an OpenTelemetry metric Exporter that can be
|
||||
// used with PeriodicReader. It transforms metricdata into OTLP and transmits
|
||||
// the transformed data to OTLP receivers. The Exporter is configurable to use
|
||||
// different Clients, each using a distinct transport protocol to communicate
|
||||
// to an OTLP receiving endpoint.
|
||||
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
168
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/exporter.go
generated
vendored
168
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/exporter.go
generated
vendored
@ -16,117 +16,89 @@ package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
errAlreadyStarted = errors.New("already started")
|
||||
)
|
||||
// exporter exports metrics data as OTLP.
|
||||
type exporter struct {
|
||||
// Ensure synchronous access to the client across all functionality.
|
||||
clientMu sync.Mutex
|
||||
client Client
|
||||
|
||||
// Exporter exports metrics data in the OTLP wire format.
|
||||
type Exporter struct {
|
||||
client Client
|
||||
temporalitySelector aggregation.TemporalitySelector
|
||||
|
||||
mu sync.RWMutex
|
||||
started bool
|
||||
|
||||
startOnce sync.Once
|
||||
stopOnce sync.Once
|
||||
shutdownOnce sync.Once
|
||||
}
|
||||
|
||||
// Export exports a batch of metrics.
|
||||
func (e *Exporter) Export(ctx context.Context, res *resource.Resource, ilr export.InstrumentationLibraryReader) error {
|
||||
rm, err := metrictransform.InstrumentationLibraryReader(ctx, e, res, ilr, 1)
|
||||
if err != nil {
|
||||
// Export transforms and transmits metric data to an OTLP receiver.
|
||||
func (e *exporter) Export(ctx context.Context, rm metricdata.ResourceMetrics) error {
|
||||
otlpRm, err := transform.ResourceMetrics(rm)
|
||||
// Best effort upload of transformable metrics.
|
||||
e.clientMu.Lock()
|
||||
upErr := e.client.UploadMetrics(ctx, otlpRm)
|
||||
e.clientMu.Unlock()
|
||||
if upErr != nil {
|
||||
if err == nil {
|
||||
return upErr
|
||||
}
|
||||
// Merge the two errors.
|
||||
return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// ForceFlush flushes any metric data held by an exporter.
|
||||
func (e *exporter) ForceFlush(ctx context.Context) error {
|
||||
// The Exporter does not hold data, forward the command to the client.
|
||||
e.clientMu.Lock()
|
||||
defer e.clientMu.Unlock()
|
||||
return e.client.ForceFlush(ctx)
|
||||
}
|
||||
|
||||
var errShutdown = fmt.Errorf("exporter is shutdown")
|
||||
|
||||
// Shutdown flushes all metric data held by an exporter and releases any held
|
||||
// computational resources.
|
||||
func (e *exporter) Shutdown(ctx context.Context) error {
|
||||
err := errShutdown
|
||||
e.shutdownOnce.Do(func() {
|
||||
e.clientMu.Lock()
|
||||
client := e.client
|
||||
e.client = shutdownClient{}
|
||||
e.clientMu.Unlock()
|
||||
err = client.Shutdown(ctx)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// New return an Exporter that uses client to transmits the OTLP data it
|
||||
// produces. The client is assumed to be fully started and able to communicate
|
||||
// with its OTLP receiving endpoint.
|
||||
func New(client Client) metric.Exporter {
|
||||
return &exporter{client: client}
|
||||
}
|
||||
|
||||
type shutdownClient struct{}
|
||||
|
||||
func (c shutdownClient) err(ctx context.Context) error {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
if rm == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: There is never more than one resource emitted by this
|
||||
// call, as per the specification. We can change the
|
||||
// signature of UploadMetrics correspondingly. Here create a
|
||||
// singleton list to reduce the size of the current PR:
|
||||
return e.client.UploadMetrics(ctx, rm)
|
||||
return errShutdown
|
||||
}
|
||||
|
||||
// Start establishes a connection to the receiving endpoint.
|
||||
func (e *Exporter) Start(ctx context.Context) error {
|
||||
var err = errAlreadyStarted
|
||||
e.startOnce.Do(func() {
|
||||
e.mu.Lock()
|
||||
e.started = true
|
||||
e.mu.Unlock()
|
||||
err = e.client.Start(ctx)
|
||||
})
|
||||
|
||||
return err
|
||||
func (c shutdownClient) UploadMetrics(ctx context.Context, _ *mpb.ResourceMetrics) error {
|
||||
return c.err(ctx)
|
||||
}
|
||||
|
||||
// Shutdown flushes all exports and closes all connections to the receiving endpoint.
|
||||
func (e *Exporter) Shutdown(ctx context.Context) error {
|
||||
e.mu.RLock()
|
||||
started := e.started
|
||||
e.mu.RUnlock()
|
||||
|
||||
if !started {
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
e.stopOnce.Do(func() {
|
||||
err = e.client.Stop(ctx)
|
||||
e.mu.Lock()
|
||||
e.started = false
|
||||
e.mu.Unlock()
|
||||
})
|
||||
|
||||
return err
|
||||
func (c shutdownClient) ForceFlush(ctx context.Context) error {
|
||||
return c.err(ctx)
|
||||
}
|
||||
|
||||
// TemporalityFor returns the accepted temporality for a metric measurment.
|
||||
func (e *Exporter) TemporalityFor(descriptor *sdkapi.Descriptor, kind aggregation.Kind) aggregation.Temporality {
|
||||
return e.temporalitySelector.TemporalityFor(descriptor, kind)
|
||||
}
|
||||
|
||||
var _ export.Exporter = (*Exporter)(nil)
|
||||
|
||||
// New constructs a new Exporter and starts it.
|
||||
func New(ctx context.Context, client Client, opts ...Option) (*Exporter, error) {
|
||||
exp := NewUnstarted(client, opts...)
|
||||
if err := exp.Start(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return exp, nil
|
||||
}
|
||||
|
||||
// NewUnstarted constructs a new Exporter and does not start it.
|
||||
func NewUnstarted(client Client, opts ...Option) *Exporter {
|
||||
cfg := config{
|
||||
// Note: the default TemporalitySelector is specified
|
||||
// as Cumulative:
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/issues/731
|
||||
temporalitySelector: aggregation.CumulativeTemporalitySelector(),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
cfg = opt.apply(cfg)
|
||||
}
|
||||
|
||||
e := &Exporter{
|
||||
client: client,
|
||||
temporalitySelector: cfg.temporalitySelector,
|
||||
}
|
||||
|
||||
return e
|
||||
func (c shutdownClient) Shutdown(ctx context.Context) error {
|
||||
return c.err(ctx)
|
||||
}
|
||||
|
@ -1,158 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metrictransform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform"
|
||||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
)
|
||||
|
||||
// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
|
||||
func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
|
||||
if len(attrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
out := make([]*commonpb.KeyValue, 0, len(attrs))
|
||||
for _, kv := range attrs {
|
||||
out = append(out, KeyValue(kv))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Iterator transforms an attribute iterator into OTLP key-values.
|
||||
func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
|
||||
l := iter.Len()
|
||||
if l == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
out := make([]*commonpb.KeyValue, 0, l)
|
||||
for iter.Next() {
|
||||
out = append(out, KeyValue(iter.Attribute()))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// ResourceAttributes transforms a Resource OTLP key-values.
|
||||
func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue {
|
||||
return Iterator(res.Iter())
|
||||
}
|
||||
|
||||
// KeyValue transforms an attribute KeyValue into an OTLP key-value.
|
||||
func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
|
||||
return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
|
||||
}
|
||||
|
||||
// Value transforms an attribute Value into an OTLP AnyValue.
|
||||
func Value(v attribute.Value) *commonpb.AnyValue {
|
||||
av := new(commonpb.AnyValue)
|
||||
switch v.Type() {
|
||||
case attribute.BOOL:
|
||||
av.Value = &commonpb.AnyValue_BoolValue{
|
||||
BoolValue: v.AsBool(),
|
||||
}
|
||||
case attribute.BOOLSLICE:
|
||||
av.Value = &commonpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &commonpb.ArrayValue{
|
||||
Values: boolSliceValues(v.AsBoolSlice()),
|
||||
},
|
||||
}
|
||||
case attribute.INT64:
|
||||
av.Value = &commonpb.AnyValue_IntValue{
|
||||
IntValue: v.AsInt64(),
|
||||
}
|
||||
case attribute.INT64SLICE:
|
||||
av.Value = &commonpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &commonpb.ArrayValue{
|
||||
Values: int64SliceValues(v.AsInt64Slice()),
|
||||
},
|
||||
}
|
||||
case attribute.FLOAT64:
|
||||
av.Value = &commonpb.AnyValue_DoubleValue{
|
||||
DoubleValue: v.AsFloat64(),
|
||||
}
|
||||
case attribute.FLOAT64SLICE:
|
||||
av.Value = &commonpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &commonpb.ArrayValue{
|
||||
Values: float64SliceValues(v.AsFloat64Slice()),
|
||||
},
|
||||
}
|
||||
case attribute.STRING:
|
||||
av.Value = &commonpb.AnyValue_StringValue{
|
||||
StringValue: v.AsString(),
|
||||
}
|
||||
case attribute.STRINGSLICE:
|
||||
av.Value = &commonpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &commonpb.ArrayValue{
|
||||
Values: stringSliceValues(v.AsStringSlice()),
|
||||
},
|
||||
}
|
||||
default:
|
||||
av.Value = &commonpb.AnyValue_StringValue{
|
||||
StringValue: "INVALID",
|
||||
}
|
||||
}
|
||||
return av
|
||||
}
|
||||
|
||||
func boolSliceValues(vals []bool) []*commonpb.AnyValue {
|
||||
converted := make([]*commonpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_BoolValue{
|
||||
BoolValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func int64SliceValues(vals []int64) []*commonpb.AnyValue {
|
||||
converted := make([]*commonpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_IntValue{
|
||||
IntValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func float64SliceValues(vals []float64) []*commonpb.AnyValue {
|
||||
converted := make([]*commonpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_DoubleValue{
|
||||
DoubleValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func stringSliceValues(vals []string) []*commonpb.AnyValue {
|
||||
converted := make([]*commonpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_StringValue{
|
||||
StringValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
437
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform/metric.go
generated
vendored
437
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform/metric.go
generated
vendored
@ -1,437 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package metrictransform provides translations for opentelemetry-go concepts and
|
||||
// structures to otlp structures.
|
||||
package metrictransform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrUnimplementedAgg is returned when a transformation of an unimplemented
|
||||
// aggregator is attempted.
|
||||
ErrUnimplementedAgg = errors.New("unimplemented aggregator")
|
||||
|
||||
// ErrIncompatibleAgg is returned when
|
||||
// aggregation.Kind implies an interface conversion that has
|
||||
// failed.
|
||||
ErrIncompatibleAgg = errors.New("incompatible aggregation type")
|
||||
|
||||
// ErrUnknownValueType is returned when a transformation of an unknown value
|
||||
// is attempted.
|
||||
ErrUnknownValueType = errors.New("invalid value type")
|
||||
|
||||
// ErrContextCanceled is returned when a context cancellation halts a
|
||||
// transformation.
|
||||
ErrContextCanceled = errors.New("context canceled")
|
||||
|
||||
// ErrTransforming is returned when an unexected error is encountered transforming.
|
||||
ErrTransforming = errors.New("transforming failed")
|
||||
)
|
||||
|
||||
// result is the product of transforming Records into OTLP Metrics.
|
||||
type result struct {
|
||||
Metric *metricpb.Metric
|
||||
Err error
|
||||
}
|
||||
|
||||
// toNanos returns the number of nanoseconds since the UNIX epoch.
|
||||
func toNanos(t time.Time) uint64 {
|
||||
if t.IsZero() {
|
||||
return 0
|
||||
}
|
||||
return uint64(t.UnixNano())
|
||||
}
|
||||
|
||||
// InstrumentationLibraryReader transforms all records contained in a checkpoint into
|
||||
// batched OTLP ResourceMetrics.
|
||||
func InstrumentationLibraryReader(ctx context.Context, temporalitySelector aggregation.TemporalitySelector, res *resource.Resource, ilmr export.InstrumentationLibraryReader, numWorkers uint) (*metricpb.ResourceMetrics, error) {
|
||||
var sms []*metricpb.ScopeMetrics
|
||||
|
||||
err := ilmr.ForEach(func(lib instrumentation.Library, mr export.Reader) error {
|
||||
records, errc := source(ctx, temporalitySelector, mr)
|
||||
|
||||
// Start a fixed number of goroutines to transform records.
|
||||
transformed := make(chan result)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(int(numWorkers))
|
||||
for i := uint(0); i < numWorkers; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
transformer(ctx, temporalitySelector, records, transformed)
|
||||
}()
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(transformed)
|
||||
}()
|
||||
|
||||
// Synchronously collect the transformed records and transmit.
|
||||
ms, err := sink(ctx, transformed)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// source is complete, check for any errors.
|
||||
if err := <-errc; err != nil {
|
||||
return err
|
||||
}
|
||||
if len(ms) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sms = append(sms, &metricpb.ScopeMetrics{
|
||||
Metrics: ms,
|
||||
SchemaUrl: lib.SchemaURL,
|
||||
Scope: &commonpb.InstrumentationScope{
|
||||
Name: lib.Name,
|
||||
Version: lib.Version,
|
||||
},
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if len(sms) == 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rms := &metricpb.ResourceMetrics{
|
||||
Resource: Resource(res),
|
||||
SchemaUrl: res.SchemaURL(),
|
||||
ScopeMetrics: sms,
|
||||
}
|
||||
|
||||
return rms, err
|
||||
}
|
||||
|
||||
// source starts a goroutine that sends each one of the Records yielded by
|
||||
// the Reader on the returned chan. Any error encountered will be sent
|
||||
// on the returned error chan after seeding is complete.
|
||||
func source(ctx context.Context, temporalitySelector aggregation.TemporalitySelector, mr export.Reader) (<-chan export.Record, <-chan error) {
|
||||
errc := make(chan error, 1)
|
||||
out := make(chan export.Record)
|
||||
// Seed records into process.
|
||||
go func() {
|
||||
defer close(out)
|
||||
// No select is needed since errc is buffered.
|
||||
errc <- mr.ForEach(temporalitySelector, func(r export.Record) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ErrContextCanceled
|
||||
case out <- r:
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}()
|
||||
return out, errc
|
||||
}
|
||||
|
||||
// transformer transforms records read from the passed in chan into
|
||||
// OTLP Metrics which are sent on the out chan.
|
||||
func transformer(ctx context.Context, temporalitySelector aggregation.TemporalitySelector, in <-chan export.Record, out chan<- result) {
|
||||
for r := range in {
|
||||
m, err := Record(temporalitySelector, r)
|
||||
// Propagate errors, but do not send empty results.
|
||||
if err == nil && m == nil {
|
||||
continue
|
||||
}
|
||||
res := result{
|
||||
Metric: m,
|
||||
Err: err,
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case out <- res:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sink collects transformed Records and batches them.
|
||||
//
|
||||
// Any errors encountered transforming input will be reported with an
|
||||
// ErrTransforming as well as the completed ResourceMetrics. It is up to the
|
||||
// caller to handle any incorrect data in these ResourceMetric.
|
||||
func sink(ctx context.Context, in <-chan result) ([]*metricpb.Metric, error) {
|
||||
var errStrings []string
|
||||
|
||||
// Group by the MetricDescriptor.
|
||||
grouped := map[string]*metricpb.Metric{}
|
||||
for res := range in {
|
||||
if res.Err != nil {
|
||||
errStrings = append(errStrings, res.Err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
mID := res.Metric.GetName()
|
||||
m, ok := grouped[mID]
|
||||
if !ok {
|
||||
grouped[mID] = res.Metric
|
||||
continue
|
||||
}
|
||||
// Note: There is extra work happening in this code that can be
|
||||
// improved when the work described in #2119 is completed. The SDK has
|
||||
// a guarantee that no more than one point per period per attribute
|
||||
// set is produced, so this fallthrough should never happen. The final
|
||||
// step of #2119 is to remove all the grouping logic here.
|
||||
switch res.Metric.Data.(type) {
|
||||
case *metricpb.Metric_Gauge:
|
||||
m.GetGauge().DataPoints = append(m.GetGauge().DataPoints, res.Metric.GetGauge().DataPoints...)
|
||||
case *metricpb.Metric_Sum:
|
||||
m.GetSum().DataPoints = append(m.GetSum().DataPoints, res.Metric.GetSum().DataPoints...)
|
||||
case *metricpb.Metric_Histogram:
|
||||
m.GetHistogram().DataPoints = append(m.GetHistogram().DataPoints, res.Metric.GetHistogram().DataPoints...)
|
||||
case *metricpb.Metric_Summary:
|
||||
m.GetSummary().DataPoints = append(m.GetSummary().DataPoints, res.Metric.GetSummary().DataPoints...)
|
||||
default:
|
||||
err := fmt.Sprintf("unsupported metric type: %T", res.Metric.Data)
|
||||
errStrings = append(errStrings, err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(grouped) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
ms := make([]*metricpb.Metric, 0, len(grouped))
|
||||
for _, m := range grouped {
|
||||
ms = append(ms, m)
|
||||
}
|
||||
|
||||
// Report any transform errors.
|
||||
if len(errStrings) > 0 {
|
||||
return ms, fmt.Errorf("%w:\n -%s", ErrTransforming, strings.Join(errStrings, "\n -"))
|
||||
}
|
||||
return ms, nil
|
||||
}
|
||||
|
||||
// Record transforms a Record into an OTLP Metric. An ErrIncompatibleAgg
|
||||
// error is returned if the Record Aggregator is not supported.
|
||||
func Record(temporalitySelector aggregation.TemporalitySelector, r export.Record) (*metricpb.Metric, error) {
|
||||
agg := r.Aggregation()
|
||||
switch agg.Kind() {
|
||||
case aggregation.HistogramKind:
|
||||
h, ok := agg.(aggregation.Histogram)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
|
||||
}
|
||||
return histogramPoint(r, temporalitySelector.TemporalityFor(r.Descriptor(), aggregation.HistogramKind), h)
|
||||
|
||||
case aggregation.SumKind:
|
||||
s, ok := agg.(aggregation.Sum)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
|
||||
}
|
||||
sum, err := s.Sum()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sumPoint(r, sum, r.StartTime(), r.EndTime(), temporalitySelector.TemporalityFor(r.Descriptor(), aggregation.SumKind), r.Descriptor().InstrumentKind().Monotonic())
|
||||
|
||||
case aggregation.LastValueKind:
|
||||
lv, ok := agg.(aggregation.LastValue)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
|
||||
}
|
||||
value, tm, err := lv.LastValue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return gaugePoint(r, value, time.Time{}, tm)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: %T", ErrUnimplementedAgg, agg)
|
||||
}
|
||||
}
|
||||
|
||||
func gaugePoint(record export.Record, num number.Number, start, end time.Time) (*metricpb.Metric, error) {
|
||||
desc := record.Descriptor()
|
||||
attrs := record.Attributes()
|
||||
|
||||
m := &metricpb.Metric{
|
||||
Name: desc.Name(),
|
||||
Description: desc.Description(),
|
||||
Unit: string(desc.Unit()),
|
||||
}
|
||||
|
||||
switch n := desc.NumberKind(); n {
|
||||
case number.Int64Kind:
|
||||
m.Data = &metricpb.Metric_Gauge{
|
||||
Gauge: &metricpb.Gauge{
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{
|
||||
AsInt: num.CoerceToInt64(n),
|
||||
},
|
||||
Attributes: Iterator(attrs.Iter()),
|
||||
StartTimeUnixNano: toNanos(start),
|
||||
TimeUnixNano: toNanos(end),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
case number.Float64Kind:
|
||||
m.Data = &metricpb.Metric_Gauge{
|
||||
Gauge: &metricpb.Gauge{
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsDouble{
|
||||
AsDouble: num.CoerceToFloat64(n),
|
||||
},
|
||||
Attributes: Iterator(attrs.Iter()),
|
||||
StartTimeUnixNano: toNanos(start),
|
||||
TimeUnixNano: toNanos(end),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func sdkTemporalityToTemporality(temporality aggregation.Temporality) metricpb.AggregationTemporality {
|
||||
switch temporality {
|
||||
case aggregation.DeltaTemporality:
|
||||
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA
|
||||
case aggregation.CumulativeTemporality:
|
||||
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
|
||||
}
|
||||
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
|
||||
}
|
||||
|
||||
func sumPoint(record export.Record, num number.Number, start, end time.Time, temporality aggregation.Temporality, monotonic bool) (*metricpb.Metric, error) {
|
||||
desc := record.Descriptor()
|
||||
attrs := record.Attributes()
|
||||
|
||||
m := &metricpb.Metric{
|
||||
Name: desc.Name(),
|
||||
Description: desc.Description(),
|
||||
Unit: string(desc.Unit()),
|
||||
}
|
||||
|
||||
switch n := desc.NumberKind(); n {
|
||||
case number.Int64Kind:
|
||||
m.Data = &metricpb.Metric_Sum{
|
||||
Sum: &metricpb.Sum{
|
||||
IsMonotonic: monotonic,
|
||||
AggregationTemporality: sdkTemporalityToTemporality(temporality),
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{
|
||||
AsInt: num.CoerceToInt64(n),
|
||||
},
|
||||
Attributes: Iterator(attrs.Iter()),
|
||||
StartTimeUnixNano: toNanos(start),
|
||||
TimeUnixNano: toNanos(end),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
case number.Float64Kind:
|
||||
m.Data = &metricpb.Metric_Sum{
|
||||
Sum: &metricpb.Sum{
|
||||
IsMonotonic: monotonic,
|
||||
AggregationTemporality: sdkTemporalityToTemporality(temporality),
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsDouble{
|
||||
AsDouble: num.CoerceToFloat64(n),
|
||||
},
|
||||
Attributes: Iterator(attrs.Iter()),
|
||||
StartTimeUnixNano: toNanos(start),
|
||||
TimeUnixNano: toNanos(end),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func histogramValues(a aggregation.Histogram) (boundaries []float64, counts []uint64, err error) {
|
||||
var buckets aggregation.Buckets
|
||||
if buckets, err = a.Histogram(); err != nil {
|
||||
return
|
||||
}
|
||||
boundaries, counts = buckets.Boundaries, buckets.Counts
|
||||
if len(counts) != len(boundaries)+1 {
|
||||
err = ErrTransforming
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// histogram transforms a Histogram Aggregator into an OTLP Metric.
|
||||
func histogramPoint(record export.Record, temporality aggregation.Temporality, a aggregation.Histogram) (*metricpb.Metric, error) {
|
||||
desc := record.Descriptor()
|
||||
attrs := record.Attributes()
|
||||
boundaries, counts, err := histogramValues(a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
count, err := a.Count()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sum, err := a.Sum()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sumFloat64 := sum.CoerceToFloat64(desc.NumberKind())
|
||||
m := &metricpb.Metric{
|
||||
Name: desc.Name(),
|
||||
Description: desc.Description(),
|
||||
Unit: string(desc.Unit()),
|
||||
Data: &metricpb.Metric_Histogram{
|
||||
Histogram: &metricpb.Histogram{
|
||||
AggregationTemporality: sdkTemporalityToTemporality(temporality),
|
||||
DataPoints: []*metricpb.HistogramDataPoint{
|
||||
{
|
||||
Sum: &sumFloat64,
|
||||
Attributes: Iterator(attrs.Iter()),
|
||||
StartTimeUnixNano: toNanos(record.StartTime()),
|
||||
TimeUnixNano: toNanos(record.EndTime()),
|
||||
Count: uint64(count),
|
||||
BucketCounts: counts,
|
||||
ExplicitBounds: boundaries,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return m, nil
|
||||
}
|
@ -12,11 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
@ -29,7 +28,7 @@ import (
|
||||
// DefaultEnvOptionsReader is the default environments reader.
|
||||
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
|
||||
GetEnv: os.Getenv,
|
||||
ReadFile: ioutil.ReadFile,
|
||||
ReadFile: os.ReadFile,
|
||||
Namespace: "OTEL_EXPORTER_OTLP",
|
||||
}
|
||||
|
@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
@ -104,6 +104,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
|
||||
Timeout: DefaultTimeout,
|
||||
},
|
||||
RetryConfig: retry.DefaultConfig,
|
||||
DialOptions: []grpc.DialOption{grpc.WithUserAgent(internal.GetUserAgentHeader())},
|
||||
}
|
||||
cfg = ApplyGRPCEnvConfigs(cfg)
|
||||
for _, opt := range opts {
|
@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
|
||||
import "time"
|
||||
|
@ -12,19 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ReadTLSConfigFromFile reads a PEM certificate file and creates
|
||||
// a tls.Config that will use this certifate to verify a server certificate.
|
||||
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
|
||||
b, err := ioutil.ReadFile(path)
|
||||
b, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
152
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform/attribute.go
generated
vendored
Normal file
152
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform/attribute.go
generated
vendored
Normal file
@ -0,0 +1,152 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
||||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
)
|
||||
|
||||
// AttrIter transforms an attribute iterator into OTLP key-values.
|
||||
func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
|
||||
l := iter.Len()
|
||||
if l == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
out := make([]*cpb.KeyValue, 0, l)
|
||||
for iter.Next() {
|
||||
out = append(out, KeyValue(iter.Attribute()))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
|
||||
func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
|
||||
if len(attrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
out := make([]*cpb.KeyValue, 0, len(attrs))
|
||||
for _, kv := range attrs {
|
||||
out = append(out, KeyValue(kv))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// KeyValue transforms an attribute KeyValue into an OTLP key-value.
|
||||
func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
|
||||
return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
|
||||
}
|
||||
|
||||
// Value transforms an attribute Value into an OTLP AnyValue.
|
||||
func Value(v attribute.Value) *cpb.AnyValue {
|
||||
av := new(cpb.AnyValue)
|
||||
switch v.Type() {
|
||||
case attribute.BOOL:
|
||||
av.Value = &cpb.AnyValue_BoolValue{
|
||||
BoolValue: v.AsBool(),
|
||||
}
|
||||
case attribute.BOOLSLICE:
|
||||
av.Value = &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: boolSliceValues(v.AsBoolSlice()),
|
||||
},
|
||||
}
|
||||
case attribute.INT64:
|
||||
av.Value = &cpb.AnyValue_IntValue{
|
||||
IntValue: v.AsInt64(),
|
||||
}
|
||||
case attribute.INT64SLICE:
|
||||
av.Value = &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: int64SliceValues(v.AsInt64Slice()),
|
||||
},
|
||||
}
|
||||
case attribute.FLOAT64:
|
||||
av.Value = &cpb.AnyValue_DoubleValue{
|
||||
DoubleValue: v.AsFloat64(),
|
||||
}
|
||||
case attribute.FLOAT64SLICE:
|
||||
av.Value = &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: float64SliceValues(v.AsFloat64Slice()),
|
||||
},
|
||||
}
|
||||
case attribute.STRING:
|
||||
av.Value = &cpb.AnyValue_StringValue{
|
||||
StringValue: v.AsString(),
|
||||
}
|
||||
case attribute.STRINGSLICE:
|
||||
av.Value = &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: stringSliceValues(v.AsStringSlice()),
|
||||
},
|
||||
}
|
||||
default:
|
||||
av.Value = &cpb.AnyValue_StringValue{
|
||||
StringValue: "INVALID",
|
||||
}
|
||||
}
|
||||
return av
|
||||
}
|
||||
|
||||
func boolSliceValues(vals []bool) []*cpb.AnyValue {
|
||||
converted := make([]*cpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_BoolValue{
|
||||
BoolValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func int64SliceValues(vals []int64) []*cpb.AnyValue {
|
||||
converted := make([]*cpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_IntValue{
|
||||
IntValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func float64SliceValues(vals []float64) []*cpb.AnyValue {
|
||||
converted := make([]*cpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_DoubleValue{
|
||||
DoubleValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func stringSliceValues(vals []string) []*cpb.AnyValue {
|
||||
converted := make([]*cpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_StringValue{
|
||||
StringValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
@ -12,14 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// Deprecated: will be removed soon.
|
||||
func AtomicFieldOffsets() map[string]uintptr {
|
||||
return map[string]uintptr{
|
||||
"record.refMapped.value": unsafe.Offsetof(record{}.refMapped.value),
|
||||
"record.updateCount": unsafe.Offsetof(record{}.updateCount),
|
||||
}
|
||||
}
|
||||
// Package transform provides transformation functionality from the
|
||||
// sdk/metric/metricdata data-types into OTLP data-types.
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
111
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform/error.go
generated
vendored
Normal file
111
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform/error.go
generated
vendored
Normal file
@ -0,0 +1,111 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
errUnknownAggregation = errors.New("unknown aggregation")
|
||||
errUnknownTemporality = errors.New("unknown temporality")
|
||||
)
|
||||
|
||||
type errMetric struct {
|
||||
m *mpb.Metric
|
||||
err error
|
||||
}
|
||||
|
||||
func (e errMetric) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
func (e errMetric) Error() string {
|
||||
format := "invalid metric (name: %q, description: %q, unit: %q): %s"
|
||||
return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
|
||||
}
|
||||
|
||||
func (e errMetric) Is(target error) bool {
|
||||
return errors.Is(e.err, target)
|
||||
}
|
||||
|
||||
// multiErr is used by the data-type transform functions to wrap multiple
|
||||
// errors into a single return value. The error message will show all errors
|
||||
// as a list and scope them by the datatype name that is returning them.
|
||||
type multiErr struct {
|
||||
datatype string
|
||||
errs []error
|
||||
}
|
||||
|
||||
// errOrNil returns nil if e contains no errors, otherwise it returns e.
|
||||
func (e *multiErr) errOrNil() error {
|
||||
if len(e.errs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// append adds err to e. If err is a multiErr, its errs are flattened into e.
|
||||
func (e *multiErr) append(err error) {
|
||||
// Do not use errors.As here, this should only be flattened one layer. If
|
||||
// there is a *multiErr several steps down the chain, all the errors above
|
||||
// it will be discarded if errors.As is used instead.
|
||||
switch other := err.(type) {
|
||||
case *multiErr:
|
||||
// Flatten err errors into e.
|
||||
e.errs = append(e.errs, other.errs...)
|
||||
default:
|
||||
e.errs = append(e.errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *multiErr) Error() string {
|
||||
es := make([]string, len(e.errs))
|
||||
for i, err := range e.errs {
|
||||
es[i] = fmt.Sprintf("* %s", err)
|
||||
}
|
||||
|
||||
format := "%d errors occurred transforming %s:\n\t%s"
|
||||
return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
|
||||
}
|
||||
|
||||
func (e *multiErr) Unwrap() error {
|
||||
switch len(e.errs) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return e.errs[0]
|
||||
}
|
||||
|
||||
// Return a multiErr without the leading error.
|
||||
cp := &multiErr{
|
||||
datatype: e.datatype,
|
||||
errs: make([]error, len(e.errs)-1),
|
||||
}
|
||||
copy(cp.errs, e.errs[1:])
|
||||
return cp
|
||||
}
|
||||
|
||||
func (e *multiErr) Is(target error) bool {
|
||||
if len(e.errs) == 0 {
|
||||
return false
|
||||
}
|
||||
// Check if the first error is target.
|
||||
return errors.Is(e.errs[0], target)
|
||||
}
|
205
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform/metricdata.go
generated
vendored
Normal file
205
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform/metricdata.go
generated
vendored
Normal file
@ -0,0 +1,205 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
rpb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||
)
|
||||
|
||||
// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
|
||||
// contains invalid ScopeMetrics, an error will be returned along with an OTLP
|
||||
// ResourceMetrics that contains partial OTLP ScopeMetrics.
|
||||
func ResourceMetrics(rm metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
|
||||
sms, err := ScopeMetrics(rm.ScopeMetrics)
|
||||
return &mpb.ResourceMetrics{
|
||||
Resource: &rpb.Resource{
|
||||
Attributes: AttrIter(rm.Resource.Iter()),
|
||||
},
|
||||
ScopeMetrics: sms,
|
||||
SchemaUrl: rm.Resource.SchemaURL(),
|
||||
}, err
|
||||
}
|
||||
|
||||
// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
|
||||
// sms contains invalid metric values, an error will be returned along with a
|
||||
// slice that contains partial OTLP ScopeMetrics.
|
||||
func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
|
||||
errs := &multiErr{datatype: "ScopeMetrics"}
|
||||
out := make([]*mpb.ScopeMetrics, 0, len(sms))
|
||||
for _, sm := range sms {
|
||||
ms, err := Metrics(sm.Metrics)
|
||||
if err != nil {
|
||||
errs.append(err)
|
||||
}
|
||||
|
||||
out = append(out, &mpb.ScopeMetrics{
|
||||
Scope: &cpb.InstrumentationScope{
|
||||
Name: sm.Scope.Name,
|
||||
Version: sm.Scope.Version,
|
||||
},
|
||||
Metrics: ms,
|
||||
SchemaUrl: sm.Scope.SchemaURL,
|
||||
})
|
||||
}
|
||||
return out, errs.errOrNil()
|
||||
}
|
||||
|
||||
// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
|
||||
// invalid metric values, an error will be returned along with a slice that
|
||||
// contains partial OTLP Metrics.
|
||||
func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
|
||||
errs := &multiErr{datatype: "Metrics"}
|
||||
out := make([]*mpb.Metric, 0, len(ms))
|
||||
for _, m := range ms {
|
||||
o, err := metric(m)
|
||||
if err != nil {
|
||||
// Do not include invalid data. Drop the metric, report the error.
|
||||
errs.append(errMetric{m: o, err: err})
|
||||
continue
|
||||
}
|
||||
out = append(out, o)
|
||||
}
|
||||
return out, errs.errOrNil()
|
||||
}
|
||||
|
||||
func metric(m metricdata.Metrics) (*mpb.Metric, error) {
|
||||
var err error
|
||||
out := &mpb.Metric{
|
||||
Name: m.Name,
|
||||
Description: m.Description,
|
||||
Unit: string(m.Unit),
|
||||
}
|
||||
switch a := m.Data.(type) {
|
||||
case metricdata.Gauge[int64]:
|
||||
out.Data = Gauge[int64](a)
|
||||
case metricdata.Gauge[float64]:
|
||||
out.Data = Gauge[float64](a)
|
||||
case metricdata.Sum[int64]:
|
||||
out.Data, err = Sum[int64](a)
|
||||
case metricdata.Sum[float64]:
|
||||
out.Data, err = Sum[float64](a)
|
||||
case metricdata.Histogram:
|
||||
out.Data, err = Histogram(a)
|
||||
default:
|
||||
return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
|
||||
}
|
||||
return out, err
|
||||
}
|
||||
|
||||
// Gauge returns an OTLP Metric_Gauge generated from g.
|
||||
func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
|
||||
return &mpb.Metric_Gauge{
|
||||
Gauge: &mpb.Gauge{
|
||||
DataPoints: DataPoints(g.DataPoints),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Sum returns an OTLP Metric_Sum generated from s. An error is returned with
|
||||
// a partial Metric_Sum if the temporality of s is unknown.
|
||||
func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
|
||||
t, err := Temporality(s.Temporality)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &mpb.Metric_Sum{
|
||||
Sum: &mpb.Sum{
|
||||
AggregationTemporality: t,
|
||||
IsMonotonic: s.IsMonotonic,
|
||||
DataPoints: DataPoints(s.DataPoints),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
|
||||
func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
|
||||
out := make([]*mpb.NumberDataPoint, 0, len(dPts))
|
||||
for _, dPt := range dPts {
|
||||
ndp := &mpb.NumberDataPoint{
|
||||
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||
StartTimeUnixNano: uint64(dPt.StartTime.UnixNano()),
|
||||
TimeUnixNano: uint64(dPt.Time.UnixNano()),
|
||||
}
|
||||
switch v := any(dPt.Value).(type) {
|
||||
case int64:
|
||||
ndp.Value = &mpb.NumberDataPoint_AsInt{
|
||||
AsInt: v,
|
||||
}
|
||||
case float64:
|
||||
ndp.Value = &mpb.NumberDataPoint_AsDouble{
|
||||
AsDouble: v,
|
||||
}
|
||||
}
|
||||
out = append(out, ndp)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Histogram returns an OTLP Metric_Histogram generated from h. An error is
|
||||
// returned with a partial Metric_Histogram if the temporality of h is
|
||||
// unknown.
|
||||
func Histogram(h metricdata.Histogram) (*mpb.Metric_Histogram, error) {
|
||||
t, err := Temporality(h.Temporality)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &mpb.Metric_Histogram{
|
||||
Histogram: &mpb.Histogram{
|
||||
AggregationTemporality: t,
|
||||
DataPoints: HistogramDataPoints(h.DataPoints),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
|
||||
// from dPts.
|
||||
func HistogramDataPoints(dPts []metricdata.HistogramDataPoint) []*mpb.HistogramDataPoint {
|
||||
out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
|
||||
for _, dPt := range dPts {
|
||||
sum := dPt.Sum
|
||||
out = append(out, &mpb.HistogramDataPoint{
|
||||
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||
StartTimeUnixNano: uint64(dPt.StartTime.UnixNano()),
|
||||
TimeUnixNano: uint64(dPt.Time.UnixNano()),
|
||||
Count: dPt.Count,
|
||||
Sum: &sum,
|
||||
BucketCounts: dPt.BucketCounts,
|
||||
ExplicitBounds: dPt.Bounds,
|
||||
Min: dPt.Min,
|
||||
Max: dPt.Max,
|
||||
})
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Temporality returns an OTLP AggregationTemporality generated from t. If t
|
||||
// is unknown, an error is returned along with the invalid
|
||||
// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
|
||||
func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
|
||||
switch t {
|
||||
case metricdata.DeltaTemporality:
|
||||
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
|
||||
case metricdata.CumulativeTemporality:
|
||||
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
|
||||
default:
|
||||
err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
|
||||
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
|
||||
}
|
||||
}
|
43
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/options.go
generated
vendored
43
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/options.go
generated
vendored
@ -1,43 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
|
||||
import "go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
|
||||
// Option are setting options passed to an Exporter on creation.
|
||||
type Option interface {
|
||||
apply(config) config
|
||||
}
|
||||
|
||||
type exporterOptionFunc func(config) config
|
||||
|
||||
func (fn exporterOptionFunc) apply(cfg config) config {
|
||||
return fn(cfg)
|
||||
}
|
||||
|
||||
type config struct {
|
||||
temporalitySelector aggregation.TemporalitySelector
|
||||
}
|
||||
|
||||
// WithMetricAggregationTemporalitySelector defines the aggregation.TemporalitySelector used
|
||||
// for selecting aggregation.Temporality (i.e., Cumulative vs. Delta
|
||||
// aggregation). If not specified otherwise, exporter will use a
|
||||
// cumulative temporality selector.
|
||||
func WithMetricAggregationTemporalitySelector(selector aggregation.TemporalitySelector) Option {
|
||||
return exporterOptionFunc(func(cfg config) config {
|
||||
cfg.temporalitySelector = selector
|
||||
return cfg
|
||||
})
|
||||
}
|
159
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
generated
vendored
159
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
generated
vendored
@ -16,8 +16,6 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
@ -28,54 +26,49 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
|
||||
// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
|
||||
// endpoint using gRPC.
|
||||
//
|
||||
// If an already established gRPC ClientConn is not passed in options using
|
||||
// WithGRPCConn, a connection to the OTLP endpoint will be established based
|
||||
// on options. If a connection cannot be establishes in the lifetime of ctx,
|
||||
// an error will be returned.
|
||||
func New(ctx context.Context, options ...Option) (metric.Exporter, error) {
|
||||
c, err := newClient(ctx, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return otlpmetric.New(c), nil
|
||||
}
|
||||
|
||||
type client struct {
|
||||
endpoint string
|
||||
dialOpts []grpc.DialOption
|
||||
metadata metadata.MD
|
||||
exportTimeout time.Duration
|
||||
requestFunc retry.RequestFunc
|
||||
|
||||
// stopCtx is used as a parent context for all exports. Therefore, when it
|
||||
// is canceled with the stopFunc all exports are canceled.
|
||||
stopCtx context.Context
|
||||
// stopFunc cancels stopCtx, stopping any active exports.
|
||||
stopFunc context.CancelFunc
|
||||
|
||||
// ourConn keeps track of where conn was created: true if created here on
|
||||
// Start, or false if passed with an option. This is important on Shutdown
|
||||
// as the conn should only be closed if created here on start. Otherwise,
|
||||
// ourConn keeps track of where conn was created: true if created here in
|
||||
// NewClient, or false if passed with an option. This is important on
|
||||
// Shutdown as the conn should only be closed if we created it. Otherwise,
|
||||
// it is up to the processes that passed the conn to close it.
|
||||
ourConn bool
|
||||
conn *grpc.ClientConn
|
||||
mscMu sync.RWMutex
|
||||
msc colmetricpb.MetricsServiceClient
|
||||
}
|
||||
|
||||
// Compile time check *client implements otlpmetric.Client.
|
||||
var _ otlpmetric.Client = (*client)(nil)
|
||||
|
||||
// NewClient creates a new gRPC metric client.
|
||||
func NewClient(opts ...Option) otlpmetric.Client {
|
||||
return newClient(opts...)
|
||||
}
|
||||
|
||||
func newClient(opts ...Option) *client {
|
||||
cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
// newClient creates a new gRPC metric client.
|
||||
func newClient(ctx context.Context, options ...Option) (otlpmetric.Client, error) {
|
||||
cfg := oconf.NewGRPCConfig(asGRPCOptions(options)...)
|
||||
|
||||
c := &client{
|
||||
endpoint: cfg.Metrics.Endpoint,
|
||||
exportTimeout: cfg.Metrics.Timeout,
|
||||
requestFunc: cfg.RetryConfig.RequestFunc(retryable),
|
||||
dialOpts: cfg.DialOptions,
|
||||
stopCtx: ctx,
|
||||
stopFunc: cancel,
|
||||
conn: cfg.GRPCConn,
|
||||
}
|
||||
|
||||
@ -83,17 +76,12 @@ func newClient(opts ...Option) *client {
|
||||
c.metadata = metadata.New(cfg.Metrics.Headers)
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Start establishes a gRPC connection to the collector.
|
||||
func (c *client) Start(ctx context.Context) error {
|
||||
if c.conn == nil {
|
||||
// If the caller did not provide a ClientConn when the client was
|
||||
// created, create one using the configuration they did provide.
|
||||
conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...)
|
||||
conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, cfg.DialOptions...)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
// Keep track that we own the lifecycle of this conn and need to close
|
||||
// it on Shutdown.
|
||||
@ -101,69 +89,30 @@ func (c *client) Start(ctx context.Context) error {
|
||||
c.conn = conn
|
||||
}
|
||||
|
||||
// The otlpmetric.Client interface states this method is called just once,
|
||||
// so no need to check if already started.
|
||||
c.mscMu.Lock()
|
||||
c.msc = colmetricpb.NewMetricsServiceClient(c.conn)
|
||||
c.mscMu.Unlock()
|
||||
|
||||
return nil
|
||||
return c, nil
|
||||
}
|
||||
|
||||
var errAlreadyStopped = errors.New("the client is already stopped")
|
||||
// ForceFlush does nothing, the client holds no state.
|
||||
func (c *client) ForceFlush(ctx context.Context) error { return ctx.Err() }
|
||||
|
||||
// Stop shuts down the client.
|
||||
// Shutdown shuts down the client, freeing all resource.
|
||||
//
|
||||
// Any active connections to a remote endpoint are closed if they were created
|
||||
// by the client. Any gRPC connection passed during creation using
|
||||
// WithGRPCConn will not be closed. It is the caller's responsibility to
|
||||
// handle cleanup of that resource.
|
||||
//
|
||||
// This method synchronizes with the UploadMetrics method of the client. It
|
||||
// will wait for any active calls to that method to complete unimpeded, or it
|
||||
// will cancel any active calls if ctx expires. If ctx expires, the context
|
||||
// error will be forwarded as the returned error. All client held resources
|
||||
// will still be released in this situation.
|
||||
//
|
||||
// If the client has already stopped, an error will be returned describing
|
||||
// this.
|
||||
func (c *client) Stop(ctx context.Context) error {
|
||||
// Acquire the c.mscMu lock within the ctx lifetime.
|
||||
acquired := make(chan struct{})
|
||||
go func() {
|
||||
c.mscMu.Lock()
|
||||
close(acquired)
|
||||
}()
|
||||
var err error
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// The Stop timeout is reached. Kill any remaining exports to force
|
||||
// the clear of the lock and save the timeout error to return and
|
||||
// signal the shutdown timed out before cleanly stopping.
|
||||
c.stopFunc()
|
||||
err = ctx.Err()
|
||||
func (c *client) Shutdown(ctx context.Context) error {
|
||||
// The otlpmetric.Exporter synchronizes access to client methods and
|
||||
// ensures this is called only once. The only thing that needs to be done
|
||||
// here is to release any computational resources the client holds.
|
||||
|
||||
// To ensure the client is not left in a dirty state c.msc needs to be
|
||||
// set to nil. To avoid the race condition when doing this, ensure
|
||||
// that all the exports are killed (initiated by c.stopFunc).
|
||||
<-acquired
|
||||
case <-acquired:
|
||||
}
|
||||
// Hold the mscMu lock for the rest of the function to ensure no new
|
||||
// exports are started.
|
||||
defer c.mscMu.Unlock()
|
||||
|
||||
// The otlpmetric.Client interface states this method is called only
|
||||
// once, but there is no guarantee it is called after Start. Ensure the
|
||||
// client is started before doing anything and let the called know if they
|
||||
// made a mistake.
|
||||
if c.msc == nil {
|
||||
return errAlreadyStopped
|
||||
}
|
||||
|
||||
// Clear c.msc to signal the client is stopped.
|
||||
c.metadata = nil
|
||||
c.requestFunc = nil
|
||||
c.msc = nil
|
||||
|
||||
err := ctx.Err()
|
||||
if c.ourConn {
|
||||
closeErr := c.conn.Close()
|
||||
// A context timeout error takes precedence over this error.
|
||||
@ -171,25 +120,24 @@ func (c *client) Stop(ctx context.Context) error {
|
||||
err = closeErr
|
||||
}
|
||||
}
|
||||
c.conn = nil
|
||||
return err
|
||||
}
|
||||
|
||||
var errShutdown = errors.New("the client is shutdown")
|
||||
|
||||
// UploadMetrics sends a batch of spans.
|
||||
// UploadMetrics sends protoMetrics to connected endpoint.
|
||||
//
|
||||
// Retryable errors from the server will be handled according to any
|
||||
// RetryConfig the client was created with.
|
||||
func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
|
||||
// Hold a read lock to ensure a shut down initiated after this starts does
|
||||
// not abandon the export. This read lock acquire has less priority than a
|
||||
// write lock acquire (i.e. Stop), meaning if the client is shutting down
|
||||
// this will come after the shut down.
|
||||
c.mscMu.RLock()
|
||||
defer c.mscMu.RUnlock()
|
||||
// The otlpmetric.Exporter synchronizes access to client methods, and
|
||||
// ensures this is not called after the Exporter is shutdown. Only thing
|
||||
// to do here is send data.
|
||||
|
||||
if c.msc == nil {
|
||||
return errShutdown
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Do not upload if the context is already expired.
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
ctx, cancel := c.exportContext(ctx)
|
||||
@ -209,7 +157,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou
|
||||
}
|
||||
|
||||
// exportContext returns a copy of parent with an appropriate deadline and
|
||||
// cancellation function.
|
||||
// cancellation function based on the clients configured export timeout.
|
||||
//
|
||||
// It is the callers responsibility to cancel the returned context once its
|
||||
// use is complete, via the parent or directly with the returned CancelFunc, to
|
||||
@ -230,23 +178,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
|
||||
ctx = metadata.NewOutgoingContext(ctx, c.metadata)
|
||||
}
|
||||
|
||||
// Unify the client stopCtx with the parent.
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-c.stopCtx.Done():
|
||||
// Cancel the export as the shutdown has timed out.
|
||||
cancel()
|
||||
}
|
||||
}()
|
||||
|
||||
return ctx, cancel
|
||||
}
|
||||
|
||||
// retryable returns if err identifies a request that can be retried and a
|
||||
// duration to wait for if an explicit throttle time is included in err.
|
||||
func retryable(err error) (bool, time.Duration) {
|
||||
//func retryable(err error) (bool, time.Duration) {
|
||||
s := status.Convert(err)
|
||||
switch s.Code() {
|
||||
case codes.Canceled,
|
||||
|
238
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
generated
vendored
Normal file
238
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
generated
vendored
Normal file
@ -0,0 +1,238 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
)
|
||||
|
||||
// Option applies a configuration option to the Exporter.
|
||||
type Option interface {
|
||||
applyGRPCOption(oconf.Config) oconf.Config
|
||||
}
|
||||
|
||||
func asGRPCOptions(opts []Option) []oconf.GRPCOption {
|
||||
converted := make([]oconf.GRPCOption, len(opts))
|
||||
for i, o := range opts {
|
||||
converted[i] = oconf.NewGRPCOption(o.applyGRPCOption)
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
// RetryConfig defines configuration for retrying the export of metric data
|
||||
// that failed.
|
||||
//
|
||||
// This configuration does not define any network retry strategy. That is
|
||||
// entirely handled by the gRPC ClientConn.
|
||||
type RetryConfig retry.Config
|
||||
|
||||
type wrappedOption struct {
|
||||
oconf.GRPCOption
|
||||
}
|
||||
|
||||
func (w wrappedOption) applyGRPCOption(cfg oconf.Config) oconf.Config {
|
||||
return w.ApplyGRPCOption(cfg)
|
||||
}
|
||||
|
||||
// WithInsecure disables client transport security for the Exporter's gRPC
|
||||
// connection, just like grpc.WithInsecure()
|
||||
// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used to determine client security. If the endpoint has a
|
||||
// scheme of "http" or "unix" client security will be disabled. If both are
|
||||
// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, client security will be used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithInsecure() Option {
|
||||
return wrappedOption{oconf.WithInsecure()}
|
||||
}
|
||||
|
||||
// WithEndpoint sets the target endpoint the Exporter will connect to.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, "localhost:4317" will be used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithEndpoint(endpoint string) Option {
|
||||
return wrappedOption{oconf.WithEndpoint(endpoint)}
|
||||
}
|
||||
|
||||
// WithReconnectionPeriod set the minimum amount of time between connection
|
||||
// attempts to the target endpoint.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithReconnectionPeriod(rp time.Duration) Option {
|
||||
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||
cfg.ReconnectionPeriod = rp
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
func compressorToCompression(compressor string) oconf.Compression {
|
||||
if compressor == "gzip" {
|
||||
return oconf.GzipCompression
|
||||
}
|
||||
|
||||
otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
|
||||
return oconf.NoCompression
|
||||
}
|
||||
|
||||
// WithCompressor sets the compressor the gRPC client uses.
|
||||
//
|
||||
// It is the responsibility of the caller to ensure that the compressor set
|
||||
// has been registered with google.golang.org/grpc/encoding (see
|
||||
// encoding.RegisterCompressor for more information). For example, to register
|
||||
// the gzip compressor import the package:
|
||||
//
|
||||
// import _ "google.golang.org/grpc/encoding/gzip"
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_COMPRESSION or
|
||||
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
|
||||
// this option is not passed, that variable value will be used. That value can
|
||||
// be either "none" or "gzip". If both are set,
|
||||
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, no compressor will be used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithCompressor(compressor string) Option {
|
||||
return wrappedOption{oconf.WithCompression(compressorToCompression(compressor))}
|
||||
}
|
||||
|
||||
// WithHeaders will send the provided headers with each gRPC requests.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used. The value will be parsed as a list of key value pairs.
|
||||
// These pairs are expected to be in the W3C Correlation-Context format
|
||||
// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
|
||||
// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, no user headers will be set.
|
||||
func WithHeaders(headers map[string]string) Option {
|
||||
return wrappedOption{oconf.WithHeaders(headers)}
|
||||
}
|
||||
|
||||
// WithTLSCredentials sets the gRPC connection to use creds.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
|
||||
// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
|
||||
// this option is not passed, that variable value will be used. The value will
|
||||
// be parsed the filepath of the TLS certificate chain to use. If both are
|
||||
// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, no TLS credentials will be used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
|
||||
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||
cfg.Metrics.GRPCCredentials = creds
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithServiceConfig defines the default gRPC service config used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithServiceConfig(serviceConfig string) Option {
|
||||
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||
cfg.ServiceConfig = serviceConfig
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithDialOption sets explicit grpc.DialOptions to use when establishing a
|
||||
// gRPC connection. The options here are appended to the internal grpc.DialOptions
|
||||
// used so they will take precedence over any other internal grpc.DialOptions
|
||||
// they might conflict with.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithDialOption(opts ...grpc.DialOption) Option {
|
||||
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||
cfg.DialOptions = opts
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
|
||||
//
|
||||
// This option takes precedence over any other option that relates to
|
||||
// establishing or persisting a gRPC connection to a target endpoint. Any
|
||||
// other option of those types passed will be ignored.
|
||||
//
|
||||
// It is the callers responsibility to close the passed conn. The Exporter
|
||||
// Shutdown method will not close this connection.
|
||||
func WithGRPCConn(conn *grpc.ClientConn) Option {
|
||||
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||
cfg.GRPCConn = conn
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithTimeout sets the max amount of time an Exporter will attempt an export.
|
||||
//
|
||||
// This takes precedence over any retry settings defined by WithRetry. Once
|
||||
// this time limit has been reached the export is abandoned and the metric
|
||||
// data is dropped.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used. The value will be parsed as an integer representing the
|
||||
// timeout in milliseconds. If both are set,
|
||||
// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, a timeout of 10 seconds will be used.
|
||||
func WithTimeout(duration time.Duration) Option {
|
||||
return wrappedOption{oconf.WithTimeout(duration)}
|
||||
}
|
||||
|
||||
// WithRetry sets the retry policy for transient retryable errors that are
|
||||
// returned by the target endpoint.
|
||||
//
|
||||
// If the target endpoint responds with not only a retryable error, but
|
||||
// explicitly returns a backoff time in the response, that time will take
|
||||
// precedence over these settings.
|
||||
//
|
||||
// These settings do not define any network retry strategy. That is entirely
|
||||
// handled by the gRPC ClientConn.
|
||||
//
|
||||
// If unset, the default retry policy will be used. It will retry the export
|
||||
// 5 seconds after receiving a retryable error and increase exponentially
|
||||
// after each error for no more than a total time of 1 minute.
|
||||
func WithRetry(settings RetryConfig) Option {
|
||||
return wrappedOption{oconf.WithRetry(retry.Config(settings))}
|
||||
}
|
@ -12,20 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package otlpmetricgrpc provides an otlpmetric.Exporter that communicates
|
||||
// with an OTLP receiving endpoint using gRPC.
|
||||
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
)
|
||||
|
||||
// New constructs a new Exporter and starts it.
|
||||
func New(ctx context.Context, opts ...Option) (*otlpmetric.Exporter, error) {
|
||||
return otlpmetric.New(ctx, NewClient(opts...))
|
||||
}
|
||||
|
||||
// NewUnstarted constructs a new Exporter and does not start it.
|
||||
func NewUnstarted(opts ...Option) *otlpmetric.Exporter {
|
||||
return otlpmetric.NewUnstarted(NewClient(opts...))
|
||||
}
|
189
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/options.go
generated
vendored
189
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/options.go
generated
vendored
@ -1,189 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
)
|
||||
|
||||
// Option applies an option to the gRPC driver.
|
||||
type Option interface {
|
||||
applyGRPCOption(otlpconfig.Config) otlpconfig.Config
|
||||
}
|
||||
|
||||
func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
|
||||
converted := make([]otlpconfig.GRPCOption, len(opts))
|
||||
for i, o := range opts {
|
||||
converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
// RetryConfig defines configuration for retrying export of span batches that
|
||||
// failed to be received by the target endpoint.
|
||||
//
|
||||
// This configuration does not define any network retry strategy. That is
|
||||
// entirely handled by the gRPC ClientConn.
|
||||
type RetryConfig retry.Config
|
||||
|
||||
type wrappedOption struct {
|
||||
otlpconfig.GRPCOption
|
||||
}
|
||||
|
||||
func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
return w.ApplyGRPCOption(cfg)
|
||||
}
|
||||
|
||||
// WithInsecure disables client transport security for the exporter's gRPC
|
||||
// connection just like grpc.WithInsecure()
|
||||
// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by
|
||||
// default, client security is required unless WithInsecure is used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithInsecure() Option {
|
||||
return wrappedOption{otlpconfig.WithInsecure()}
|
||||
}
|
||||
|
||||
// WithEndpoint sets the target endpoint the exporter will connect to. If
|
||||
// unset, localhost:4317 will be used as a default.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithEndpoint(endpoint string) Option {
|
||||
return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
|
||||
}
|
||||
|
||||
// WithReconnectionPeriod set the minimum amount of time between connection
|
||||
// attempts to the target endpoint.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithReconnectionPeriod(rp time.Duration) Option {
|
||||
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
cfg.ReconnectionPeriod = rp
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
func compressorToCompression(compressor string) otlpconfig.Compression {
|
||||
if compressor == "gzip" {
|
||||
return otlpconfig.GzipCompression
|
||||
}
|
||||
|
||||
otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
|
||||
return otlpconfig.NoCompression
|
||||
}
|
||||
|
||||
// WithCompressor sets the compressor for the gRPC client to use when sending
|
||||
// requests. It is the responsibility of the caller to ensure that the
|
||||
// compressor set has been registered with google.golang.org/grpc/encoding.
|
||||
// This can be done by encoding.RegisterCompressor. Some compressors
|
||||
// auto-register on import, such as gzip, which can be registered by calling
|
||||
// `import _ "google.golang.org/grpc/encoding/gzip"`.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithCompressor(compressor string) Option {
|
||||
return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
|
||||
}
|
||||
|
||||
// WithHeaders will send the provided headers with each gRPC requests.
|
||||
func WithHeaders(headers map[string]string) Option {
|
||||
return wrappedOption{otlpconfig.WithHeaders(headers)}
|
||||
}
|
||||
|
||||
// WithTLSCredentials allows the connection to use TLS credentials when
|
||||
// talking to the server. It takes in grpc.TransportCredentials instead of say
|
||||
// a Certificate file or a tls.Certificate, because the retrieving of these
|
||||
// credentials can be done in many ways e.g. plain file, in code tls.Config or
|
||||
// by certificate rotation, so it is up to the caller to decide what to use.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
|
||||
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
cfg.Metrics.GRPCCredentials = creds
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithServiceConfig defines the default gRPC service config used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithServiceConfig(serviceConfig string) Option {
|
||||
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
cfg.ServiceConfig = serviceConfig
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithDialOption sets explicit grpc.DialOptions to use when making a
|
||||
// connection. The options here are appended to the internal grpc.DialOptions
|
||||
// used so they will take precedence over any other internal grpc.DialOptions
|
||||
// they might conflict with.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithDialOption(opts ...grpc.DialOption) Option {
|
||||
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
cfg.DialOptions = opts
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
|
||||
//
|
||||
// This option takes precedence over any other option that relates to
|
||||
// establishing or persisting a gRPC connection to a target endpoint. Any
|
||||
// other option of those types passed will be ignored.
|
||||
//
|
||||
// It is the callers responsibility to close the passed conn. The client
|
||||
// Shutdown method will not close this connection.
|
||||
func WithGRPCConn(conn *grpc.ClientConn) Option {
|
||||
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
cfg.GRPCConn = conn
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithTimeout sets the max amount of time a client will attempt to export a
|
||||
// batch of spans. This takes precedence over any retry settings defined with
|
||||
// WithRetry, once this time limit has been reached the export is abandoned
|
||||
// and the batch of spans is dropped.
|
||||
//
|
||||
// If unset, the default timeout will be set to 10 seconds.
|
||||
func WithTimeout(duration time.Duration) Option {
|
||||
return wrappedOption{otlpconfig.WithTimeout(duration)}
|
||||
}
|
||||
|
||||
// WithRetry sets the retry policy for transient retryable errors that may be
|
||||
// returned by the target endpoint when exporting a batch of spans.
|
||||
//
|
||||
// If the target endpoint responds with not only a retryable error, but
|
||||
// explicitly returns a backoff time in the response. That time will take
|
||||
// precedence over these settings.
|
||||
//
|
||||
// These settings do not define any network retry strategy. That is entirely
|
||||
// handled by the gRPC ClientConn.
|
||||
//
|
||||
// If unset, the default retry policy will be used. It will retry the export
|
||||
// 5 seconds after receiving a retryable error and increase exponentially
|
||||
// after each error for no more than a total time of 1 minute.
|
||||
func WithRetry(settings RetryConfig) Option {
|
||||
return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
|
||||
}
|
@ -16,7 +16,6 @@ package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
@ -29,7 +28,7 @@ import (
|
||||
// DefaultEnvOptionsReader is the default environments reader.
|
||||
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
|
||||
GetEnv: os.Getenv,
|
||||
ReadFile: ioutil.ReadFile,
|
||||
ReadFile: os.ReadFile,
|
||||
Namespace: "OTEL_EXPORTER_OTLP",
|
||||
}
|
||||
|
||||
|
@ -97,6 +97,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
|
||||
Timeout: DefaultTimeout,
|
||||
},
|
||||
RetryConfig: retry.DefaultConfig,
|
||||
DialOptions: []grpc.DialOption{grpc.WithUserAgent(internal.GetUserAgentHeader())},
|
||||
}
|
||||
cfg = ApplyGRPCEnvConfigs(cfg)
|
||||
for _, opt := range opts {
|
||||
|
11
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
generated
vendored
11
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
generated
vendored
@ -26,6 +26,8 @@ import (
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/internal"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
|
||||
@ -196,9 +198,16 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc
|
||||
defer cancel()
|
||||
|
||||
return c.requestFunc(ctx, func(iCtx context.Context) error {
|
||||
_, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
|
||||
resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
|
||||
ResourceSpans: protoSpans,
|
||||
})
|
||||
if resp != nil && resp.PartialSuccess != nil {
|
||||
otel.Handle(internal.PartialSuccessToError(
|
||||
internal.TracingPartialSuccess,
|
||||
resp.PartialSuccess.RejectedSpans,
|
||||
resp.PartialSuccess.ErrorMessage,
|
||||
))
|
||||
}
|
||||
// nil is converted to OK.
|
||||
if status.Code(err) == codes.OK {
|
||||
// Success.
|
||||
|
45
vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
generated
vendored
Normal file
45
vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package attribute provide several helper functions for some commonly used
|
||||
logic of processing attributes.
|
||||
*/
|
||||
package attribute // import "go.opentelemetry.io/otel/internal/attribute"
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// SliceValue convert a slice into an array with same elements as slice.
|
||||
func SliceValue[T bool | int64 | float64 | string](v []T) any {
|
||||
var zero T
|
||||
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
|
||||
copy(cp.Elem().Slice(0, len(v)).Interface().([]T), v)
|
||||
return cp.Elem().Interface()
|
||||
}
|
||||
|
||||
// AsSlice convert an array into a slice into with same elements as array.
|
||||
func AsSlice[T bool | int64 | float64 | string](v any) []T {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Type().Kind() != reflect.Array {
|
||||
return nil
|
||||
}
|
||||
var zero T
|
||||
correctLen := rv.Len()
|
||||
correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
|
||||
cpy := reflect.New(correctType)
|
||||
_ = reflect.Copy(cpy.Elem(), rv)
|
||||
return cpy.Elem().Slice(0, correctLen).Interface().([]T)
|
||||
}
|
8
vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go
generated
vendored
8
vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go
generated
vendored
@ -35,7 +35,8 @@ type InstrumentProvider interface {
|
||||
|
||||
// Counter is an instrument that records increasing values.
|
||||
type Counter interface {
|
||||
// Observe records the state of the instrument.
|
||||
// Observe records the state of the instrument to be x. The value of x is
|
||||
// assumed to be the exact Counter value to record.
|
||||
//
|
||||
// It is only valid to call this within a callback. If called outside of the
|
||||
// registered callback it should have no effect on the instrument, and an
|
||||
@ -47,7 +48,8 @@ type Counter interface {
|
||||
|
||||
// UpDownCounter is an instrument that records increasing or decreasing values.
|
||||
type UpDownCounter interface {
|
||||
// Observe records the state of the instrument.
|
||||
// Observe records the state of the instrument to be x. The value of x is
|
||||
// assumed to be the exact UpDownCounter value to record.
|
||||
//
|
||||
// It is only valid to call this within a callback. If called outside of the
|
||||
// registered callback it should have no effect on the instrument, and an
|
||||
@ -59,7 +61,7 @@ type UpDownCounter interface {
|
||||
|
||||
// Gauge is an instrument that records independent readings.
|
||||
type Gauge interface {
|
||||
// Observe records the state of the instrument.
|
||||
// Observe records the state of the instrument to be x.
|
||||
//
|
||||
// It is only valid to call this within a callback. If called outside of the
|
||||
// registered callback it should have no effect on the instrument, and an
|
||||
|
8
vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go
generated
vendored
8
vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go
generated
vendored
@ -35,7 +35,8 @@ type InstrumentProvider interface {
|
||||
|
||||
// Counter is an instrument that records increasing values.
|
||||
type Counter interface {
|
||||
// Observe records the state of the instrument.
|
||||
// Observe records the state of the instrument to be x. The value of x is
|
||||
// assumed to be the exact Counter value to record.
|
||||
//
|
||||
// It is only valid to call this within a callback. If called outside of the
|
||||
// registered callback it should have no effect on the instrument, and an
|
||||
@ -47,7 +48,8 @@ type Counter interface {
|
||||
|
||||
// UpDownCounter is an instrument that records increasing or decreasing values.
|
||||
type UpDownCounter interface {
|
||||
// Observe records the state of the instrument.
|
||||
// Observe records the state of the instrument to be x. The value of x is
|
||||
// assumed to be the exact UpDownCounter value to record.
|
||||
//
|
||||
// It is only valid to call this within a callback. If called outside of the
|
||||
// registered callback it should have no effect on the instrument, and an
|
||||
@ -59,7 +61,7 @@ type UpDownCounter interface {
|
||||
|
||||
// Gauge is an instrument that records independent readings.
|
||||
type Gauge interface {
|
||||
// Observe records the state of the instrument.
|
||||
// Observe records the state of the instrument to be x.
|
||||
//
|
||||
// It is only valid to call this within a callback. If called outside of the
|
||||
// registered callback it should have no effect on the instrument, and an
|
||||
|
2
vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go
generated
vendored
2
vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go
generated
vendored
@ -214,7 +214,7 @@ func (i *aiGauge) unwrap() instrument.Asynchronous {
|
||||
return nil
|
||||
}
|
||||
|
||||
//Sync Instruments.
|
||||
// Sync Instruments.
|
||||
type sfCounter struct {
|
||||
name string
|
||||
opts []instrument.Option
|
||||
|
24
vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
generated
vendored
Normal file
24
vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package instrumentation provides types to represent the code libraries that
|
||||
// provide OpenTelemetry instrumentation. These types are used in the
|
||||
// OpenTelemetry signal pipelines to identify the source of telemetry.
|
||||
//
|
||||
// See
|
||||
// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md
|
||||
// and
|
||||
// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md
|
||||
// for more information.
|
||||
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
|
7
vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
generated
vendored
7
vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
generated
vendored
@ -12,13 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package instrumentation provides an instrumentation library structure to be
|
||||
passed to both the OpenTelemetry Tracer and Meter components.
|
||||
|
||||
For more information see
|
||||
[this](https://github.com/open-telemetry/oteps/blob/main/text/0083-component.md).
|
||||
*/
|
||||
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
|
||||
// Library represents the instrumentation library.
|
||||
|
7
vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
generated
vendored
7
vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
generated
vendored
@ -12,13 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package instrumentation provides an instrumentation scope structure to be
|
||||
passed to both the OpenTelemetry Tracer and Meter components.
|
||||
|
||||
For more information see
|
||||
[this](https://github.com/open-telemetry/oteps/blob/main/text/0083-component.md).
|
||||
*/
|
||||
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
|
||||
// Scope represents the instrumentation scope.
|
||||
|
161
vendor/go.opentelemetry.io/otel/sdk/metric/aggregation/aggregation.go
generated
vendored
Normal file
161
vendor/go.opentelemetry.io/otel/sdk/metric/aggregation/aggregation.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package aggregation contains configuration types that define the
|
||||
// aggregation operation used to summarizes recorded measurements.
|
||||
package aggregation // import "go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// errAgg is wrapped by misconfigured aggregations.
|
||||
var errAgg = errors.New("aggregation")
|
||||
|
||||
// Aggregation is the aggregation used to summarize recorded measurements.
|
||||
type Aggregation interface {
|
||||
// private attempts to ensure no user-defined Aggregation are allowed. The
|
||||
// OTel specification does not allow user-defined Aggregation currently.
|
||||
private()
|
||||
|
||||
// Copy returns a deep copy of the Aggregation.
|
||||
Copy() Aggregation
|
||||
|
||||
// Err returns an error for any misconfigured Aggregation.
|
||||
Err() error
|
||||
}
|
||||
|
||||
// Drop is an aggregation that drops all recorded data.
|
||||
type Drop struct{} // Drop has no parameters.
|
||||
|
||||
var _ Aggregation = Drop{}
|
||||
|
||||
func (Drop) private() {}
|
||||
|
||||
// Copy returns a deep copy of d.
|
||||
func (d Drop) Copy() Aggregation { return d }
|
||||
|
||||
// Err returns an error for any misconfiguration. A Drop aggregation has no
|
||||
// parameters and cannot be misconfigured, therefore this always returns nil.
|
||||
func (Drop) Err() error { return nil }
|
||||
|
||||
// Default is an aggregation that uses the default instrument kind selection
|
||||
// mapping to select another aggregation. A metric reader can be configured to
|
||||
// make an aggregation selection based on instrument kind that differs from
|
||||
// the default. This aggregation ensures the default is used.
|
||||
//
|
||||
// See the "go.opentelemetry.io/otel/sdk/metric".DefaultAggregationSelector
|
||||
// for information about the default instrument kind selection mapping.
|
||||
type Default struct{} // Default has no parameters.
|
||||
|
||||
var _ Aggregation = Default{}
|
||||
|
||||
func (Default) private() {}
|
||||
|
||||
// Copy returns a deep copy of d.
|
||||
func (d Default) Copy() Aggregation { return d }
|
||||
|
||||
// Err returns an error for any misconfiguration. A Default aggregation has no
|
||||
// parameters and cannot be misconfigured, therefore this always returns nil.
|
||||
func (Default) Err() error { return nil }
|
||||
|
||||
// Sum is an aggregation that summarizes a set of measurements as their
|
||||
// arithmetic sum.
|
||||
type Sum struct{} // Sum has no parameters.
|
||||
|
||||
var _ Aggregation = Sum{}
|
||||
|
||||
func (Sum) private() {}
|
||||
|
||||
// Copy returns a deep copy of s.
|
||||
func (s Sum) Copy() Aggregation { return s }
|
||||
|
||||
// Err returns an error for any misconfiguration. A Sum aggregation has no
|
||||
// parameters and cannot be misconfigured, therefore this always returns nil.
|
||||
func (Sum) Err() error { return nil }
|
||||
|
||||
// LastValue is an aggregation that summarizes a set of measurements as the
|
||||
// last one made.
|
||||
type LastValue struct{} // LastValue has no parameters.
|
||||
|
||||
var _ Aggregation = LastValue{}
|
||||
|
||||
func (LastValue) private() {}
|
||||
|
||||
// Copy returns a deep copy of l.
|
||||
func (l LastValue) Copy() Aggregation { return l }
|
||||
|
||||
// Err returns an error for any misconfiguration. A LastValue aggregation has
|
||||
// no parameters and cannot be misconfigured, therefore this always returns
|
||||
// nil.
|
||||
func (LastValue) Err() error { return nil }
|
||||
|
||||
// ExplicitBucketHistogram is an aggregation that summarizes a set of
|
||||
// measurements as an histogram with explicitly defined buckets.
|
||||
type ExplicitBucketHistogram struct {
|
||||
// Boundaries are the increasing bucket boundary values. Boundary values
|
||||
// define bucket upper bounds. Buckets are exclusive of their lower
|
||||
// boundary and inclusive of their upper bound (except at positive
|
||||
// infinity). A measurement is defined to fall into the greatest-numbered
|
||||
// bucket with a boundary that is greater than or equal to the
|
||||
// measurement. As an example, boundaries defined as:
|
||||
//
|
||||
// []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}
|
||||
//
|
||||
// Will define these buckets:
|
||||
//
|
||||
// (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0],
|
||||
// (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0],
|
||||
// (500.0, 1000.0], (1000.0, +∞)
|
||||
Boundaries []float64
|
||||
// NoMinMax indicates whether to not record the min and max of the
|
||||
// distribution. By default, these extremes are recorded.
|
||||
NoMinMax bool
|
||||
}
|
||||
|
||||
var _ Aggregation = ExplicitBucketHistogram{}
|
||||
|
||||
func (ExplicitBucketHistogram) private() {}
|
||||
|
||||
// errHist is returned by misconfigured ExplicitBucketHistograms.
|
||||
var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg)
|
||||
|
||||
// Err returns an error for any misconfiguration.
|
||||
func (h ExplicitBucketHistogram) Err() error {
|
||||
if len(h.Boundaries) <= 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check boundaries are monotonic.
|
||||
i := h.Boundaries[0]
|
||||
for _, j := range h.Boundaries[1:] {
|
||||
if i >= j {
|
||||
return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries)
|
||||
}
|
||||
i = j
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy returns a deep copy of h.
|
||||
func (h ExplicitBucketHistogram) Copy() Aggregation {
|
||||
b := make([]float64, len(h.Boundaries))
|
||||
copy(b, h.Boundaries)
|
||||
return ExplicitBucketHistogram{
|
||||
Boundaries: b,
|
||||
NoMinMax: h.NoMinMax,
|
||||
}
|
||||
}
|
114
vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/aggregator.go
generated
vendored
114
vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/aggregator.go
generated
vendored
@ -1,114 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package aggregator // import "go.opentelemetry.io/otel/sdk/metric/aggregator"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
)
|
||||
|
||||
// Aggregator implements a specific aggregation behavior, e.g., a
|
||||
// behavior to track a sequence of updates to an instrument. Counter
|
||||
// instruments commonly use a simple Sum aggregator, but for the
|
||||
// distribution instruments (Histogram, GaugeObserver) there are a
|
||||
// number of possible aggregators with different cost and accuracy
|
||||
// tradeoffs.
|
||||
//
|
||||
// Note that any Aggregator may be attached to any instrument--this is
|
||||
// the result of the OpenTelemetry API/SDK separation. It is possible
|
||||
// to attach a Sum aggregator to a Histogram instrument.
|
||||
type Aggregator interface {
|
||||
// Aggregation returns an Aggregation interface to access the
|
||||
// current state of this Aggregator. The caller is
|
||||
// responsible for synchronization and must not call any the
|
||||
// other methods in this interface concurrently while using
|
||||
// the Aggregation.
|
||||
Aggregation() aggregation.Aggregation
|
||||
|
||||
// Update receives a new measured value and incorporates it
|
||||
// into the aggregation. Update() calls may be called
|
||||
// concurrently.
|
||||
//
|
||||
// Descriptor.NumberKind() should be consulted to determine
|
||||
// whether the provided number is an int64 or float64.
|
||||
//
|
||||
// The Context argument comes from user-level code and could be
|
||||
// inspected for a `correlation.Map` or `trace.SpanContext`.
|
||||
Update(ctx context.Context, n number.Number, descriptor *sdkapi.Descriptor) error
|
||||
|
||||
// SynchronizedMove is called during collection to finish one
|
||||
// period of aggregation by atomically saving the
|
||||
// currently-updating state into the argument Aggregator AND
|
||||
// resetting the current value to the zero state.
|
||||
//
|
||||
// SynchronizedMove() is called concurrently with Update(). These
|
||||
// two methods must be synchronized with respect to each
|
||||
// other, for correctness.
|
||||
//
|
||||
// After saving a synchronized copy, the Aggregator can be converted
|
||||
// into one or more of the interfaces in the `aggregation` sub-package,
|
||||
// according to kind of Aggregator that was selected.
|
||||
//
|
||||
// This method will return an InconsistentAggregatorError if
|
||||
// this Aggregator cannot be copied into the destination due
|
||||
// to an incompatible type.
|
||||
//
|
||||
// This call has no Context argument because it is expected to
|
||||
// perform only computation.
|
||||
//
|
||||
// When called with a nil `destination`, this Aggregator is reset
|
||||
// and the current value is discarded.
|
||||
SynchronizedMove(destination Aggregator, descriptor *sdkapi.Descriptor) error
|
||||
|
||||
// Merge combines the checkpointed state from the argument
|
||||
// Aggregator into this Aggregator. Merge is not synchronized
|
||||
// with respect to Update or SynchronizedMove.
|
||||
//
|
||||
// The owner of an Aggregator being merged is responsible for
|
||||
// synchronization of both Aggregator states.
|
||||
Merge(aggregator Aggregator, descriptor *sdkapi.Descriptor) error
|
||||
}
|
||||
|
||||
// NewInconsistentAggregatorError formats an error describing an attempt to
|
||||
// Checkpoint or Merge different-type aggregators. The result can be unwrapped as
|
||||
// an ErrInconsistentType.
|
||||
func NewInconsistentAggregatorError(a1, a2 Aggregator) error {
|
||||
return fmt.Errorf("%w: %T and %T", aggregation.ErrInconsistentType, a1, a2)
|
||||
}
|
||||
|
||||
// RangeTest is a common routine for testing for valid input values.
|
||||
// This rejects NaN values. This rejects negative values when the
|
||||
// metric instrument does not support negative values, including
|
||||
// monotonic counter metrics and absolute Histogram metrics.
|
||||
func RangeTest(num number.Number, descriptor *sdkapi.Descriptor) error {
|
||||
numberKind := descriptor.NumberKind()
|
||||
|
||||
if numberKind == number.Float64Kind && math.IsNaN(num.AsFloat64()) {
|
||||
return aggregation.ErrNaNInput
|
||||
}
|
||||
|
||||
switch descriptor.InstrumentKind() {
|
||||
case sdkapi.CounterInstrumentKind, sdkapi.CounterObserverInstrumentKind:
|
||||
if num.IsNegative(numberKind) {
|
||||
return aggregation.ErrNegativeInput
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
269
vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/histogram/histogram.go
generated
vendored
269
vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/histogram/histogram.go
generated
vendored
@ -1,269 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package histogram // import "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
)
|
||||
|
||||
// Note: This code uses a Mutex to govern access to the exclusive
|
||||
// aggregator state. This is in contrast to a lock-free approach
|
||||
// (as in the Go prometheus client) that was reverted here:
|
||||
// https://github.com/open-telemetry/opentelemetry-go/pull/669
|
||||
|
||||
type (
|
||||
// Aggregator observe events and counts them in pre-determined buckets.
|
||||
// It also calculates the sum and count of all events.
|
||||
Aggregator struct {
|
||||
lock sync.Mutex
|
||||
boundaries []float64
|
||||
kind number.Kind
|
||||
state *state
|
||||
}
|
||||
|
||||
// config describes how the histogram is aggregated.
|
||||
config struct {
|
||||
// explicitBoundaries support arbitrary bucketing schemes. This
|
||||
// is the general case.
|
||||
explicitBoundaries []float64
|
||||
}
|
||||
|
||||
// Option configures a histogram config.
|
||||
Option interface {
|
||||
// apply sets one or more config fields.
|
||||
apply(*config)
|
||||
}
|
||||
|
||||
// state represents the state of a histogram, consisting of
|
||||
// the sum and counts for all observed values and
|
||||
// the less than equal bucket count for the pre-determined boundaries.
|
||||
state struct {
|
||||
bucketCounts []uint64
|
||||
sum number.Number
|
||||
count uint64
|
||||
}
|
||||
)
|
||||
|
||||
// WithExplicitBoundaries sets the ExplicitBoundaries configuration option of a config.
|
||||
func WithExplicitBoundaries(explicitBoundaries []float64) Option {
|
||||
return explicitBoundariesOption{explicitBoundaries}
|
||||
}
|
||||
|
||||
type explicitBoundariesOption struct {
|
||||
boundaries []float64
|
||||
}
|
||||
|
||||
func (o explicitBoundariesOption) apply(config *config) {
|
||||
config.explicitBoundaries = o.boundaries
|
||||
}
|
||||
|
||||
// defaultExplicitBoundaries have been copied from prometheus.DefBuckets.
|
||||
//
|
||||
// Note we anticipate the use of a high-precision histogram sketch as
|
||||
// the standard histogram aggregator for OTLP export.
|
||||
// (https://github.com/open-telemetry/opentelemetry-specification/issues/982).
|
||||
var defaultFloat64ExplicitBoundaries = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
|
||||
|
||||
// defaultInt64ExplicitBoundaryMultiplier determines the default
|
||||
// integer histogram boundaries.
|
||||
const defaultInt64ExplicitBoundaryMultiplier = 1e6
|
||||
|
||||
// defaultInt64ExplicitBoundaries applies a multiplier to the default
|
||||
// float64 boundaries: [ 5K, 10K, 25K, ..., 2.5M, 5M, 10M ].
|
||||
var defaultInt64ExplicitBoundaries = func(bounds []float64) (asint []float64) {
|
||||
for _, f := range bounds {
|
||||
asint = append(asint, defaultInt64ExplicitBoundaryMultiplier*f)
|
||||
}
|
||||
return
|
||||
}(defaultFloat64ExplicitBoundaries)
|
||||
|
||||
var _ aggregator.Aggregator = &Aggregator{}
|
||||
var _ aggregation.Sum = &Aggregator{}
|
||||
var _ aggregation.Count = &Aggregator{}
|
||||
var _ aggregation.Histogram = &Aggregator{}
|
||||
|
||||
// New returns a new aggregator for computing Histograms.
|
||||
//
|
||||
// A Histogram observe events and counts them in pre-defined buckets.
|
||||
// And also provides the total sum and count of all observations.
|
||||
//
|
||||
// Note that this aggregator maintains each value using independent
|
||||
// atomic operations, which introduces the possibility that
|
||||
// checkpoints are inconsistent.
|
||||
func New(cnt int, desc *sdkapi.Descriptor, opts ...Option) []Aggregator {
|
||||
var cfg config
|
||||
|
||||
if desc.NumberKind() == number.Int64Kind {
|
||||
cfg.explicitBoundaries = defaultInt64ExplicitBoundaries
|
||||
} else {
|
||||
cfg.explicitBoundaries = defaultFloat64ExplicitBoundaries
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt.apply(&cfg)
|
||||
}
|
||||
|
||||
aggs := make([]Aggregator, cnt)
|
||||
|
||||
// Boundaries MUST be ordered otherwise the histogram could not
|
||||
// be properly computed.
|
||||
sortedBoundaries := make([]float64, len(cfg.explicitBoundaries))
|
||||
|
||||
copy(sortedBoundaries, cfg.explicitBoundaries)
|
||||
sort.Float64s(sortedBoundaries)
|
||||
|
||||
for i := range aggs {
|
||||
aggs[i] = Aggregator{
|
||||
kind: desc.NumberKind(),
|
||||
boundaries: sortedBoundaries,
|
||||
}
|
||||
aggs[i].state = aggs[i].newState()
|
||||
}
|
||||
return aggs
|
||||
}
|
||||
|
||||
// Aggregation returns an interface for reading the state of this aggregator.
|
||||
func (c *Aggregator) Aggregation() aggregation.Aggregation {
|
||||
return c
|
||||
}
|
||||
|
||||
// Kind returns aggregation.HistogramKind.
|
||||
func (c *Aggregator) Kind() aggregation.Kind {
|
||||
return aggregation.HistogramKind
|
||||
}
|
||||
|
||||
// Sum returns the sum of all values in the checkpoint.
|
||||
func (c *Aggregator) Sum() (number.Number, error) {
|
||||
return c.state.sum, nil
|
||||
}
|
||||
|
||||
// Count returns the number of values in the checkpoint.
|
||||
func (c *Aggregator) Count() (uint64, error) {
|
||||
return c.state.count, nil
|
||||
}
|
||||
|
||||
// Histogram returns the count of events in pre-determined buckets.
|
||||
func (c *Aggregator) Histogram() (aggregation.Buckets, error) {
|
||||
return aggregation.Buckets{
|
||||
Boundaries: c.boundaries,
|
||||
Counts: c.state.bucketCounts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SynchronizedMove saves the current state into oa and resets the current state to
|
||||
// the empty set. Since no locks are taken, there is a chance that
|
||||
// the independent Sum, Count and Bucket Count are not consistent with each
|
||||
// other.
|
||||
func (c *Aggregator) SynchronizedMove(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error {
|
||||
o, _ := oa.(*Aggregator)
|
||||
|
||||
if oa != nil && o == nil {
|
||||
return aggregator.NewInconsistentAggregatorError(c, oa)
|
||||
}
|
||||
|
||||
if o != nil {
|
||||
// Swap case: This is the ordinary case for a
|
||||
// synchronous instrument, where the SDK allocates two
|
||||
// Aggregators and lock contention is anticipated.
|
||||
// Reset the target state before swapping it under the
|
||||
// lock below.
|
||||
o.clearState()
|
||||
}
|
||||
|
||||
c.lock.Lock()
|
||||
if o != nil {
|
||||
c.state, o.state = o.state, c.state
|
||||
} else {
|
||||
// No swap case: This is the ordinary case for an
|
||||
// asynchronous instrument, where the SDK allocates a
|
||||
// single Aggregator and there is no anticipated lock
|
||||
// contention.
|
||||
c.clearState()
|
||||
}
|
||||
c.lock.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Aggregator) newState() *state {
|
||||
return &state{
|
||||
bucketCounts: make([]uint64, len(c.boundaries)+1),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Aggregator) clearState() {
|
||||
for i := range c.state.bucketCounts {
|
||||
c.state.bucketCounts[i] = 0
|
||||
}
|
||||
c.state.sum = 0
|
||||
c.state.count = 0
|
||||
}
|
||||
|
||||
// Update adds the recorded measurement to the current data set.
|
||||
func (c *Aggregator) Update(_ context.Context, n number.Number, desc *sdkapi.Descriptor) error {
|
||||
kind := desc.NumberKind()
|
||||
asFloat := n.CoerceToFloat64(kind)
|
||||
|
||||
bucketID := len(c.boundaries)
|
||||
for i, boundary := range c.boundaries {
|
||||
if asFloat < boundary {
|
||||
bucketID = i
|
||||
break
|
||||
}
|
||||
}
|
||||
// Note: Binary-search was compared using the benchmarks. The following
|
||||
// code is equivalent to the linear search above:
|
||||
//
|
||||
// bucketID := sort.Search(len(c.boundaries), func(i int) bool {
|
||||
// return asFloat < c.boundaries[i]
|
||||
// })
|
||||
//
|
||||
// The binary search wins for very large boundary sets, but
|
||||
// the linear search performs better up through arrays between
|
||||
// 256 and 512 elements, which is a relatively large histogram, so we
|
||||
// continue to prefer linear search.
|
||||
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
c.state.count++
|
||||
c.state.sum.AddNumber(kind, n)
|
||||
c.state.bucketCounts[bucketID]++
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Merge combines two histograms that have the same buckets into a single one.
|
||||
func (c *Aggregator) Merge(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error {
|
||||
o, _ := oa.(*Aggregator)
|
||||
if o == nil {
|
||||
return aggregator.NewInconsistentAggregatorError(c, oa)
|
||||
}
|
||||
|
||||
c.state.sum.AddNumber(desc.NumberKind(), o.state.sum)
|
||||
c.state.count += o.state.count
|
||||
|
||||
for i := 0; i < len(c.state.bucketCounts); i++ {
|
||||
c.state.bucketCounts[i] += o.state.bucketCounts[i]
|
||||
}
|
||||
return nil
|
||||
}
|
133
vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue/lastvalue.go
generated
vendored
133
vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue/lastvalue.go
generated
vendored
@ -1,133 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package lastvalue // import "go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
)
|
||||
|
||||
type (
|
||||
|
||||
// Aggregator aggregates lastValue events.
|
||||
Aggregator struct {
|
||||
// value is an atomic pointer to *lastValueData. It is never nil.
|
||||
value unsafe.Pointer
|
||||
}
|
||||
|
||||
// lastValueData stores the current value of a lastValue along with
|
||||
// a sequence number to determine the winner of a race.
|
||||
lastValueData struct {
|
||||
// value is the int64- or float64-encoded Set() data
|
||||
//
|
||||
// value needs to be aligned for 64-bit atomic operations.
|
||||
value number.Number
|
||||
|
||||
// timestamp indicates when this record was submitted. This can be
|
||||
// used to pick a winner when multiple records contain lastValue data
|
||||
// for the same attributes due to races.
|
||||
timestamp time.Time
|
||||
}
|
||||
)
|
||||
|
||||
var _ aggregator.Aggregator = &Aggregator{}
|
||||
var _ aggregation.LastValue = &Aggregator{}
|
||||
|
||||
// An unset lastValue has zero timestamp and zero value.
|
||||
var unsetLastValue = &lastValueData{}
|
||||
|
||||
// New returns a new lastValue aggregator. This aggregator retains the
|
||||
// last value and timestamp that were recorded.
|
||||
func New(cnt int) []Aggregator {
|
||||
aggs := make([]Aggregator, cnt)
|
||||
for i := range aggs {
|
||||
aggs[i] = Aggregator{
|
||||
value: unsafe.Pointer(unsetLastValue),
|
||||
}
|
||||
}
|
||||
return aggs
|
||||
}
|
||||
|
||||
// Aggregation returns an interface for reading the state of this aggregator.
|
||||
func (g *Aggregator) Aggregation() aggregation.Aggregation {
|
||||
return g
|
||||
}
|
||||
|
||||
// Kind returns aggregation.LastValueKind.
|
||||
func (g *Aggregator) Kind() aggregation.Kind {
|
||||
return aggregation.LastValueKind
|
||||
}
|
||||
|
||||
// LastValue returns the last-recorded lastValue value and the
|
||||
// corresponding timestamp. The error value aggregation.ErrNoData
|
||||
// will be returned if (due to a race condition) the checkpoint was
|
||||
// computed before the first value was set.
|
||||
func (g *Aggregator) LastValue() (number.Number, time.Time, error) {
|
||||
gd := (*lastValueData)(g.value)
|
||||
if gd == unsetLastValue {
|
||||
return 0, time.Time{}, aggregation.ErrNoData
|
||||
}
|
||||
return gd.value.AsNumber(), gd.timestamp, nil
|
||||
}
|
||||
|
||||
// SynchronizedMove atomically saves the current value.
|
||||
func (g *Aggregator) SynchronizedMove(oa aggregator.Aggregator, _ *sdkapi.Descriptor) error {
|
||||
if oa == nil {
|
||||
atomic.StorePointer(&g.value, unsafe.Pointer(unsetLastValue))
|
||||
return nil
|
||||
}
|
||||
o, _ := oa.(*Aggregator)
|
||||
if o == nil {
|
||||
return aggregator.NewInconsistentAggregatorError(g, oa)
|
||||
}
|
||||
o.value = atomic.SwapPointer(&g.value, unsafe.Pointer(unsetLastValue))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update atomically sets the current "last" value.
|
||||
func (g *Aggregator) Update(_ context.Context, n number.Number, desc *sdkapi.Descriptor) error {
|
||||
ngd := &lastValueData{
|
||||
value: n,
|
||||
timestamp: time.Now(),
|
||||
}
|
||||
atomic.StorePointer(&g.value, unsafe.Pointer(ngd))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Merge combines state from two aggregators. The most-recently set
|
||||
// value is chosen.
|
||||
func (g *Aggregator) Merge(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error {
|
||||
o, _ := oa.(*Aggregator)
|
||||
if o == nil {
|
||||
return aggregator.NewInconsistentAggregatorError(g, oa)
|
||||
}
|
||||
|
||||
ggd := (*lastValueData)(atomic.LoadPointer(&g.value))
|
||||
ogd := (*lastValueData)(atomic.LoadPointer(&o.value))
|
||||
|
||||
if ggd.timestamp.After(ogd.timestamp) {
|
||||
return nil
|
||||
}
|
||||
|
||||
g.value = unsafe.Pointer(ogd)
|
||||
return nil
|
||||
}
|
88
vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/sum/sum.go
generated
vendored
88
vendor/go.opentelemetry.io/otel/sdk/metric/aggregator/sum/sum.go
generated
vendored
@ -1,88 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sum // import "go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
)
|
||||
|
||||
// Aggregator aggregates counter events.
|
||||
type Aggregator struct {
|
||||
// current holds current increments to this counter record
|
||||
// current needs to be aligned for 64-bit atomic operations.
|
||||
value number.Number
|
||||
}
|
||||
|
||||
var _ aggregator.Aggregator = &Aggregator{}
|
||||
var _ aggregation.Sum = &Aggregator{}
|
||||
|
||||
// New returns a new counter aggregator implemented by atomic
|
||||
// operations. This aggregator implements the aggregation.Sum
|
||||
// export interface.
|
||||
func New(cnt int) []Aggregator {
|
||||
return make([]Aggregator, cnt)
|
||||
}
|
||||
|
||||
// Aggregation returns an interface for reading the state of this aggregator.
|
||||
func (c *Aggregator) Aggregation() aggregation.Aggregation {
|
||||
return c
|
||||
}
|
||||
|
||||
// Kind returns aggregation.SumKind.
|
||||
func (c *Aggregator) Kind() aggregation.Kind {
|
||||
return aggregation.SumKind
|
||||
}
|
||||
|
||||
// Sum returns the last-checkpointed sum. This will never return an
|
||||
// error.
|
||||
func (c *Aggregator) Sum() (number.Number, error) {
|
||||
return c.value, nil
|
||||
}
|
||||
|
||||
// SynchronizedMove atomically saves the current value into oa and resets the
|
||||
// current sum to zero.
|
||||
func (c *Aggregator) SynchronizedMove(oa aggregator.Aggregator, _ *sdkapi.Descriptor) error {
|
||||
if oa == nil {
|
||||
c.value.SetRawAtomic(0)
|
||||
return nil
|
||||
}
|
||||
o, _ := oa.(*Aggregator)
|
||||
if o == nil {
|
||||
return aggregator.NewInconsistentAggregatorError(c, oa)
|
||||
}
|
||||
o.value = c.value.SwapNumberAtomic(number.Number(0))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update atomically adds to the current value.
|
||||
func (c *Aggregator) Update(_ context.Context, num number.Number, desc *sdkapi.Descriptor) error {
|
||||
c.value.AddNumberAtomic(desc.NumberKind(), num)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Merge combines two counters by adding their sums.
|
||||
func (c *Aggregator) Merge(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error {
|
||||
o, _ := oa.(*Aggregator)
|
||||
if o == nil {
|
||||
return aggregator.NewInconsistentAggregatorError(c, oa)
|
||||
}
|
||||
c.value.AddNumber(desc.NumberKind(), o.value)
|
||||
return nil
|
||||
}
|
110
vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
generated
vendored
Normal file
110
vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
generated
vendored
Normal file
@ -0,0 +1,110 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal"
|
||||
)
|
||||
|
||||
// cache is a locking storage used to quickly return already computed values.
|
||||
//
|
||||
// The zero value of a cache is empty and ready to use.
|
||||
//
|
||||
// A cache must not be copied after first use.
|
||||
//
|
||||
// All methods of a cache are safe to call concurrently.
|
||||
type cache[K comparable, V any] struct {
|
||||
sync.Mutex
|
||||
data map[K]V
|
||||
}
|
||||
|
||||
// Lookup returns the value stored in the cache with the accociated key if it
|
||||
// exists. Otherwise, f is called and its returned value is set in the cache
|
||||
// for key and returned.
|
||||
//
|
||||
// Lookup is safe to call concurrently. It will hold the cache lock, so f
|
||||
// should not block excessively.
|
||||
func (c *cache[K, V]) Lookup(key K, f func() V) V {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if c.data == nil {
|
||||
val := f()
|
||||
c.data = map[K]V{key: val}
|
||||
return val
|
||||
}
|
||||
if v, ok := c.data[key]; ok {
|
||||
return v
|
||||
}
|
||||
val := f()
|
||||
c.data[key] = val
|
||||
return val
|
||||
}
|
||||
|
||||
// instrumentCache is a cache of instruments. It is scoped at the Meter level
|
||||
// along with a number type. Meaning all instruments it contains need to belong
|
||||
// to the same instrumentation.Scope (implicitly) and number type (explicitly).
|
||||
type instrumentCache[N int64 | float64] struct {
|
||||
// aggregators is used to ensure duplicate creations of the same instrument
|
||||
// return the same instance of that instrument's aggregator.
|
||||
aggregators *cache[instrumentID, aggVal[N]]
|
||||
// views is used to ensure if instruments with the same name are created,
|
||||
// but do not have the same identifying properties, a warning is logged.
|
||||
views *cache[string, instrumentID]
|
||||
}
|
||||
|
||||
// newInstrumentCache returns a new instrumentCache that uses ac as the
|
||||
// underlying cache for aggregators and vc as the cache for views. If ac or vc
|
||||
// are nil, a new empty cache will be used.
|
||||
func newInstrumentCache[N int64 | float64](ac *cache[instrumentID, aggVal[N]], vc *cache[string, instrumentID]) instrumentCache[N] {
|
||||
if ac == nil {
|
||||
ac = &cache[instrumentID, aggVal[N]]{}
|
||||
}
|
||||
if vc == nil {
|
||||
vc = &cache[string, instrumentID]{}
|
||||
}
|
||||
return instrumentCache[N]{aggregators: ac, views: vc}
|
||||
}
|
||||
|
||||
// LookupAggregator returns the Aggregator and error for a cached instrument if
|
||||
// it exist in the cache. Otherwise, f is called and its returned value is set
|
||||
// in the cache and returned.
|
||||
//
|
||||
// LookupAggregator is safe to call concurrently.
|
||||
func (c instrumentCache[N]) LookupAggregator(id instrumentID, f func() (internal.Aggregator[N], error)) (agg internal.Aggregator[N], err error) {
|
||||
v := c.aggregators.Lookup(id, func() aggVal[N] {
|
||||
a, err := f()
|
||||
return aggVal[N]{Aggregator: a, Err: err}
|
||||
})
|
||||
return v.Aggregator, v.Err
|
||||
}
|
||||
|
||||
// aggVal is the cached value of an instrumentCache's aggregators cache.
|
||||
type aggVal[N int64 | float64] struct {
|
||||
Aggregator internal.Aggregator[N]
|
||||
Err error
|
||||
}
|
||||
|
||||
// Unique returns if id is unique or a duplicate instrument. If an instrument
|
||||
// with the same name has already been created, that instrumentID will be
|
||||
// returned along with false. Otherwise, id is returned with true.
|
||||
//
|
||||
// Unique is safe to call concurrently.
|
||||
func (c instrumentCache[N]) Unique(id instrumentID) (instrumentID, bool) {
|
||||
got := c.views.Lookup(id.Name, func() instrumentID { return id })
|
||||
return got, id == got
|
||||
}
|
132
vendor/go.opentelemetry.io/otel/sdk/metric/config.go
generated
vendored
Normal file
132
vendor/go.opentelemetry.io/otel/sdk/metric/config.go
generated
vendored
Normal file
@ -0,0 +1,132 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/view"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
// config contains configuration options for a MeterProvider.
|
||||
type config struct {
|
||||
res *resource.Resource
|
||||
readers map[Reader][]view.View
|
||||
}
|
||||
|
||||
// readerSignals returns a force-flush and shutdown function for a
|
||||
// MeterProvider to call in their respective options. All Readers c contains
|
||||
// will have their force-flush and shutdown methods unified into returned
|
||||
// single functions.
|
||||
func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) {
|
||||
var fFuncs, sFuncs []func(context.Context) error
|
||||
for r := range c.readers {
|
||||
sFuncs = append(sFuncs, r.Shutdown)
|
||||
fFuncs = append(fFuncs, r.ForceFlush)
|
||||
}
|
||||
|
||||
return unify(fFuncs), unifyShutdown(sFuncs)
|
||||
}
|
||||
|
||||
// unify unifies calling all of funcs into a single function call. All errors
|
||||
// returned from calls to funcs will be unify into a single error return
|
||||
// value.
|
||||
func unify(funcs []func(context.Context) error) func(context.Context) error {
|
||||
return func(ctx context.Context) error {
|
||||
var errs []error
|
||||
for _, f := range funcs {
|
||||
if err := f(ctx); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
switch len(errs) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return errs[0]
|
||||
default:
|
||||
return fmt.Errorf("%v", errs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unifyShutdown unifies calling all of funcs once for a shutdown. If called
|
||||
// more than once, an ErrReaderShutdown error is returned.
|
||||
func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error {
|
||||
f := unify(funcs)
|
||||
var once sync.Once
|
||||
return func(ctx context.Context) error {
|
||||
err := ErrReaderShutdown
|
||||
once.Do(func() { err = f(ctx) })
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// newConfig returns a config configured with options.
|
||||
func newConfig(options []Option) config {
|
||||
conf := config{res: resource.Default()}
|
||||
for _, o := range options {
|
||||
conf = o.apply(conf)
|
||||
}
|
||||
return conf
|
||||
}
|
||||
|
||||
// Option applies a configuration option value to a MeterProvider.
|
||||
type Option interface {
|
||||
apply(config) config
|
||||
}
|
||||
|
||||
// optionFunc applies a set of options to a config.
|
||||
type optionFunc func(config) config
|
||||
|
||||
// apply returns a config with option(s) applied.
|
||||
func (o optionFunc) apply(conf config) config {
|
||||
return o(conf)
|
||||
}
|
||||
|
||||
// WithResource associates a Resource with a MeterProvider. This Resource
|
||||
// represents the entity producing telemetry and is associated with all Meters
|
||||
// the MeterProvider will create.
|
||||
//
|
||||
// By default, if this Option is not used, the default Resource from the
|
||||
// go.opentelemetry.io/otel/sdk/resource package will be used.
|
||||
func WithResource(res *resource.Resource) Option {
|
||||
return optionFunc(func(conf config) config {
|
||||
conf.res = res
|
||||
return conf
|
||||
})
|
||||
}
|
||||
|
||||
// WithReader associates a Reader with a MeterProvider. Any passed view config
|
||||
// will be used to associate a view with the Reader. If no views are passed
|
||||
// the default view will be use for the Reader.
|
||||
//
|
||||
// Passing this option multiple times for the same Reader will overwrite. The
|
||||
// last option passed will be the one used for that Reader.
|
||||
//
|
||||
// By default, if this option is not used, the MeterProvider will perform no
|
||||
// operations; no data will be exported without a Reader.
|
||||
func WithReader(r Reader, views ...view.View) Option {
|
||||
return optionFunc(func(cfg config) config {
|
||||
if cfg.readers == nil {
|
||||
cfg.readers = make(map[Reader][]view.View)
|
||||
}
|
||||
cfg.readers[r] = views
|
||||
return cfg
|
||||
})
|
||||
}
|
131
vendor/go.opentelemetry.io/otel/sdk/metric/controller/basic/config.go
generated
vendored
131
vendor/go.opentelemetry.io/otel/sdk/metric/controller/basic/config.go
generated
vendored
@ -1,131 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package basic // import "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
// config contains configuration for a basic Controller.
|
||||
type config struct {
|
||||
// Resource is the OpenTelemetry resource associated with all Meters
|
||||
// created by the Controller.
|
||||
Resource *resource.Resource
|
||||
|
||||
// CollectPeriod is the interval between calls to Collect a
|
||||
// checkpoint.
|
||||
//
|
||||
// When pulling metrics and not exporting, this is the minimum
|
||||
// time between calls to Collect. In a pull-only
|
||||
// configuration, collection is performed on demand; set
|
||||
// CollectPeriod to 0 always recompute the export record set.
|
||||
//
|
||||
// When exporting metrics, this must be > 0.
|
||||
//
|
||||
// Default value is 10s.
|
||||
CollectPeriod time.Duration
|
||||
|
||||
// CollectTimeout is the timeout of the Context passed to
|
||||
// Collect() and subsequently to Observer instrument callbacks.
|
||||
//
|
||||
// Default value is 10s. If zero, no Collect timeout is applied.
|
||||
CollectTimeout time.Duration
|
||||
|
||||
// Exporter is used for exporting metric data.
|
||||
//
|
||||
// Note: Exporters such as Prometheus that pull data do not implement
|
||||
// export.Exporter. These will directly call Collect() and ForEach().
|
||||
Exporter export.Exporter
|
||||
|
||||
// PushTimeout is the timeout of the Context when a exporter is configured.
|
||||
//
|
||||
// Default value is 10s. If zero, no Export timeout is applied.
|
||||
PushTimeout time.Duration
|
||||
}
|
||||
|
||||
// Option is the interface that applies the value to a configuration option.
|
||||
type Option interface {
|
||||
// apply sets the Option value of a Config.
|
||||
apply(config) config
|
||||
}
|
||||
|
||||
// WithResource sets the Resource configuration option of a Config by merging it
|
||||
// with the Resource configuration in the environment.
|
||||
func WithResource(r *resource.Resource) Option {
|
||||
return resourceOption{r}
|
||||
}
|
||||
|
||||
type resourceOption struct{ *resource.Resource }
|
||||
|
||||
func (o resourceOption) apply(cfg config) config {
|
||||
res, err := resource.Merge(cfg.Resource, o.Resource)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
cfg.Resource = res
|
||||
return cfg
|
||||
}
|
||||
|
||||
// WithCollectPeriod sets the CollectPeriod configuration option of a Config.
|
||||
func WithCollectPeriod(period time.Duration) Option {
|
||||
return collectPeriodOption(period)
|
||||
}
|
||||
|
||||
type collectPeriodOption time.Duration
|
||||
|
||||
func (o collectPeriodOption) apply(cfg config) config {
|
||||
cfg.CollectPeriod = time.Duration(o)
|
||||
return cfg
|
||||
}
|
||||
|
||||
// WithCollectTimeout sets the CollectTimeout configuration option of a Config.
|
||||
func WithCollectTimeout(timeout time.Duration) Option {
|
||||
return collectTimeoutOption(timeout)
|
||||
}
|
||||
|
||||
type collectTimeoutOption time.Duration
|
||||
|
||||
func (o collectTimeoutOption) apply(cfg config) config {
|
||||
cfg.CollectTimeout = time.Duration(o)
|
||||
return cfg
|
||||
}
|
||||
|
||||
// WithExporter sets the exporter configuration option of a Config.
|
||||
func WithExporter(exporter export.Exporter) Option {
|
||||
return exporterOption{exporter}
|
||||
}
|
||||
|
||||
type exporterOption struct{ exporter export.Exporter }
|
||||
|
||||
func (o exporterOption) apply(cfg config) config {
|
||||
cfg.Exporter = o.exporter
|
||||
return cfg
|
||||
}
|
||||
|
||||
// WithPushTimeout sets the PushTimeout configuration option of a Config.
|
||||
func WithPushTimeout(timeout time.Duration) Option {
|
||||
return pushTimeoutOption(timeout)
|
||||
}
|
||||
|
||||
type pushTimeoutOption time.Duration
|
||||
|
||||
func (o pushTimeoutOption) apply(cfg config) config {
|
||||
cfg.PushTimeout = time.Duration(o)
|
||||
return cfg
|
||||
}
|
382
vendor/go.opentelemetry.io/otel/sdk/metric/controller/basic/controller.go
generated
vendored
382
vendor/go.opentelemetry.io/otel/sdk/metric/controller/basic/controller.go
generated
vendored
@ -1,382 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package basic // import "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
sdk "go.opentelemetry.io/otel/sdk/metric"
|
||||
controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/registry"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
// DefaultPeriod is used for:
|
||||
//
|
||||
// - the minimum time between calls to Collect()
|
||||
// - the timeout for Export()
|
||||
// - the timeout for Collect().
|
||||
const DefaultPeriod = 10 * time.Second
|
||||
|
||||
// ErrControllerStarted indicates that a controller was started more
|
||||
// than once.
|
||||
var ErrControllerStarted = fmt.Errorf("controller already started")
|
||||
|
||||
// Controller organizes and synchronizes collection of metric data in
|
||||
// both "pull" and "push" configurations. This supports two distinct
|
||||
// modes:
|
||||
//
|
||||
// - Push and Pull: Start() must be called to begin calling the exporter;
|
||||
// Collect() is called periodically by a background thread after starting
|
||||
// the controller.
|
||||
// - Pull-Only: Start() is optional in this case, to call Collect periodically.
|
||||
// If Start() is not called, Collect() can be called manually to initiate
|
||||
// collection
|
||||
//
|
||||
// The controller supports mixing push and pull access to metric data
|
||||
// using the export.Reader RWLock interface. Collection will
|
||||
// be blocked by a pull request in the basic controller.
|
||||
type Controller struct {
|
||||
// lock synchronizes Start() and Stop().
|
||||
lock sync.Mutex
|
||||
scopes sync.Map
|
||||
checkpointerFactory export.CheckpointerFactory
|
||||
|
||||
resource *resource.Resource
|
||||
exporter export.Exporter
|
||||
wg sync.WaitGroup
|
||||
stopCh chan struct{}
|
||||
clock controllerTime.Clock
|
||||
ticker controllerTime.Ticker
|
||||
|
||||
collectPeriod time.Duration
|
||||
collectTimeout time.Duration
|
||||
pushTimeout time.Duration
|
||||
|
||||
// collectedTime is used only in configurations with no
|
||||
// exporter, when ticker != nil.
|
||||
collectedTime time.Time
|
||||
}
|
||||
|
||||
var _ export.InstrumentationLibraryReader = &Controller{}
|
||||
var _ metric.MeterProvider = &Controller{}
|
||||
|
||||
// Meter returns a new Meter defined by instrumentationName and configured
|
||||
// with opts.
|
||||
func (c *Controller) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
|
||||
cfg := metric.NewMeterConfig(opts...)
|
||||
scope := instrumentation.Scope{
|
||||
Name: instrumentationName,
|
||||
Version: cfg.InstrumentationVersion(),
|
||||
SchemaURL: cfg.SchemaURL(),
|
||||
}
|
||||
|
||||
m, ok := c.scopes.Load(scope)
|
||||
if !ok {
|
||||
checkpointer := c.checkpointerFactory.NewCheckpointer()
|
||||
m, _ = c.scopes.LoadOrStore(
|
||||
scope,
|
||||
registry.NewUniqueInstrumentMeterImpl(&accumulatorCheckpointer{
|
||||
Accumulator: sdk.NewAccumulator(checkpointer),
|
||||
checkpointer: checkpointer,
|
||||
scope: scope,
|
||||
}))
|
||||
}
|
||||
return sdkapi.WrapMeterImpl(m.(*registry.UniqueInstrumentMeterImpl))
|
||||
}
|
||||
|
||||
type accumulatorCheckpointer struct {
|
||||
*sdk.Accumulator
|
||||
checkpointer export.Checkpointer
|
||||
scope instrumentation.Scope
|
||||
}
|
||||
|
||||
var _ sdkapi.MeterImpl = &accumulatorCheckpointer{}
|
||||
|
||||
// New constructs a Controller using the provided checkpointer factory
|
||||
// and options (including optional exporter) to configure a metric
|
||||
// export pipeline.
|
||||
func New(checkpointerFactory export.CheckpointerFactory, opts ...Option) *Controller {
|
||||
c := config{
|
||||
CollectPeriod: DefaultPeriod,
|
||||
CollectTimeout: DefaultPeriod,
|
||||
PushTimeout: DefaultPeriod,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
c = opt.apply(c)
|
||||
}
|
||||
if c.Resource == nil {
|
||||
c.Resource = resource.Default()
|
||||
} else {
|
||||
var err error
|
||||
c.Resource, err = resource.Merge(resource.Environment(), c.Resource)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
}
|
||||
return &Controller{
|
||||
checkpointerFactory: checkpointerFactory,
|
||||
exporter: c.Exporter,
|
||||
resource: c.Resource,
|
||||
stopCh: nil,
|
||||
clock: controllerTime.RealClock{},
|
||||
|
||||
collectPeriod: c.CollectPeriod,
|
||||
collectTimeout: c.CollectTimeout,
|
||||
pushTimeout: c.PushTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
// SetClock supports setting a mock clock for testing. This must be
|
||||
// called before Start().
|
||||
func (c *Controller) SetClock(clock controllerTime.Clock) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.clock = clock
|
||||
}
|
||||
|
||||
// Resource returns the *resource.Resource associated with this
|
||||
// controller.
|
||||
func (c *Controller) Resource() *resource.Resource {
|
||||
return c.resource
|
||||
}
|
||||
|
||||
// Start begins a ticker that periodically collects and exports
|
||||
// metrics with the configured interval. This is required for calling
|
||||
// a configured Exporter (see WithExporter) and is otherwise optional
|
||||
// when only pulling metric data.
|
||||
//
|
||||
// The passed context is passed to Collect() and subsequently to
|
||||
// asynchronous instrument callbacks. Returns an error when the
|
||||
// controller was already started.
|
||||
//
|
||||
// Note that it is not necessary to Start a controller when only
|
||||
// pulling data; use the Collect() and ForEach() methods directly in
|
||||
// this case.
|
||||
func (c *Controller) Start(ctx context.Context) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.stopCh != nil {
|
||||
return ErrControllerStarted
|
||||
}
|
||||
|
||||
c.wg.Add(1)
|
||||
c.stopCh = make(chan struct{})
|
||||
c.ticker = c.clock.Ticker(c.collectPeriod)
|
||||
go c.runTicker(ctx, c.stopCh)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop waits for the background goroutine to return and then collects
|
||||
// and exports metrics one last time before returning. The passed
|
||||
// context is passed to the final Collect() and subsequently to the
|
||||
// final asynchronous instruments.
|
||||
//
|
||||
// Note that Stop() will not cancel an ongoing collection or export.
|
||||
func (c *Controller) Stop(ctx context.Context) error {
|
||||
if lastCollection := func() bool {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.stopCh == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
close(c.stopCh)
|
||||
c.stopCh = nil
|
||||
c.wg.Wait()
|
||||
c.ticker.Stop()
|
||||
c.ticker = nil
|
||||
return true
|
||||
}(); !lastCollection {
|
||||
return nil
|
||||
}
|
||||
return c.collect(ctx)
|
||||
}
|
||||
|
||||
// runTicker collection on ticker events until the stop channel is closed.
|
||||
func (c *Controller) runTicker(ctx context.Context, stopCh chan struct{}) {
|
||||
defer c.wg.Done()
|
||||
for {
|
||||
select {
|
||||
case <-stopCh:
|
||||
return
|
||||
case <-c.ticker.C():
|
||||
if err := c.collect(ctx); err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// collect computes a checkpoint and optionally exports it.
|
||||
func (c *Controller) collect(ctx context.Context) error {
|
||||
if err := c.checkpoint(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if c.exporter == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Note: this is not subject to collectTimeout. This blocks the next
|
||||
// collection despite collectTimeout because it holds a lock.
|
||||
return c.export(ctx)
|
||||
}
|
||||
|
||||
// accumulatorList returns a snapshot of current accumulators
|
||||
// registered to this controller. This briefly locks the controller.
|
||||
func (c *Controller) accumulatorList() []*accumulatorCheckpointer {
|
||||
var r []*accumulatorCheckpointer
|
||||
c.scopes.Range(func(key, value interface{}) bool {
|
||||
acc, ok := value.(*registry.UniqueInstrumentMeterImpl).MeterImpl().(*accumulatorCheckpointer)
|
||||
if ok {
|
||||
r = append(r, acc)
|
||||
}
|
||||
return true
|
||||
})
|
||||
return r
|
||||
}
|
||||
|
||||
// checkpoint calls the Accumulator and Checkpointer interfaces to
|
||||
// compute the Reader. This applies the configured collection
|
||||
// timeout. Note that this does not try to cancel a Collect or Export
|
||||
// when Stop() is called.
|
||||
func (c *Controller) checkpoint(ctx context.Context) error {
|
||||
for _, impl := range c.accumulatorList() {
|
||||
if err := c.checkpointSingleAccumulator(ctx, impl); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkpointSingleAccumulator checkpoints a single instrumentation
|
||||
// scope's accumulator, which involves calling
|
||||
// checkpointer.StartCollection, accumulator.Collect, and
|
||||
// checkpointer.FinishCollection in sequence.
|
||||
func (c *Controller) checkpointSingleAccumulator(ctx context.Context, ac *accumulatorCheckpointer) error {
|
||||
ckpt := ac.checkpointer.Reader()
|
||||
ckpt.Lock()
|
||||
defer ckpt.Unlock()
|
||||
|
||||
ac.checkpointer.StartCollection()
|
||||
|
||||
if c.collectTimeout > 0 {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, c.collectTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
_ = ac.Accumulator.Collect(ctx)
|
||||
|
||||
var err error
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
default:
|
||||
// The context wasn't done, ok.
|
||||
}
|
||||
|
||||
// Finish the checkpoint whether the accumulator timed out or not.
|
||||
if cerr := ac.checkpointer.FinishCollection(); cerr != nil {
|
||||
if err == nil {
|
||||
err = cerr
|
||||
} else {
|
||||
err = fmt.Errorf("%s: %w", cerr.Error(), err)
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// export calls the exporter with a read lock on the Reader,
|
||||
// applying the configured export timeout.
|
||||
func (c *Controller) export(ctx context.Context) error { // nolint:revive // method name shadows import.
|
||||
if c.pushTimeout > 0 {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, c.pushTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
return c.exporter.Export(ctx, c.resource, c)
|
||||
}
|
||||
|
||||
// ForEach implements export.InstrumentationLibraryReader.
|
||||
func (c *Controller) ForEach(readerFunc func(l instrumentation.Library, r export.Reader) error) error {
|
||||
for _, acPair := range c.accumulatorList() {
|
||||
reader := acPair.checkpointer.Reader()
|
||||
// TODO: We should not fail fast; instead accumulate errors.
|
||||
if err := func() error {
|
||||
reader.RLock()
|
||||
defer reader.RUnlock()
|
||||
return readerFunc(acPair.scope, reader)
|
||||
}(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsRunning returns true if the controller was started via Start(),
|
||||
// indicating that the current export.Reader is being kept
|
||||
// up-to-date.
|
||||
func (c *Controller) IsRunning() bool {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
return c.ticker != nil
|
||||
}
|
||||
|
||||
// Collect requests a collection. The collection will be skipped if
|
||||
// the last collection is aged less than the configured collection
|
||||
// period.
|
||||
func (c *Controller) Collect(ctx context.Context) error {
|
||||
if c.IsRunning() {
|
||||
// When there's a non-nil ticker, there's a goroutine
|
||||
// computing checkpoints with the collection period.
|
||||
return ErrControllerStarted
|
||||
}
|
||||
if !c.shouldCollect() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return c.checkpoint(ctx)
|
||||
}
|
||||
|
||||
// shouldCollect returns true if the collector should collect now,
|
||||
// based on the timestamp, the last collection time, and the
|
||||
// configured period.
|
||||
func (c *Controller) shouldCollect() bool {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.collectPeriod == 0 {
|
||||
return true
|
||||
}
|
||||
now := c.clock.Now()
|
||||
if now.Sub(c.collectedTime) < c.collectPeriod {
|
||||
return false
|
||||
}
|
||||
c.collectedTime = now
|
||||
return true
|
||||
}
|
67
vendor/go.opentelemetry.io/otel/sdk/metric/controller/time/time.go
generated
vendored
67
vendor/go.opentelemetry.io/otel/sdk/metric/controller/time/time.go
generated
vendored
@ -1,67 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package time // import "go.opentelemetry.io/otel/sdk/metric/controller/time"
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// Several types below are created to match "github.com/benbjohnson/clock"
|
||||
// so that it remains a test-only dependency.
|
||||
|
||||
// Clock keeps track of time for a metric SDK.
|
||||
type Clock interface {
|
||||
Now() time.Time
|
||||
Ticker(duration time.Duration) Ticker
|
||||
}
|
||||
|
||||
// Ticker signals time intervals.
|
||||
type Ticker interface {
|
||||
Stop()
|
||||
C() <-chan time.Time
|
||||
}
|
||||
|
||||
// RealClock wraps the time package and uses the system time to tell time.
|
||||
type RealClock struct {
|
||||
}
|
||||
|
||||
// RealTicker wraps the time package and uses system time to tick time
|
||||
// intervals.
|
||||
type RealTicker struct {
|
||||
ticker *time.Ticker
|
||||
}
|
||||
|
||||
var _ Clock = RealClock{}
|
||||
var _ Ticker = RealTicker{}
|
||||
|
||||
// Now returns the current time.
|
||||
func (RealClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// Ticker creates a new RealTicker that will tick with period.
|
||||
func (RealClock) Ticker(period time.Duration) Ticker {
|
||||
return RealTicker{time.NewTicker(period)}
|
||||
}
|
||||
|
||||
// Stop turns off the RealTicker.
|
||||
func (t RealTicker) Stop() {
|
||||
t.ticker.Stop()
|
||||
}
|
||||
|
||||
// C returns a channel that receives the current time when RealTicker ticks.
|
||||
func (t RealTicker) C() <-chan time.Time {
|
||||
return t.ticker.C
|
||||
}
|
147
vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
generated
vendored
147
vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
generated
vendored
@ -12,120 +12,35 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package metric implements the OpenTelemetry metric API.
|
||||
|
||||
This package is currently in a pre-GA phase. Backwards incompatible changes
|
||||
may be introduced in subsequent minor version releases as we work to track the
|
||||
evolving OpenTelemetry specification and user feedback.
|
||||
|
||||
The Accumulator type supports configurable metrics export behavior through a
|
||||
collection of export interfaces that support various export strategies,
|
||||
described below.
|
||||
|
||||
The OpenTelemetry metric API consists of methods for constructing synchronous
|
||||
and asynchronous instruments. There are two constructors per instrument for
|
||||
the two kinds of number (int64, float64).
|
||||
|
||||
Synchronous instruments are managed by a sync.Map containing a *record
|
||||
with the current state for each synchronous instrument. A lock-free
|
||||
algorithm is used to protect against races when adding and removing
|
||||
items from the sync.Map.
|
||||
|
||||
Asynchronous instruments are managed by an internal
|
||||
AsyncInstrumentState, which coordinates calling batch and single
|
||||
instrument callbacks.
|
||||
|
||||
Internal Structure
|
||||
|
||||
Each observer also has its own kind of record stored in the SDK. This
|
||||
record contains a set of recorders for every specific attribute set used in
|
||||
the callback.
|
||||
|
||||
A sync.Map maintains the mapping of current instruments and attribute sets to
|
||||
internal records. To find a record, the SDK consults the Map to locate an
|
||||
existing record, otherwise it constructs a new record. The SDK maintains a
|
||||
count of the number of references to each record, ensuring that records are
|
||||
not reclaimed from the Map while they are still active from the user's
|
||||
perspective.
|
||||
|
||||
Metric collection is performed via a single-threaded call to Collect that
|
||||
sweeps through all records in the SDK, checkpointing their state. When a
|
||||
record is discovered that has no references and has not been updated since
|
||||
the prior collection pass, it is removed from the Map.
|
||||
|
||||
Both synchronous and asynchronous instruments have an associated
|
||||
aggregator, which maintains the current state resulting from all metric
|
||||
events since its last checkpoint. Aggregators may be lock-free or they may
|
||||
use locking, but they should expect to be called concurrently. Aggregators
|
||||
must be capable of merging with another aggregator of the same type.
|
||||
|
||||
Export Pipeline
|
||||
|
||||
While the SDK serves to maintain a current set of records and
|
||||
coordinate collection, the behavior of a metrics export pipeline is
|
||||
configured through the export types in
|
||||
go.opentelemetry.io/otel/sdk/metric/export. It is important to keep
|
||||
in mind the context these interfaces are called from. There are two
|
||||
contexts, instrumentation context, where a user-level goroutine that
|
||||
enters the SDK resulting in a new record, and collection context,
|
||||
where a system-level thread performs a collection pass through the
|
||||
SDK.
|
||||
|
||||
Descriptor is a struct that describes the metric instrument to the
|
||||
export pipeline, containing the name, units, description, metric kind,
|
||||
number kind (int64 or float64). A Descriptor accompanies metric data
|
||||
as it passes through the export pipeline.
|
||||
|
||||
The AggregatorSelector interface supports choosing the method of
|
||||
aggregation to apply to a particular instrument, by delegating the
|
||||
construction of an Aggregator to this interface. Given the Descriptor,
|
||||
the AggregatorFor method returns an implementation of Aggregator. If this
|
||||
interface returns nil, the metric will be disabled. The aggregator should
|
||||
be matched to the capabilities of the exporter. Selecting the aggregator
|
||||
for Adding instruments is relatively straightforward, but many options
|
||||
are available for aggregating distributions from Grouping instruments.
|
||||
|
||||
Aggregator is an interface which implements a concrete strategy for
|
||||
aggregating metric updates. Several Aggregator implementations are
|
||||
provided by the SDK. Aggregators may be lock-free or use locking,
|
||||
depending on their structure and semantics. Aggregators implement an
|
||||
Update method, called in instrumentation context, to receive a single
|
||||
metric event. Aggregators implement a Checkpoint method, called in
|
||||
collection context, to save a checkpoint of the current state.
|
||||
Aggregators implement a Merge method, also called in collection
|
||||
context, that combines state from two aggregators into one. Each SDK
|
||||
record has an associated aggregator.
|
||||
|
||||
Processor is an interface which sits between the SDK and an exporter.
|
||||
The Processor embeds an AggregatorSelector, used by the SDK to assign
|
||||
new Aggregators. The Processor supports a Process() API for submitting
|
||||
checkpointed aggregators to the processor, and a Reader() API
|
||||
for producing a complete checkpoint for the exporter. Two default
|
||||
Processor implementations are provided, the "defaultkeys" Processor groups
|
||||
aggregate metrics by their recommended Descriptor.Keys(), the
|
||||
"simple" Processor aggregates metrics at full dimensionality.
|
||||
|
||||
Reader is an interface between the Processor and the Exporter.
|
||||
After completing a collection pass, the Processor.Reader() method
|
||||
returns a Reader, which the Exporter uses to iterate over all
|
||||
the updated metrics.
|
||||
|
||||
Record is a struct containing the state of an individual exported
|
||||
metric. This is the result of one collection interface for one
|
||||
instrument and one attribute set.
|
||||
|
||||
Exporter is the final stage of an export pipeline. It is called with
|
||||
a Reader capable of enumerating all the updated metrics.
|
||||
|
||||
Controller is not an export interface per se, but it orchestrates the
|
||||
export pipeline. For example, a "push" controller will establish a
|
||||
periodic timer to regularly collect and export metrics. A "pull"
|
||||
controller will await a pull request before initiating metric
|
||||
collection. Either way, the job of the controller is to call the SDK
|
||||
Collect() method, then read the checkpoint, then invoke the exporter.
|
||||
Controllers are expected to implement the public metric.MeterProvider
|
||||
API, meaning they can be installed as the global Meter provider.
|
||||
|
||||
*/
|
||||
// Package metric provides an implementation of the OpenTelemetry metric SDK.
|
||||
//
|
||||
// See https://opentelemetry.io/docs/concepts/signals/metrics/ for information
|
||||
// about the concept of OpenTelemetry metrics and
|
||||
// https://opentelemetry.io/docs/concepts/components/ for more information
|
||||
// about OpenTelemetry SDKs.
|
||||
//
|
||||
// The entry point for the metric package is the MeterProvider. It is the
|
||||
// object that all API calls use to create Meters, instruments, and ultimately
|
||||
// make metric measurements. Also, it is an object that should be used to
|
||||
// control the life-cycle (start, flush, and shutdown) of the SDK.
|
||||
//
|
||||
// A MeterProvider needs to be configured to export the measured data, this is
|
||||
// done by configuring it with a Reader implementation (using the WithReader
|
||||
// MeterProviderOption). Readers take two forms: ones that push to an endpoint
|
||||
// (NewPeriodicReader), and ones that an endpoint pulls from. See the
|
||||
// go.opentelemetry.io/otel/exporters package for exporters that can be used as
|
||||
// or with these Readers.
|
||||
//
|
||||
// Each Reader, when registered with the MeterProvider, can be augmented with a
|
||||
// View. Views allow users that run OpenTelemetry instrumented code to modify
|
||||
// the generated data of that instrumentation. See the
|
||||
// go.opentelemetry.io/otel/sdk/metric/view package for more information about
|
||||
// Views.
|
||||
//
|
||||
// The data generated by a MeterProvider needs to include information about its
|
||||
// origin. A MeterProvider needs to be configured with a Resource, using the
|
||||
// WithResource MeterProviderOption, to include this information. This Resource
|
||||
// should be used to describe the unique runtime environment instrumented code
|
||||
// is being run on. That way when multiple instances of the code are collected
|
||||
// at a single endpoint their origin is decipherable.
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
119
vendor/go.opentelemetry.io/otel/sdk/metric/export/aggregation/aggregation.go
generated
vendored
119
vendor/go.opentelemetry.io/otel/sdk/metric/export/aggregation/aggregation.go
generated
vendored
@ -1,119 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package aggregation // import "go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
)
|
||||
|
||||
// These interfaces describe the various ways to access state from an
|
||||
// Aggregation.
|
||||
|
||||
type (
|
||||
// Aggregation is an interface returned by the Aggregator
|
||||
// containing an interval of metric data.
|
||||
Aggregation interface {
|
||||
// Kind returns a short identifying string to identify
|
||||
// the Aggregator that was used to produce the
|
||||
// Aggregation (e.g., "Sum").
|
||||
Kind() Kind
|
||||
}
|
||||
|
||||
// Sum returns an aggregated sum.
|
||||
Sum interface {
|
||||
Aggregation
|
||||
Sum() (number.Number, error)
|
||||
}
|
||||
|
||||
// Count returns the number of values that were aggregated.
|
||||
Count interface {
|
||||
Aggregation
|
||||
Count() (uint64, error)
|
||||
}
|
||||
|
||||
// LastValue returns the latest value that was aggregated.
|
||||
LastValue interface {
|
||||
Aggregation
|
||||
LastValue() (number.Number, time.Time, error)
|
||||
}
|
||||
|
||||
// Buckets represents histogram buckets boundaries and counts.
|
||||
//
|
||||
// For a Histogram with N defined boundaries, e.g, [x, y, z].
|
||||
// There are N+1 counts: [-inf, x), [x, y), [y, z), [z, +inf].
|
||||
Buckets struct {
|
||||
// Boundaries are floating point numbers, even when
|
||||
// aggregating integers.
|
||||
Boundaries []float64
|
||||
|
||||
// Counts holds the count in each bucket.
|
||||
Counts []uint64
|
||||
}
|
||||
|
||||
// Histogram returns the count of events in pre-determined buckets.
|
||||
Histogram interface {
|
||||
Aggregation
|
||||
Count() (uint64, error)
|
||||
Sum() (number.Number, error)
|
||||
Histogram() (Buckets, error)
|
||||
}
|
||||
)
|
||||
|
||||
type (
|
||||
// Kind is a short name for the Aggregator that produces an
|
||||
// Aggregation, used for descriptive purpose only. Kind is a
|
||||
// string to allow user-defined Aggregators.
|
||||
//
|
||||
// When deciding how to handle an Aggregation, Exporters are
|
||||
// encouraged to decide based on conversion to the above
|
||||
// interfaces based on strength, not on Kind value, when
|
||||
// deciding how to expose metric data. This enables
|
||||
// user-supplied Aggregators to replace builtin Aggregators.
|
||||
//
|
||||
// For example, test for a Histogram before testing for a
|
||||
// Sum, and so on.
|
||||
Kind string
|
||||
)
|
||||
|
||||
// Kind description constants.
|
||||
const (
|
||||
SumKind Kind = "Sum"
|
||||
HistogramKind Kind = "Histogram"
|
||||
LastValueKind Kind = "Lastvalue"
|
||||
)
|
||||
|
||||
// Sentinel errors for Aggregation interface.
|
||||
var (
|
||||
ErrNegativeInput = fmt.Errorf("negative value is out of range for this instrument")
|
||||
ErrNaNInput = fmt.Errorf("invalid input value: NaN")
|
||||
ErrInconsistentType = fmt.Errorf("inconsistent aggregator types")
|
||||
|
||||
// ErrNoCumulativeToDelta is returned when requesting delta
|
||||
// export kind for a precomputed sum instrument.
|
||||
ErrNoCumulativeToDelta = fmt.Errorf("cumulative to delta not implemented")
|
||||
|
||||
// ErrNoData is returned when (due to a race with collection)
|
||||
// the Aggregator is check-pointed before the first value is set.
|
||||
// The aggregator should simply be skipped in this case.
|
||||
ErrNoData = fmt.Errorf("no data collected by this aggregator")
|
||||
)
|
||||
|
||||
// String returns the string value of Kind.
|
||||
func (k Kind) String() string {
|
||||
return string(k)
|
||||
}
|
117
vendor/go.opentelemetry.io/otel/sdk/metric/export/aggregation/temporality.go
generated
vendored
117
vendor/go.opentelemetry.io/otel/sdk/metric/export/aggregation/temporality.go
generated
vendored
@ -1,117 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:generate stringer -type=Temporality
|
||||
|
||||
package aggregation // import "go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
)
|
||||
|
||||
// Temporality indicates the temporal aggregation exported by an exporter.
|
||||
// These bits may be OR-d together when multiple exporters are in use.
|
||||
type Temporality uint8
|
||||
|
||||
const (
|
||||
// CumulativeTemporality indicates that an Exporter expects a
|
||||
// Cumulative Aggregation.
|
||||
CumulativeTemporality Temporality = 1
|
||||
|
||||
// DeltaTemporality indicates that an Exporter expects a
|
||||
// Delta Aggregation.
|
||||
DeltaTemporality Temporality = 2
|
||||
)
|
||||
|
||||
// Includes returns if t includes support for other temporality.
|
||||
func (t Temporality) Includes(other Temporality) bool {
|
||||
return t&other != 0
|
||||
}
|
||||
|
||||
// MemoryRequired returns whether an exporter of this temporality requires
|
||||
// memory to export correctly.
|
||||
func (t Temporality) MemoryRequired(mkind sdkapi.InstrumentKind) bool {
|
||||
switch mkind {
|
||||
case sdkapi.HistogramInstrumentKind, sdkapi.GaugeObserverInstrumentKind,
|
||||
sdkapi.CounterInstrumentKind, sdkapi.UpDownCounterInstrumentKind:
|
||||
// Delta-oriented instruments:
|
||||
return t.Includes(CumulativeTemporality)
|
||||
|
||||
case sdkapi.CounterObserverInstrumentKind, sdkapi.UpDownCounterObserverInstrumentKind:
|
||||
// Cumulative-oriented instruments:
|
||||
return t.Includes(DeltaTemporality)
|
||||
}
|
||||
// Something unexpected is happening--we could panic. This
|
||||
// will become an error when the exporter tries to access a
|
||||
// checkpoint, presumably, so let it be.
|
||||
return false
|
||||
}
|
||||
|
||||
type (
|
||||
constantTemporalitySelector Temporality
|
||||
statelessTemporalitySelector struct{}
|
||||
)
|
||||
|
||||
var (
|
||||
_ TemporalitySelector = constantTemporalitySelector(0)
|
||||
_ TemporalitySelector = statelessTemporalitySelector{}
|
||||
)
|
||||
|
||||
// ConstantTemporalitySelector returns an TemporalitySelector that returns
|
||||
// a constant Temporality.
|
||||
func ConstantTemporalitySelector(t Temporality) TemporalitySelector {
|
||||
return constantTemporalitySelector(t)
|
||||
}
|
||||
|
||||
// CumulativeTemporalitySelector returns an TemporalitySelector that
|
||||
// always returns CumulativeTemporality.
|
||||
func CumulativeTemporalitySelector() TemporalitySelector {
|
||||
return ConstantTemporalitySelector(CumulativeTemporality)
|
||||
}
|
||||
|
||||
// DeltaTemporalitySelector returns an TemporalitySelector that
|
||||
// always returns DeltaTemporality.
|
||||
func DeltaTemporalitySelector() TemporalitySelector {
|
||||
return ConstantTemporalitySelector(DeltaTemporality)
|
||||
}
|
||||
|
||||
// StatelessTemporalitySelector returns an TemporalitySelector that
|
||||
// always returns the Temporality that avoids long-term memory
|
||||
// requirements.
|
||||
func StatelessTemporalitySelector() TemporalitySelector {
|
||||
return statelessTemporalitySelector{}
|
||||
}
|
||||
|
||||
// TemporalityFor implements TemporalitySelector.
|
||||
func (c constantTemporalitySelector) TemporalityFor(_ *sdkapi.Descriptor, _ Kind) Temporality {
|
||||
return Temporality(c)
|
||||
}
|
||||
|
||||
// TemporalityFor implements TemporalitySelector.
|
||||
func (s statelessTemporalitySelector) TemporalityFor(desc *sdkapi.Descriptor, kind Kind) Temporality {
|
||||
if kind == SumKind && desc.InstrumentKind().PrecomputedSum() {
|
||||
return CumulativeTemporality
|
||||
}
|
||||
return DeltaTemporality
|
||||
}
|
||||
|
||||
// TemporalitySelector is a sub-interface of Exporter used to indicate
|
||||
// whether the Processor should compute Delta or Cumulative
|
||||
// Aggregations.
|
||||
type TemporalitySelector interface {
|
||||
// TemporalityFor should return the correct Temporality that
|
||||
// should be used when exporting data for the given metric
|
||||
// instrument and Aggregator kind.
|
||||
TemporalityFor(descriptor *sdkapi.Descriptor, aggregationKind Kind) Temporality
|
||||
}
|
280
vendor/go.opentelemetry.io/otel/sdk/metric/export/metric.go
generated
vendored
280
vendor/go.opentelemetry.io/otel/sdk/metric/export/metric.go
generated
vendored
@ -1,280 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package export // import "go.opentelemetry.io/otel/sdk/metric/export"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
// Processor is responsible for deciding which kind of aggregation to
|
||||
// use (via AggregatorSelector), gathering exported results from the
|
||||
// SDK during collection, and deciding over which dimensions to group
|
||||
// the exported data.
|
||||
//
|
||||
// The SDK supports binding only one of these interfaces, as it has
|
||||
// the sole responsibility of determining which Aggregator to use for
|
||||
// each record.
|
||||
//
|
||||
// The embedded AggregatorSelector interface is called (concurrently)
|
||||
// in instrumentation context to select the appropriate Aggregator for
|
||||
// an instrument.
|
||||
//
|
||||
// The `Process` method is called during collection in a
|
||||
// single-threaded context from the SDK, after the aggregator is
|
||||
// checkpointed, allowing the processor to build the set of metrics
|
||||
// currently being exported.
|
||||
type Processor interface {
|
||||
// AggregatorSelector is responsible for selecting the
|
||||
// concrete type of Aggregator used for a metric in the SDK.
|
||||
//
|
||||
// This may be a static decision based on fields of the
|
||||
// Descriptor, or it could use an external configuration
|
||||
// source to customize the treatment of each metric
|
||||
// instrument.
|
||||
//
|
||||
// The result from AggregatorSelector.AggregatorFor should be
|
||||
// the same type for a given Descriptor or else nil. The same
|
||||
// type should be returned for a given descriptor, because
|
||||
// Aggregators only know how to Merge with their own type. If
|
||||
// the result is nil, the metric instrument will be disabled.
|
||||
//
|
||||
// Note that the SDK only calls AggregatorFor when new records
|
||||
// require an Aggregator. This does not provide a way to
|
||||
// disable metrics with active records.
|
||||
AggregatorSelector
|
||||
|
||||
// Process is called by the SDK once per internal record, passing the
|
||||
// export Accumulation (a Descriptor, the corresponding attributes, and
|
||||
// the checkpointed Aggregator). This call has no Context argument because
|
||||
// it is expected to perform only computation. An SDK is not expected to
|
||||
// call exporters from with Process, use a controller for that (see
|
||||
// ./controllers/{pull,push}.
|
||||
Process(accum Accumulation) error
|
||||
}
|
||||
|
||||
// AggregatorSelector supports selecting the kind of Aggregator to
|
||||
// use at runtime for a specific metric instrument.
|
||||
type AggregatorSelector interface {
|
||||
// AggregatorFor allocates a variable number of aggregators of
|
||||
// a kind suitable for the requested export. This method
|
||||
// initializes a `...*Aggregator`, to support making a single
|
||||
// allocation.
|
||||
//
|
||||
// When the call returns without initializing the *Aggregator
|
||||
// to a non-nil value, the metric instrument is explicitly
|
||||
// disabled.
|
||||
//
|
||||
// This must return a consistent type to avoid confusion in
|
||||
// later stages of the metrics export process, i.e., when
|
||||
// Merging or Checkpointing aggregators for a specific
|
||||
// instrument.
|
||||
//
|
||||
// Note: This is context-free because the aggregator should
|
||||
// not relate to the incoming context. This call should not
|
||||
// block.
|
||||
AggregatorFor(descriptor *sdkapi.Descriptor, agg ...*aggregator.Aggregator)
|
||||
}
|
||||
|
||||
// Checkpointer is the interface used by a Controller to coordinate
|
||||
// the Processor with Accumulator(s) and Exporter(s). The
|
||||
// StartCollection() and FinishCollection() methods start and finish a
|
||||
// collection interval. Controllers call the Accumulator(s) during
|
||||
// collection to process Accumulations.
|
||||
type Checkpointer interface {
|
||||
// Processor processes metric data for export. The Process
|
||||
// method is bracketed by StartCollection and FinishCollection
|
||||
// calls. The embedded AggregatorSelector can be called at
|
||||
// any time.
|
||||
Processor
|
||||
|
||||
// Reader returns the current data set. This may be
|
||||
// called before and after collection. The
|
||||
// implementation is required to return the same value
|
||||
// throughout its lifetime, since Reader exposes a
|
||||
// sync.Locker interface. The caller is responsible for
|
||||
// locking the Reader before initiating collection.
|
||||
Reader() Reader
|
||||
|
||||
// StartCollection begins a collection interval.
|
||||
StartCollection()
|
||||
|
||||
// FinishCollection ends a collection interval.
|
||||
FinishCollection() error
|
||||
}
|
||||
|
||||
// CheckpointerFactory is an interface for producing configured
|
||||
// Checkpointer instances.
|
||||
type CheckpointerFactory interface {
|
||||
NewCheckpointer() Checkpointer
|
||||
}
|
||||
|
||||
// Exporter handles presentation of the checkpoint of aggregate
|
||||
// metrics. This is the final stage of a metrics export pipeline,
|
||||
// where metric data are formatted for a specific system.
|
||||
type Exporter interface {
|
||||
// Export is called immediately after completing a collection
|
||||
// pass in the SDK.
|
||||
//
|
||||
// The Context comes from the controller that initiated
|
||||
// collection.
|
||||
//
|
||||
// The InstrumentationLibraryReader interface refers to the
|
||||
// Processor that just completed collection.
|
||||
Export(ctx context.Context, res *resource.Resource, reader InstrumentationLibraryReader) error
|
||||
|
||||
// TemporalitySelector is an interface used by the Processor
|
||||
// in deciding whether to compute Delta or Cumulative
|
||||
// Aggregations when passing Records to this Exporter.
|
||||
aggregation.TemporalitySelector
|
||||
}
|
||||
|
||||
// InstrumentationLibraryReader is an interface for exporters to iterate
|
||||
// over one instrumentation library of metric data at a time.
|
||||
type InstrumentationLibraryReader interface {
|
||||
// ForEach calls the passed function once per instrumentation library,
|
||||
// allowing the caller to emit metrics grouped by the library that
|
||||
// produced them.
|
||||
ForEach(readerFunc func(instrumentation.Library, Reader) error) error
|
||||
}
|
||||
|
||||
// Reader allows a controller to access a complete checkpoint of
|
||||
// aggregated metrics from the Processor for a single library of
|
||||
// metric data. This is passed to the Exporter which may then use
|
||||
// ForEach to iterate over the collection of aggregated metrics.
|
||||
type Reader interface {
|
||||
// ForEach iterates over aggregated checkpoints for all
|
||||
// metrics that were updated during the last collection
|
||||
// period. Each aggregated checkpoint returned by the
|
||||
// function parameter may return an error.
|
||||
//
|
||||
// The TemporalitySelector argument is used to determine
|
||||
// whether the Record is computed using Delta or Cumulative
|
||||
// aggregation.
|
||||
//
|
||||
// ForEach tolerates ErrNoData silently, as this is
|
||||
// expected from the Meter implementation. Any other kind
|
||||
// of error will immediately halt ForEach and return
|
||||
// the error to the caller.
|
||||
ForEach(tempSelector aggregation.TemporalitySelector, recordFunc func(Record) error) error
|
||||
|
||||
// Locker supports locking the checkpoint set. Collection
|
||||
// into the checkpoint set cannot take place (in case of a
|
||||
// stateful processor) while it is locked.
|
||||
//
|
||||
// The Processor attached to the Accumulator MUST be called
|
||||
// with the lock held.
|
||||
sync.Locker
|
||||
|
||||
// RLock acquires a read lock corresponding to this Locker.
|
||||
RLock()
|
||||
// RUnlock releases a read lock corresponding to this Locker.
|
||||
RUnlock()
|
||||
}
|
||||
|
||||
// Metadata contains the common elements for exported metric data that
|
||||
// are shared by the Accumulator->Processor and Processor->Exporter
|
||||
// steps.
|
||||
type Metadata struct {
|
||||
descriptor *sdkapi.Descriptor
|
||||
attrs *attribute.Set
|
||||
}
|
||||
|
||||
// Accumulation contains the exported data for a single metric instrument
|
||||
// and attribute set, as prepared by an Accumulator for the Processor.
|
||||
type Accumulation struct {
|
||||
Metadata
|
||||
aggregator aggregator.Aggregator
|
||||
}
|
||||
|
||||
// Record contains the exported data for a single metric instrument
|
||||
// and attribute set, as prepared by the Processor for the Exporter.
|
||||
// This includes the effective start and end time for the aggregation.
|
||||
type Record struct {
|
||||
Metadata
|
||||
aggregation aggregation.Aggregation
|
||||
start time.Time
|
||||
end time.Time
|
||||
}
|
||||
|
||||
// Descriptor describes the metric instrument being exported.
|
||||
func (m Metadata) Descriptor() *sdkapi.Descriptor {
|
||||
return m.descriptor
|
||||
}
|
||||
|
||||
// Attributes returns the attribute set associated with the instrument and the
|
||||
// aggregated data.
|
||||
func (m Metadata) Attributes() *attribute.Set {
|
||||
return m.attrs
|
||||
}
|
||||
|
||||
// NewAccumulation allows Accumulator implementations to construct new
|
||||
// Accumulations to send to Processors. The Descriptor, attributes, and
|
||||
// Aggregator represent aggregate metric events received over a single
|
||||
// collection period.
|
||||
func NewAccumulation(descriptor *sdkapi.Descriptor, attrs *attribute.Set, agg aggregator.Aggregator) Accumulation {
|
||||
return Accumulation{
|
||||
Metadata: Metadata{
|
||||
descriptor: descriptor,
|
||||
attrs: attrs,
|
||||
},
|
||||
aggregator: agg,
|
||||
}
|
||||
}
|
||||
|
||||
// Aggregator returns the checkpointed aggregator. It is safe to
|
||||
// access the checkpointed state without locking.
|
||||
func (r Accumulation) Aggregator() aggregator.Aggregator {
|
||||
return r.aggregator
|
||||
}
|
||||
|
||||
// NewRecord allows Processor implementations to construct export records.
|
||||
// The Descriptor, attributes, and Aggregator represent aggregate metric
|
||||
// events received over a single collection period.
|
||||
func NewRecord(descriptor *sdkapi.Descriptor, attrs *attribute.Set, agg aggregation.Aggregation, start, end time.Time) Record {
|
||||
return Record{
|
||||
Metadata: Metadata{
|
||||
descriptor: descriptor,
|
||||
attrs: attrs,
|
||||
},
|
||||
aggregation: agg,
|
||||
start: start,
|
||||
end: end,
|
||||
}
|
||||
}
|
||||
|
||||
// Aggregation returns the aggregation, an interface to the record and
|
||||
// its aggregator, dependent on the kind of both the input and exporter.
|
||||
func (r Record) Aggregation() aggregation.Aggregation {
|
||||
return r.aggregation
|
||||
}
|
||||
|
||||
// StartTime is the start time of the interval covered by this aggregation.
|
||||
func (r Record) StartTime() time.Time {
|
||||
return r.start
|
||||
}
|
||||
|
||||
// EndTime is the end time of the interval covered by this aggregation.
|
||||
func (r Record) EndTime() time.Time {
|
||||
return r.end
|
||||
}
|
58
vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go
generated
vendored
Normal file
58
vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go
generated
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// ErrExporterShutdown is returned if Export or Shutdown are called after an
|
||||
// Exporter has been Shutdown.
|
||||
var ErrExporterShutdown = fmt.Errorf("exporter is shutdown")
|
||||
|
||||
// Exporter handles the delivery of metric data to external receivers. This is
|
||||
// the final component in the metric push pipeline.
|
||||
type Exporter interface {
|
||||
// Export serializes and transmits metric data to a receiver.
|
||||
//
|
||||
// This is called synchronously, there is no concurrency safety
|
||||
// requirement. Because of this, it is critical that all timeouts and
|
||||
// cancellations of the passed context be honored.
|
||||
//
|
||||
// All retry logic must be contained in this function. The SDK does not
|
||||
// implement any retry logic. All errors returned by this function are
|
||||
// considered unrecoverable and will be reported to a configured error
|
||||
// Handler.
|
||||
Export(context.Context, metricdata.ResourceMetrics) error
|
||||
|
||||
// ForceFlush flushes any metric data held by an exporter.
|
||||
//
|
||||
// The deadline or cancellation of the passed context must be honored. An
|
||||
// appropriate error should be returned in these situations.
|
||||
ForceFlush(context.Context) error
|
||||
|
||||
// Shutdown flushes all metric data held by an exporter and releases any
|
||||
// held computational resources.
|
||||
//
|
||||
// The deadline or cancellation of the passed context must be honored. An
|
||||
// appropriate error should be returned in these situations.
|
||||
//
|
||||
// After Shutdown is called, calls to Export will perform no operation and
|
||||
// instead will return an error indicating the shutdown state.
|
||||
Shutdown(context.Context) error
|
||||
}
|
96
vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
generated
vendored
Normal file
96
vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
generated
vendored
Normal file
@ -0,0 +1,96 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
"go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
|
||||
"go.opentelemetry.io/otel/metric/instrument/asyncint64"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncint64"
|
||||
"go.opentelemetry.io/otel/metric/unit"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// instrumentID are the identifying properties of an instrument.
|
||||
type instrumentID struct {
|
||||
// Name is the name of the instrument.
|
||||
Name string
|
||||
// Description is the description of the instrument.
|
||||
Description string
|
||||
// Unit is the unit of the instrument.
|
||||
Unit unit.Unit
|
||||
// Aggregation is the aggregation data type of the instrument.
|
||||
Aggregation string
|
||||
// Monotonic is the monotonicity of an instruments data type. This field is
|
||||
// not used for all data types, so a zero value needs to be understood in the
|
||||
// context of Aggregation.
|
||||
Monotonic bool
|
||||
// Temporality is the temporality of an instrument's data type. This field
|
||||
// is not used by some data types.
|
||||
Temporality metricdata.Temporality
|
||||
// Number is the number type of the instrument.
|
||||
Number string
|
||||
}
|
||||
|
||||
type instrumentImpl[N int64 | float64] struct {
|
||||
instrument.Asynchronous
|
||||
instrument.Synchronous
|
||||
|
||||
aggregators []internal.Aggregator[N]
|
||||
}
|
||||
|
||||
var _ asyncfloat64.Counter = &instrumentImpl[float64]{}
|
||||
var _ asyncfloat64.UpDownCounter = &instrumentImpl[float64]{}
|
||||
var _ asyncfloat64.Gauge = &instrumentImpl[float64]{}
|
||||
var _ asyncint64.Counter = &instrumentImpl[int64]{}
|
||||
var _ asyncint64.UpDownCounter = &instrumentImpl[int64]{}
|
||||
var _ asyncint64.Gauge = &instrumentImpl[int64]{}
|
||||
var _ syncfloat64.Counter = &instrumentImpl[float64]{}
|
||||
var _ syncfloat64.UpDownCounter = &instrumentImpl[float64]{}
|
||||
var _ syncfloat64.Histogram = &instrumentImpl[float64]{}
|
||||
var _ syncint64.Counter = &instrumentImpl[int64]{}
|
||||
var _ syncint64.UpDownCounter = &instrumentImpl[int64]{}
|
||||
var _ syncint64.Histogram = &instrumentImpl[int64]{}
|
||||
|
||||
func (i *instrumentImpl[N]) Observe(ctx context.Context, val N, attrs ...attribute.KeyValue) {
|
||||
// Only record a value if this is being called from the MetricProvider.
|
||||
_, ok := ctx.Value(produceKey).(struct{})
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
i.aggregate(ctx, val, attrs)
|
||||
}
|
||||
|
||||
func (i *instrumentImpl[N]) Add(ctx context.Context, val N, attrs ...attribute.KeyValue) {
|
||||
i.aggregate(ctx, val, attrs)
|
||||
}
|
||||
|
||||
func (i *instrumentImpl[N]) Record(ctx context.Context, val N, attrs ...attribute.KeyValue) {
|
||||
i.aggregate(ctx, val, attrs)
|
||||
}
|
||||
|
||||
func (i *instrumentImpl[N]) aggregate(ctx context.Context, val N, attrs []attribute.KeyValue) {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return
|
||||
}
|
||||
for _, agg := range i.aggregators {
|
||||
agg.Aggregate(val, attribute.NewSet(attrs...))
|
||||
}
|
||||
}
|
272
vendor/go.opentelemetry.io/otel/sdk/metric/instrument_provider.go
generated
vendored
Normal file
272
vendor/go.opentelemetry.io/otel/sdk/metric/instrument_provider.go
generated
vendored
Normal file
@ -0,0 +1,272 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
"go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
|
||||
"go.opentelemetry.io/otel/metric/instrument/asyncint64"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncint64"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/view"
|
||||
)
|
||||
|
||||
type asyncInt64Provider struct {
|
||||
scope instrumentation.Scope
|
||||
resolve *resolver[int64]
|
||||
}
|
||||
|
||||
var _ asyncint64.InstrumentProvider = asyncInt64Provider{}
|
||||
|
||||
// Counter creates an instrument for recording increasing values.
|
||||
func (p asyncInt64Provider) Counter(name string, opts ...instrument.Option) (asyncint64.Counter, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
|
||||
aggs, err := p.resolve.Aggregators(view.Instrument{
|
||||
Scope: p.scope,
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Kind: view.AsyncCounter,
|
||||
}, cfg.Unit())
|
||||
if len(aggs) == 0 && err != nil {
|
||||
err = fmt.Errorf("instrument does not match any view: %w", err)
|
||||
}
|
||||
|
||||
return &instrumentImpl[int64]{
|
||||
aggregators: aggs,
|
||||
}, err
|
||||
}
|
||||
|
||||
// UpDownCounter creates an instrument for recording changes of a value.
|
||||
func (p asyncInt64Provider) UpDownCounter(name string, opts ...instrument.Option) (asyncint64.UpDownCounter, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
|
||||
aggs, err := p.resolve.Aggregators(view.Instrument{
|
||||
Scope: p.scope,
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Kind: view.AsyncUpDownCounter,
|
||||
}, cfg.Unit())
|
||||
if len(aggs) == 0 && err != nil {
|
||||
err = fmt.Errorf("instrument does not match any view: %w", err)
|
||||
}
|
||||
return &instrumentImpl[int64]{
|
||||
aggregators: aggs,
|
||||
}, err
|
||||
}
|
||||
|
||||
// Gauge creates an instrument for recording the current value.
|
||||
func (p asyncInt64Provider) Gauge(name string, opts ...instrument.Option) (asyncint64.Gauge, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
|
||||
aggs, err := p.resolve.Aggregators(view.Instrument{
|
||||
Scope: p.scope,
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Kind: view.AsyncGauge,
|
||||
}, cfg.Unit())
|
||||
if len(aggs) == 0 && err != nil {
|
||||
err = fmt.Errorf("instrument does not match any view: %w", err)
|
||||
}
|
||||
return &instrumentImpl[int64]{
|
||||
aggregators: aggs,
|
||||
}, err
|
||||
}
|
||||
|
||||
type asyncFloat64Provider struct {
|
||||
scope instrumentation.Scope
|
||||
resolve *resolver[float64]
|
||||
}
|
||||
|
||||
var _ asyncfloat64.InstrumentProvider = asyncFloat64Provider{}
|
||||
|
||||
// Counter creates an instrument for recording increasing values.
|
||||
func (p asyncFloat64Provider) Counter(name string, opts ...instrument.Option) (asyncfloat64.Counter, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
|
||||
aggs, err := p.resolve.Aggregators(view.Instrument{
|
||||
Scope: p.scope,
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Kind: view.AsyncCounter,
|
||||
}, cfg.Unit())
|
||||
if len(aggs) == 0 && err != nil {
|
||||
err = fmt.Errorf("instrument does not match any view: %w", err)
|
||||
}
|
||||
return &instrumentImpl[float64]{
|
||||
aggregators: aggs,
|
||||
}, err
|
||||
}
|
||||
|
||||
// UpDownCounter creates an instrument for recording changes of a value.
|
||||
func (p asyncFloat64Provider) UpDownCounter(name string, opts ...instrument.Option) (asyncfloat64.UpDownCounter, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
|
||||
aggs, err := p.resolve.Aggregators(view.Instrument{
|
||||
Scope: p.scope,
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Kind: view.AsyncUpDownCounter,
|
||||
}, cfg.Unit())
|
||||
if len(aggs) == 0 && err != nil {
|
||||
err = fmt.Errorf("instrument does not match any view: %w", err)
|
||||
}
|
||||
return &instrumentImpl[float64]{
|
||||
aggregators: aggs,
|
||||
}, err
|
||||
}
|
||||
|
||||
// Gauge creates an instrument for recording the current value.
|
||||
func (p asyncFloat64Provider) Gauge(name string, opts ...instrument.Option) (asyncfloat64.Gauge, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
|
||||
aggs, err := p.resolve.Aggregators(view.Instrument{
|
||||
Scope: p.scope,
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Kind: view.AsyncGauge,
|
||||
}, cfg.Unit())
|
||||
if len(aggs) == 0 && err != nil {
|
||||
err = fmt.Errorf("instrument does not match any view: %w", err)
|
||||
}
|
||||
return &instrumentImpl[float64]{
|
||||
aggregators: aggs,
|
||||
}, err
|
||||
}
|
||||
|
||||
type syncInt64Provider struct {
|
||||
scope instrumentation.Scope
|
||||
resolve *resolver[int64]
|
||||
}
|
||||
|
||||
var _ syncint64.InstrumentProvider = syncInt64Provider{}
|
||||
|
||||
// Counter creates an instrument for recording increasing values.
|
||||
func (p syncInt64Provider) Counter(name string, opts ...instrument.Option) (syncint64.Counter, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
|
||||
aggs, err := p.resolve.Aggregators(view.Instrument{
|
||||
Scope: p.scope,
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Kind: view.SyncCounter,
|
||||
}, cfg.Unit())
|
||||
if len(aggs) == 0 && err != nil {
|
||||
err = fmt.Errorf("instrument does not match any view: %w", err)
|
||||
}
|
||||
return &instrumentImpl[int64]{
|
||||
aggregators: aggs,
|
||||
}, err
|
||||
}
|
||||
|
||||
// UpDownCounter creates an instrument for recording changes of a value.
|
||||
func (p syncInt64Provider) UpDownCounter(name string, opts ...instrument.Option) (syncint64.UpDownCounter, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
|
||||
aggs, err := p.resolve.Aggregators(view.Instrument{
|
||||
Scope: p.scope,
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Kind: view.SyncUpDownCounter,
|
||||
}, cfg.Unit())
|
||||
if len(aggs) == 0 && err != nil {
|
||||
err = fmt.Errorf("instrument does not match any view: %w", err)
|
||||
}
|
||||
return &instrumentImpl[int64]{
|
||||
aggregators: aggs,
|
||||
}, err
|
||||
}
|
||||
|
||||
// Histogram creates an instrument for recording the current value.
|
||||
func (p syncInt64Provider) Histogram(name string, opts ...instrument.Option) (syncint64.Histogram, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
|
||||
aggs, err := p.resolve.Aggregators(view.Instrument{
|
||||
Scope: p.scope,
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Kind: view.SyncHistogram,
|
||||
}, cfg.Unit())
|
||||
if len(aggs) == 0 && err != nil {
|
||||
err = fmt.Errorf("instrument does not match any view: %w", err)
|
||||
}
|
||||
return &instrumentImpl[int64]{
|
||||
aggregators: aggs,
|
||||
}, err
|
||||
}
|
||||
|
||||
type syncFloat64Provider struct {
|
||||
scope instrumentation.Scope
|
||||
resolve *resolver[float64]
|
||||
}
|
||||
|
||||
var _ syncfloat64.InstrumentProvider = syncFloat64Provider{}
|
||||
|
||||
// Counter creates an instrument for recording increasing values.
|
||||
func (p syncFloat64Provider) Counter(name string, opts ...instrument.Option) (syncfloat64.Counter, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
|
||||
aggs, err := p.resolve.Aggregators(view.Instrument{
|
||||
Scope: p.scope,
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Kind: view.SyncCounter,
|
||||
}, cfg.Unit())
|
||||
if len(aggs) == 0 && err != nil {
|
||||
err = fmt.Errorf("instrument does not match any view: %w", err)
|
||||
}
|
||||
return &instrumentImpl[float64]{
|
||||
aggregators: aggs,
|
||||
}, err
|
||||
}
|
||||
|
||||
// UpDownCounter creates an instrument for recording changes of a value.
|
||||
func (p syncFloat64Provider) UpDownCounter(name string, opts ...instrument.Option) (syncfloat64.UpDownCounter, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
|
||||
aggs, err := p.resolve.Aggregators(view.Instrument{
|
||||
Scope: p.scope,
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Kind: view.SyncUpDownCounter,
|
||||
}, cfg.Unit())
|
||||
if len(aggs) == 0 && err != nil {
|
||||
err = fmt.Errorf("instrument does not match any view: %w", err)
|
||||
}
|
||||
return &instrumentImpl[float64]{
|
||||
aggregators: aggs,
|
||||
}, err
|
||||
}
|
||||
|
||||
// Histogram creates an instrument for recording the current value.
|
||||
func (p syncFloat64Provider) Histogram(name string, opts ...instrument.Option) (syncfloat64.Histogram, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
|
||||
aggs, err := p.resolve.Aggregators(view.Instrument{
|
||||
Scope: p.scope,
|
||||
Name: name,
|
||||
Description: cfg.Description(),
|
||||
Kind: view.SyncHistogram,
|
||||
}, cfg.Unit())
|
||||
if len(aggs) == 0 && err != nil {
|
||||
err = fmt.Errorf("instrument does not match any view: %w", err)
|
||||
}
|
||||
return &instrumentImpl[float64]{
|
||||
aggregators: aggs,
|
||||
}, err
|
||||
}
|
40
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregator.go
generated
vendored
Normal file
40
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregator.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// now is used to return the current local time while allowing tests to
// override the default time.Now function.
var now = time.Now

// Aggregator forms an aggregation from a collection of recorded measurements.
//
// Aggregators need to be comparable so they can be de-duplicated by the SDK when
// it creates them for multiple views.
type Aggregator[N int64 | float64] interface {
	// Aggregate records the measurement, scoped by attr, and aggregates it
	// into an aggregation.
	Aggregate(measurement N, attr attribute.Set)

	// Aggregation returns an Aggregation, for all the aggregated
	// measurements made and ends an aggregation cycle.
	Aggregation() metricdata.Aggregation
}
|
@ -12,12 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package number provides a number abstraction for instruments that
|
||||
either support int64 or float64 input values.
|
||||
|
||||
This package is currently in a pre-GA phase. Backwards incompatible changes
|
||||
may be introduced in subsequent minor version releases as we work to track the
|
||||
evolving OpenTelemetry specification and user feedback.
|
||||
*/
|
||||
package number // import "go.opentelemetry.io/otel/sdk/metric/number"
|
||||
// Package internal provides types and functionality used to aggregate and
|
||||
// cycle the state of metric measurements made by the SDK. These types and
|
||||
// functionality are meant only for internal SDK use.
|
||||
package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
|
64
vendor/go.opentelemetry.io/otel/sdk/metric/internal/filter.go
generated
vendored
Normal file
64
vendor/go.opentelemetry.io/otel/sdk/metric/internal/filter.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// filter is an aggregator that applies attribute filter when Aggregating. filters
|
||||
// do not have any backing memory, and must be constructed with a backing Aggregator.
|
||||
type filter[N int64 | float64] struct {
|
||||
filter func(attribute.Set) attribute.Set
|
||||
aggregator Aggregator[N]
|
||||
|
||||
sync.Mutex
|
||||
seen map[attribute.Set]attribute.Set
|
||||
}
|
||||
|
||||
// NewFilter wraps an Aggregator with an attribute filtering function.
|
||||
func NewFilter[N int64 | float64](agg Aggregator[N], fn func(attribute.Set) attribute.Set) Aggregator[N] {
|
||||
if fn == nil {
|
||||
return agg
|
||||
}
|
||||
return &filter[N]{
|
||||
filter: fn,
|
||||
aggregator: agg,
|
||||
seen: map[attribute.Set]attribute.Set{},
|
||||
}
|
||||
}
|
||||
|
||||
// Aggregate records the measurement, scoped by attr, and aggregates it
|
||||
// into an aggregation.
|
||||
func (f *filter[N]) Aggregate(measurement N, attr attribute.Set) {
|
||||
// TODO (#3006): drop stale attributes from seen.
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
fAttr, ok := f.seen[attr]
|
||||
if !ok {
|
||||
fAttr = f.filter(attr)
|
||||
f.seen[attr] = fAttr
|
||||
}
|
||||
f.aggregator.Aggregate(measurement, fAttr)
|
||||
}
|
||||
|
||||
// Aggregation returns an Aggregation, for all the aggregated
|
||||
// measurements made and ends an aggregation cycle.
|
||||
func (f *filter[N]) Aggregation() metricdata.Aggregation {
|
||||
return f.aggregator.Aggregation()
|
||||
}
|
240
vendor/go.opentelemetry.io/otel/sdk/metric/internal/histogram.go
generated
vendored
Normal file
240
vendor/go.opentelemetry.io/otel/sdk/metric/internal/histogram.go
generated
vendored
Normal file
@ -0,0 +1,240 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// buckets holds the state of one explicit-bucket histogram series: the
// per-bin counts plus running count, sum, min, and max of recorded values.
type buckets struct {
	counts   []uint64
	count    uint64
	sum      float64
	min, max float64
}

// newBuckets returns buckets with n bins.
func newBuckets(n int) *buckets {
	b := &buckets{counts: make([]uint64, n)}
	return b
}

// bin records value into bin idx, updating the running aggregate state.
// Callers are expected to seed min/max with the first recorded value.
func (b *buckets) bin(idx int, value float64) {
	b.counts[idx]++
	b.count++
	b.sum += value
	switch {
	case value < b.min:
		b.min = value
	case value > b.max:
		b.max = value
	}
}
|
||||
|
||||
// histValues summarizes a set of measurements as an histValues with
|
||||
// explicitly defined buckets.
|
||||
type histValues[N int64 | float64] struct {
|
||||
bounds []float64
|
||||
|
||||
values map[attribute.Set]*buckets
|
||||
valuesMu sync.Mutex
|
||||
}
|
||||
|
||||
func newHistValues[N int64 | float64](bounds []float64) *histValues[N] {
|
||||
// The responsibility of keeping all buckets correctly associated with the
|
||||
// passed boundaries is ultimately this type's responsibility. Make a copy
|
||||
// here so we can always guarantee this. Or, in the case of failure, have
|
||||
// complete control over the fix.
|
||||
b := make([]float64, len(bounds))
|
||||
copy(b, bounds)
|
||||
sort.Float64s(b)
|
||||
return &histValues[N]{
|
||||
bounds: b,
|
||||
values: make(map[attribute.Set]*buckets),
|
||||
}
|
||||
}
|
||||
|
||||
// Aggregate records the measurement value, scoped by attr, and aggregates it
|
||||
// into a histogram.
|
||||
func (s *histValues[N]) Aggregate(value N, attr attribute.Set) {
|
||||
// Accept all types to satisfy the Aggregator interface. However, since
|
||||
// the Aggregation produced by this Aggregator is only float64, convert
|
||||
// here to only use this type.
|
||||
v := float64(value)
|
||||
|
||||
// This search will return an index in the range [0, len(s.bounds)], where
|
||||
// it will return len(s.bounds) if value is greater than the last element
|
||||
// of s.bounds. This aligns with the buckets in that the length of buckets
|
||||
// is len(s.bounds)+1, with the last bucket representing:
|
||||
// (s.bounds[len(s.bounds)-1], +∞).
|
||||
idx := sort.SearchFloat64s(s.bounds, v)
|
||||
|
||||
s.valuesMu.Lock()
|
||||
defer s.valuesMu.Unlock()
|
||||
|
||||
b, ok := s.values[attr]
|
||||
if !ok {
|
||||
// N+1 buckets. For example:
|
||||
//
|
||||
// bounds = [0, 5, 10]
|
||||
//
|
||||
// Then,
|
||||
//
|
||||
// buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
|
||||
b = newBuckets(len(s.bounds) + 1)
|
||||
// Ensure min and max are recorded values (not zero), for new buckets.
|
||||
b.min, b.max = v, v
|
||||
s.values[attr] = b
|
||||
}
|
||||
b.bin(idx, v)
|
||||
}
|
||||
|
||||
// NewDeltaHistogram returns an Aggregator that summarizes a set of
|
||||
// measurements as an histogram. Each histogram is scoped by attributes and
|
||||
// the aggregation cycle the measurements were made in.
|
||||
//
|
||||
// Each aggregation cycle is treated independently. When the returned
|
||||
// Aggregator's Aggregations method is called it will reset all histogram
|
||||
// counts to zero.
|
||||
func NewDeltaHistogram[N int64 | float64](cfg aggregation.ExplicitBucketHistogram) Aggregator[N] {
|
||||
return &deltaHistogram[N]{
|
||||
histValues: newHistValues[N](cfg.Boundaries),
|
||||
noMinMax: cfg.NoMinMax,
|
||||
start: now(),
|
||||
}
|
||||
}
|
||||
|
||||
// deltaHistogram summarizes a set of measurements made in a single
|
||||
// aggregation cycle as an histogram with explicitly defined buckets.
|
||||
type deltaHistogram[N int64 | float64] struct {
|
||||
*histValues[N]
|
||||
|
||||
noMinMax bool
|
||||
start time.Time
|
||||
}
|
||||
|
||||
func (s *deltaHistogram[N]) Aggregation() metricdata.Aggregation {
|
||||
h := metricdata.Histogram{Temporality: metricdata.DeltaTemporality}
|
||||
|
||||
s.valuesMu.Lock()
|
||||
defer s.valuesMu.Unlock()
|
||||
|
||||
if len(s.values) == 0 {
|
||||
return h
|
||||
}
|
||||
|
||||
// Do not allow modification of our copy of bounds.
|
||||
bounds := make([]float64, len(s.bounds))
|
||||
copy(bounds, s.bounds)
|
||||
t := now()
|
||||
h.DataPoints = make([]metricdata.HistogramDataPoint, 0, len(s.values))
|
||||
for a, b := range s.values {
|
||||
hdp := metricdata.HistogramDataPoint{
|
||||
Attributes: a,
|
||||
StartTime: s.start,
|
||||
Time: t,
|
||||
Count: b.count,
|
||||
Bounds: bounds,
|
||||
BucketCounts: b.counts,
|
||||
Sum: b.sum,
|
||||
}
|
||||
if !s.noMinMax {
|
||||
hdp.Min = &b.min
|
||||
hdp.Max = &b.max
|
||||
}
|
||||
h.DataPoints = append(h.DataPoints, hdp)
|
||||
|
||||
// Unused attribute sets do not report.
|
||||
delete(s.values, a)
|
||||
}
|
||||
// The delta collection cycle resets.
|
||||
s.start = t
|
||||
return h
|
||||
}
|
||||
|
||||
// NewCumulativeHistogram returns an Aggregator that summarizes a set of
|
||||
// measurements as an histogram. Each histogram is scoped by attributes.
|
||||
//
|
||||
// Each aggregation cycle builds from the previous, the histogram counts are
|
||||
// the bucketed counts of all values aggregated since the returned Aggregator
|
||||
// was created.
|
||||
func NewCumulativeHistogram[N int64 | float64](cfg aggregation.ExplicitBucketHistogram) Aggregator[N] {
|
||||
return &cumulativeHistogram[N]{
|
||||
histValues: newHistValues[N](cfg.Boundaries),
|
||||
noMinMax: cfg.NoMinMax,
|
||||
start: now(),
|
||||
}
|
||||
}
|
||||
|
||||
// cumulativeHistogram summarizes a set of measurements made over all
|
||||
// aggregation cycles as an histogram with explicitly defined buckets.
|
||||
type cumulativeHistogram[N int64 | float64] struct {
|
||||
*histValues[N]
|
||||
|
||||
noMinMax bool
|
||||
start time.Time
|
||||
}
|
||||
|
||||
func (s *cumulativeHistogram[N]) Aggregation() metricdata.Aggregation {
|
||||
h := metricdata.Histogram{Temporality: metricdata.CumulativeTemporality}
|
||||
|
||||
s.valuesMu.Lock()
|
||||
defer s.valuesMu.Unlock()
|
||||
|
||||
if len(s.values) == 0 {
|
||||
return h
|
||||
}
|
||||
|
||||
// Do not allow modification of our copy of bounds.
|
||||
bounds := make([]float64, len(s.bounds))
|
||||
copy(bounds, s.bounds)
|
||||
t := now()
|
||||
h.DataPoints = make([]metricdata.HistogramDataPoint, 0, len(s.values))
|
||||
for a, b := range s.values {
|
||||
// The HistogramDataPoint field values returned need to be copies of
|
||||
// the buckets value as we will keep updating them.
|
||||
//
|
||||
// TODO (#3047): Making copies for bounds and counts incurs a large
|
||||
// memory allocation footprint. Alternatives should be explored.
|
||||
counts := make([]uint64, len(b.counts))
|
||||
copy(counts, b.counts)
|
||||
|
||||
hdp := metricdata.HistogramDataPoint{
|
||||
Attributes: a,
|
||||
StartTime: s.start,
|
||||
Time: t,
|
||||
Count: b.count,
|
||||
Bounds: bounds,
|
||||
BucketCounts: counts,
|
||||
Sum: b.sum,
|
||||
}
|
||||
if !s.noMinMax {
|
||||
// Similar to counts, make a copy.
|
||||
min, max := b.min, b.max
|
||||
hdp.Min = &min
|
||||
hdp.Max = &max
|
||||
}
|
||||
h.DataPoints = append(h.DataPoints, hdp)
|
||||
// TODO (#3006): This will use an unbounded amount of memory if there
|
||||
// are unbounded number of attribute sets being aggregated. Attribute
|
||||
// sets that become "stale" need to be forgotten so this will not
|
||||
// overload the system.
|
||||
}
|
||||
return h
|
||||
}
|
74
vendor/go.opentelemetry.io/otel/sdk/metric/internal/lastvalue.go
generated
vendored
Normal file
74
vendor/go.opentelemetry.io/otel/sdk/metric/internal/lastvalue.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// datapoint is timestamped measurement data.
|
||||
type datapoint[N int64 | float64] struct {
|
||||
timestamp time.Time
|
||||
value N
|
||||
}
|
||||
|
||||
// lastValue summarizes a set of measurements as the last one made.
|
||||
type lastValue[N int64 | float64] struct {
|
||||
sync.Mutex
|
||||
|
||||
values map[attribute.Set]datapoint[N]
|
||||
}
|
||||
|
||||
// NewLastValue returns an Aggregator that summarizes a set of measurements as
|
||||
// the last one made.
|
||||
func NewLastValue[N int64 | float64]() Aggregator[N] {
|
||||
return &lastValue[N]{values: make(map[attribute.Set]datapoint[N])}
|
||||
}
|
||||
|
||||
func (s *lastValue[N]) Aggregate(value N, attr attribute.Set) {
|
||||
d := datapoint[N]{timestamp: now(), value: value}
|
||||
s.Lock()
|
||||
s.values[attr] = d
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
func (s *lastValue[N]) Aggregation() metricdata.Aggregation {
|
||||
gauge := metricdata.Gauge[N]{}
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if len(s.values) == 0 {
|
||||
return gauge
|
||||
}
|
||||
|
||||
gauge.DataPoints = make([]metricdata.DataPoint[N], 0, len(s.values))
|
||||
for a, v := range s.values {
|
||||
gauge.DataPoints = append(gauge.DataPoints, metricdata.DataPoint[N]{
|
||||
Attributes: a,
|
||||
// The event time is the only meaningful timestamp, StartTime is
|
||||
// ignored.
|
||||
Time: v.timestamp,
|
||||
Value: v.value,
|
||||
})
|
||||
// Do not report stale values.
|
||||
delete(s.values, a)
|
||||
}
|
||||
return gauge
|
||||
}
|
211
vendor/go.opentelemetry.io/otel/sdk/metric/internal/sum.go
generated
vendored
Normal file
211
vendor/go.opentelemetry.io/otel/sdk/metric/internal/sum.go
generated
vendored
Normal file
@ -0,0 +1,211 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// valueMap is the storage for all sums.
|
||||
type valueMap[N int64 | float64] struct {
|
||||
sync.Mutex
|
||||
values map[attribute.Set]N
|
||||
}
|
||||
|
||||
func newValueMap[N int64 | float64]() *valueMap[N] {
|
||||
return &valueMap[N]{values: make(map[attribute.Set]N)}
|
||||
}
|
||||
|
||||
func (s *valueMap[N]) set(value N, attr attribute.Set) { // nolint: unused // This is indeed used.
|
||||
s.Lock()
|
||||
s.values[attr] = value
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
func (s *valueMap[N]) Aggregate(value N, attr attribute.Set) {
|
||||
s.Lock()
|
||||
s.values[attr] += value
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
// NewDeltaSum returns an Aggregator that summarizes a set of measurements as
|
||||
// their arithmetic sum. Each sum is scoped by attributes and the aggregation
|
||||
// cycle the measurements were made in.
|
||||
//
|
||||
// The monotonic value is used to communicate the produced Aggregation is
|
||||
// monotonic or not. The returned Aggregator does not make any guarantees this
|
||||
// value is accurate. It is up to the caller to ensure it.
|
||||
//
|
||||
// Each aggregation cycle is treated independently. When the returned
|
||||
// Aggregator's Aggregation method is called it will reset all sums to zero.
|
||||
func NewDeltaSum[N int64 | float64](monotonic bool) Aggregator[N] {
|
||||
return newDeltaSum[N](monotonic)
|
||||
}
|
||||
|
||||
func newDeltaSum[N int64 | float64](monotonic bool) *deltaSum[N] {
|
||||
return &deltaSum[N]{
|
||||
valueMap: newValueMap[N](),
|
||||
monotonic: monotonic,
|
||||
start: now(),
|
||||
}
|
||||
}
|
||||
|
||||
// deltaSum summarizes a set of measurements made in a single aggregation
|
||||
// cycle as their arithmetic sum.
|
||||
type deltaSum[N int64 | float64] struct {
|
||||
*valueMap[N]
|
||||
|
||||
monotonic bool
|
||||
start time.Time
|
||||
}
|
||||
|
||||
func (s *deltaSum[N]) Aggregation() metricdata.Aggregation {
|
||||
out := metricdata.Sum[N]{
|
||||
Temporality: metricdata.DeltaTemporality,
|
||||
IsMonotonic: s.monotonic,
|
||||
}
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if len(s.values) == 0 {
|
||||
return out
|
||||
}
|
||||
|
||||
t := now()
|
||||
out.DataPoints = make([]metricdata.DataPoint[N], 0, len(s.values))
|
||||
for attr, value := range s.values {
|
||||
out.DataPoints = append(out.DataPoints, metricdata.DataPoint[N]{
|
||||
Attributes: attr,
|
||||
StartTime: s.start,
|
||||
Time: t,
|
||||
Value: value,
|
||||
})
|
||||
// Unused attribute sets do not report.
|
||||
delete(s.values, attr)
|
||||
}
|
||||
// The delta collection cycle resets.
|
||||
s.start = t
|
||||
return out
|
||||
}
|
||||
|
||||
// NewCumulativeSum returns an Aggregator that summarizes a set of
|
||||
// measurements as their arithmetic sum. Each sum is scoped by attributes and
|
||||
// the aggregation cycle the measurements were made in.
|
||||
//
|
||||
// The monotonic value is used to communicate the produced Aggregation is
|
||||
// monotonic or not. The returned Aggregator does not make any guarantees this
|
||||
// value is accurate. It is up to the caller to ensure it.
|
||||
//
|
||||
// Each aggregation cycle is treated independently. When the returned
|
||||
// Aggregator's Aggregation method is called it will reset all sums to zero.
|
||||
func NewCumulativeSum[N int64 | float64](monotonic bool) Aggregator[N] {
|
||||
return newCumulativeSum[N](monotonic)
|
||||
}
|
||||
|
||||
func newCumulativeSum[N int64 | float64](monotonic bool) *cumulativeSum[N] {
|
||||
return &cumulativeSum[N]{
|
||||
valueMap: newValueMap[N](),
|
||||
monotonic: monotonic,
|
||||
start: now(),
|
||||
}
|
||||
}
|
||||
|
||||
// cumulativeSum summarizes a set of measurements made over all aggregation
|
||||
// cycles as their arithmetic sum.
|
||||
type cumulativeSum[N int64 | float64] struct {
|
||||
*valueMap[N]
|
||||
|
||||
monotonic bool
|
||||
start time.Time
|
||||
}
|
||||
|
||||
func (s *cumulativeSum[N]) Aggregation() metricdata.Aggregation {
|
||||
out := metricdata.Sum[N]{
|
||||
Temporality: metricdata.CumulativeTemporality,
|
||||
IsMonotonic: s.monotonic,
|
||||
}
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if len(s.values) == 0 {
|
||||
return out
|
||||
}
|
||||
|
||||
t := now()
|
||||
out.DataPoints = make([]metricdata.DataPoint[N], 0, len(s.values))
|
||||
for attr, value := range s.values {
|
||||
out.DataPoints = append(out.DataPoints, metricdata.DataPoint[N]{
|
||||
Attributes: attr,
|
||||
StartTime: s.start,
|
||||
Time: t,
|
||||
Value: value,
|
||||
})
|
||||
// TODO (#3006): This will use an unbounded amount of memory if there
|
||||
// are unbounded number of attribute sets being aggregated. Attribute
|
||||
// sets that become "stale" need to be forgotten so this will not
|
||||
// overload the system.
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// NewPrecomputedDeltaSum returns an Aggregator that summarizes a set of
// measurements as their pre-computed arithmetic sum. Each sum is scoped by
// attributes and the aggregation cycle the measurements were made in.
//
// The monotonic value is used to communicate the produced Aggregation is
// monotonic or not. The returned Aggregator does not make any guarantees this
// value is accurate. It is up to the caller to ensure it.
//
// The output Aggregation will report recorded values as delta temporality. It
// is up to the caller to ensure this is accurate.
func NewPrecomputedDeltaSum[N int64 | float64](monotonic bool) Aggregator[N] {
	return &precomputedSum[N]{settableSum: newDeltaSum[N](monotonic)}
}

// NewPrecomputedCumulativeSum returns an Aggregator that summarizes a set of
// measurements as their pre-computed arithmetic sum. Each sum is scoped by
// attributes and the aggregation cycle the measurements were made in.
//
// The monotonic value is used to communicate the produced Aggregation is
// monotonic or not. The returned Aggregator does not make any guarantees this
// value is accurate. It is up to the caller to ensure it.
//
// The output Aggregation will report recorded values as cumulative
// temporality. It is up to the caller to ensure this is accurate.
func NewPrecomputedCumulativeSum[N int64 | float64](monotonic bool) Aggregator[N] {
	return &precomputedSum[N]{settableSum: newCumulativeSum[N](monotonic)}
}

// settableSum is the subset of a sum aggregator needed by precomputedSum:
// direct overwrite of a series value plus aggregation collection.
type settableSum[N int64 | float64] interface {
	// set overwrites the stored sum for attr with value.
	set(value N, attr attribute.Set)
	// Aggregation returns an Aggregation of all aggregated measurements.
	Aggregation() metricdata.Aggregation
}

// precomputedSum summarizes a set of measurements recorded over all
// aggregation cycles directly as an arithmetic sum.
type precomputedSum[N int64 | float64] struct {
	settableSum[N]
}

// Aggregate records value directly as a sum for attr.
func (s *precomputedSum[N]) Aggregate(value N, attr attribute.Set) {
	s.set(value, attr)
}
|
131
vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
generated
vendored
Normal file
131
vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
generated
vendored
Normal file
@ -0,0 +1,131 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/metric/view"
|
||||
)
|
||||
|
||||
// manualReader is a a simple Reader that allows an application to
|
||||
// read metrics on demand.
|
||||
type manualReader struct {
|
||||
producer atomic.Value
|
||||
shutdownOnce sync.Once
|
||||
|
||||
temporalitySelector TemporalitySelector
|
||||
aggregationSelector AggregationSelector
|
||||
}
|
||||
|
||||
// Compile time check the manualReader implements Reader and is comparable.
|
||||
var _ = map[Reader]struct{}{&manualReader{}: {}}
|
||||
|
||||
// NewManualReader returns a Reader which is directly called to collect metrics.
|
||||
func NewManualReader(opts ...ManualReaderOption) Reader {
|
||||
cfg := newManualReaderConfig(opts)
|
||||
return &manualReader{
|
||||
temporalitySelector: cfg.temporalitySelector,
|
||||
aggregationSelector: cfg.aggregationSelector,
|
||||
}
|
||||
}
|
||||
|
||||
// register stores the Producer which enables the caller to read
|
||||
// metrics on demand.
|
||||
func (mr *manualReader) register(p producer) {
|
||||
// Only register once. If producer is already set, do nothing.
|
||||
if !mr.producer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
|
||||
msg := "did not register manual reader"
|
||||
global.Error(errDuplicateRegister, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// temporality reports the Temporality for the instrument kind provided.
|
||||
func (mr *manualReader) temporality(kind view.InstrumentKind) metricdata.Temporality {
|
||||
return mr.temporalitySelector(kind)
|
||||
}
|
||||
|
||||
// aggregation returns what Aggregation to use for kind.
|
||||
func (mr *manualReader) aggregation(kind view.InstrumentKind) aggregation.Aggregation { // nolint:revive // import-shadow for method scoped by type.
|
||||
return mr.aggregationSelector(kind)
|
||||
}
|
||||
|
||||
// ForceFlush is a no-op, it always returns nil.
|
||||
func (mr *manualReader) ForceFlush(context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown closes any connections and frees any resources used by the reader.
|
||||
func (mr *manualReader) Shutdown(context.Context) error {
|
||||
err := ErrReaderShutdown
|
||||
mr.shutdownOnce.Do(func() {
|
||||
// Any future call to Collect will now return ErrReaderShutdown.
|
||||
mr.producer.Store(produceHolder{
|
||||
produce: shutdownProducer{}.produce,
|
||||
})
|
||||
err = nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// Collect gathers all metrics from the SDK, calling any callbacks necessary.
|
||||
// Collect will return an error if called after shutdown.
|
||||
func (mr *manualReader) Collect(ctx context.Context) (metricdata.ResourceMetrics, error) {
|
||||
p := mr.producer.Load()
|
||||
if p == nil {
|
||||
return metricdata.ResourceMetrics{}, ErrReaderNotRegistered
|
||||
}
|
||||
|
||||
ph, ok := p.(produceHolder)
|
||||
if !ok {
|
||||
// The atomic.Value is entirely in the periodicReader's control so
|
||||
// this should never happen. In the unforeseen case that this does
|
||||
// happen, return an error instead of panicking so a users code does
|
||||
// not halt in the processes.
|
||||
err := fmt.Errorf("manual reader: invalid producer: %T", p)
|
||||
return metricdata.ResourceMetrics{}, err
|
||||
}
|
||||
|
||||
return ph.produce(ctx)
|
||||
}
|
||||
|
||||
// manualReaderConfig contains configuration options for a ManualReader.
|
||||
type manualReaderConfig struct {
|
||||
temporalitySelector TemporalitySelector
|
||||
aggregationSelector AggregationSelector
|
||||
}
|
||||
|
||||
// newManualReaderConfig returns a manualReaderConfig configured with options.
|
||||
func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig {
|
||||
cfg := manualReaderConfig{
|
||||
temporalitySelector: DefaultTemporalitySelector,
|
||||
aggregationSelector: DefaultAggregationSelector,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
cfg = opt.applyManual(cfg)
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
// ManualReaderOption applies a configuration option value to a ManualReader.
|
||||
type ManualReaderOption interface {
|
||||
applyManual(manualReaderConfig) manualReaderConfig
|
||||
}
|
92
vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
generated
vendored
Normal file
92
vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
"go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
|
||||
"go.opentelemetry.io/otel/metric/instrument/asyncint64"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncint64"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
)
|
||||
|
||||
// meter handles the creation and coordination of all metric instruments. A
|
||||
// meter represents a single instrumentation scope; all metric telemetry
|
||||
// produced by an instrumentation scope will use metric instruments from a
|
||||
// single meter.
|
||||
type meter struct {
|
||||
instrumentation.Scope
|
||||
|
||||
// *Resolvers are used by the provided instrument providers to resolve new
|
||||
// instruments aggregators and maintain a cache across instruments this
|
||||
// meter owns.
|
||||
int64Resolver resolver[int64]
|
||||
float64Resolver resolver[float64]
|
||||
|
||||
pipes pipelines
|
||||
}
|
||||
|
||||
func newMeter(s instrumentation.Scope, p pipelines) *meter {
|
||||
// viewCache ensures instrument conflicts, including number conflicts, this
|
||||
// meter is asked to create are logged to the user.
|
||||
var viewCache cache[string, instrumentID]
|
||||
|
||||
// Passing nil as the ac parameter to newInstrumentCache will have each
|
||||
// create its own aggregator cache.
|
||||
ic := newInstrumentCache[int64](nil, &viewCache)
|
||||
fc := newInstrumentCache[float64](nil, &viewCache)
|
||||
|
||||
return &meter{
|
||||
Scope: s,
|
||||
pipes: p,
|
||||
|
||||
int64Resolver: newResolver(p, ic),
|
||||
float64Resolver: newResolver(p, fc),
|
||||
}
|
||||
}
|
||||
|
||||
// Compile-time check meter implements metric.Meter.
|
||||
var _ metric.Meter = (*meter)(nil)
|
||||
|
||||
// AsyncInt64 returns the asynchronous integer instrument provider.
|
||||
func (m *meter) AsyncInt64() asyncint64.InstrumentProvider {
|
||||
return asyncInt64Provider{scope: m.Scope, resolve: &m.int64Resolver}
|
||||
}
|
||||
|
||||
// AsyncFloat64 returns the asynchronous floating-point instrument provider.
|
||||
func (m *meter) AsyncFloat64() asyncfloat64.InstrumentProvider {
|
||||
return asyncFloat64Provider{scope: m.Scope, resolve: &m.float64Resolver}
|
||||
}
|
||||
|
||||
// RegisterCallback registers the function f to be called when any of the
|
||||
// insts Collect method is called.
|
||||
func (m *meter) RegisterCallback(insts []instrument.Asynchronous, f func(context.Context)) error {
|
||||
m.pipes.registerCallback(f)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SyncInt64 returns the synchronous integer instrument provider.
|
||||
func (m *meter) SyncInt64() syncint64.InstrumentProvider {
|
||||
return syncInt64Provider{scope: m.Scope, resolve: &m.int64Resolver}
|
||||
}
|
||||
|
||||
// SyncFloat64 returns the synchronous floating-point instrument provider.
|
||||
func (m *meter) SyncFloat64() syncfloat64.InstrumentProvider {
|
||||
return syncFloat64Provider{scope: m.Scope, resolve: &m.float64Resolver}
|
||||
}
|
130
vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
generated
vendored
Normal file
130
vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric/unit"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
// ResourceMetrics is a collection of ScopeMetrics and the associated Resource
|
||||
// that created them.
|
||||
type ResourceMetrics struct {
|
||||
// Resource represents the entity that collected the metrics.
|
||||
Resource *resource.Resource
|
||||
// ScopeMetrics are the collection of metrics with unique Scopes.
|
||||
ScopeMetrics []ScopeMetrics
|
||||
}
|
||||
|
||||
// ScopeMetrics is a collection of Metrics Produces by a Meter.
|
||||
type ScopeMetrics struct {
|
||||
// Scope is the Scope that the Meter was created with.
|
||||
Scope instrumentation.Scope
|
||||
// Metrics are a list of aggregations created by the Meter.
|
||||
Metrics []Metrics
|
||||
}
|
||||
|
||||
// Metrics is a collection of one or more aggregated timeseries from an Instrument.
|
||||
type Metrics struct {
|
||||
// Name is the name of the Instrument that created this data.
|
||||
Name string
|
||||
// Description is the description of the Instrument, which can be used in documentation.
|
||||
Description string
|
||||
// Unit is the unit in which the Instrument reports.
|
||||
Unit unit.Unit
|
||||
// Data is the aggregated data from an Instrument.
|
||||
Data Aggregation
|
||||
}
|
||||
|
||||
// Aggregation is the store of data reported by an Instrument.
|
||||
// It will be one of: Gauge, Sum, Histogram.
|
||||
type Aggregation interface {
|
||||
privateAggregation()
|
||||
}
|
||||
|
||||
// Gauge represents a measurement of the current value of an instrument.
|
||||
type Gauge[N int64 | float64] struct {
|
||||
// DataPoints reprents individual aggregated measurements with unique Attributes.
|
||||
DataPoints []DataPoint[N]
|
||||
}
|
||||
|
||||
func (Gauge[N]) privateAggregation() {}
|
||||
|
||||
// Sum represents the sum of all measurements of values from an instrument.
|
||||
type Sum[N int64 | float64] struct {
|
||||
// DataPoints reprents individual aggregated measurements with unique Attributes.
|
||||
DataPoints []DataPoint[N]
|
||||
// Temporality describes if the aggregation is reported as the change from the
|
||||
// last report time, or the cumulative changes since a fixed start time.
|
||||
Temporality Temporality
|
||||
// IsMonotonic represents if this aggregation only increases or decreases.
|
||||
IsMonotonic bool
|
||||
}
|
||||
|
||||
func (Sum[N]) privateAggregation() {}
|
||||
|
||||
// DataPoint is a single data point in a timeseries.
|
||||
type DataPoint[N int64 | float64] struct {
|
||||
// Attributes is the set of key value pairs that uniquely identify the
|
||||
// timeseries.
|
||||
Attributes attribute.Set
|
||||
// StartTime is when the timeseries was started. (optional)
|
||||
StartTime time.Time `json:",omitempty"`
|
||||
// Time is the time when the timeseries was recorded. (optional)
|
||||
Time time.Time `json:",omitempty"`
|
||||
// Value is the value of this data point.
|
||||
Value N
|
||||
}
|
||||
|
||||
// Histogram represents the histogram of all measurements of values from an instrument.
|
||||
type Histogram struct {
|
||||
// DataPoints reprents individual aggregated measurements with unique Attributes.
|
||||
DataPoints []HistogramDataPoint
|
||||
// Temporality describes if the aggregation is reported as the change from the
|
||||
// last report time, or the cumulative changes since a fixed start time.
|
||||
Temporality Temporality
|
||||
}
|
||||
|
||||
func (Histogram) privateAggregation() {}
|
||||
|
||||
// HistogramDataPoint is a single histogram data point in a timeseries.
|
||||
type HistogramDataPoint struct {
|
||||
// Attributes is the set of key value pairs that uniquely identify the
|
||||
// timeseries.
|
||||
Attributes attribute.Set
|
||||
// StartTime is when the timeseries was started.
|
||||
StartTime time.Time
|
||||
// Time is the time when the timeseries was recorded.
|
||||
Time time.Time
|
||||
|
||||
// Count is the number of updates this histogram has been calculated with.
|
||||
Count uint64
|
||||
// Bounds are the upper bounds of the buckets of the histogram. Because the
|
||||
// last boundary is +infinity this one is implied.
|
||||
Bounds []float64
|
||||
// BucketCounts is the count of each of the buckets.
|
||||
BucketCounts []uint64
|
||||
|
||||
// Min is the minimum value recorded. (optional)
|
||||
Min *float64 `json:",omitempty"`
|
||||
// Max is the maximum value recorded. (optional)
|
||||
Max *float64 `json:",omitempty"`
|
||||
// Sum is the sum of the values recorded.
|
||||
Sum float64
|
||||
}
|
41
vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
generated
vendored
Normal file
41
vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:generate stringer -type=Temporality
|
||||
|
||||
package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
|
||||
// Temporality defines the window that an aggregation was calculated over.
|
||||
type Temporality uint8
|
||||
|
||||
const (
|
||||
// undefinedTemporality represents an unset Temporality.
|
||||
//nolint:deadcode,unused,varcheck
|
||||
undefinedTemporality Temporality = iota
|
||||
|
||||
// CumulativeTemporality defines a measurement interval that continues to
|
||||
// expand forward in time from a starting point. New measurements are
|
||||
// added to all previous measurements since a start time.
|
||||
CumulativeTemporality
|
||||
|
||||
// DeltaTemporality defines a measurement interval that resets each cycle.
|
||||
// Measurements from one cycle are recorded independently, measurements
|
||||
// from other cycles do not affect them.
|
||||
DeltaTemporality
|
||||
)
|
||||
|
||||
// MarshalText returns the byte encoded of t.
|
||||
func (t Temporality) MarshalText() ([]byte, error) {
|
||||
return []byte(t.String()), nil
|
||||
}
|
@ -1,6 +1,6 @@
|
||||
// Code generated by "stringer -type=Temporality"; DO NOT EDIT.
|
||||
|
||||
package aggregation
|
||||
package metricdata
|
||||
|
||||
import "strconv"
|
||||
|
||||
@ -8,18 +8,18 @@ func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[undefinedTemporality-0]
|
||||
_ = x[CumulativeTemporality-1]
|
||||
_ = x[DeltaTemporality-2]
|
||||
}
|
||||
|
||||
const _Temporality_name = "CumulativeTemporalityDeltaTemporality"
|
||||
const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTemporality"
|
||||
|
||||
var _Temporality_index = [...]uint8{0, 21, 37}
|
||||
var _Temporality_index = [...]uint8{0, 20, 41, 57}
|
||||
|
||||
func (i Temporality) String() string {
|
||||
i -= 1
|
||||
if i >= Temporality(len(_Temporality_index)-1) {
|
||||
return "Temporality(" + strconv.FormatInt(int64(i+1), 10) + ")"
|
||||
return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]]
|
||||
}
|
24
vendor/go.opentelemetry.io/otel/sdk/metric/number/kind_string.go
generated
vendored
24
vendor/go.opentelemetry.io/otel/sdk/metric/number/kind_string.go
generated
vendored
@ -1,24 +0,0 @@
|
||||
// Code generated by "stringer -type=Kind"; DO NOT EDIT.
|
||||
|
||||
package number
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[Int64Kind-0]
|
||||
_ = x[Float64Kind-1]
|
||||
}
|
||||
|
||||
const _Kind_name = "Int64KindFloat64Kind"
|
||||
|
||||
var _Kind_index = [...]uint8{0, 9, 20}
|
||||
|
||||
func (i Kind) String() string {
|
||||
if i < 0 || i >= Kind(len(_Kind_index)-1) {
|
||||
return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
|
||||
}
|
538
vendor/go.opentelemetry.io/otel/sdk/metric/number/number.go
generated
vendored
538
vendor/go.opentelemetry.io/otel/sdk/metric/number/number.go
generated
vendored
@ -1,538 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package number // import "go.opentelemetry.io/otel/sdk/metric/number"
|
||||
|
||||
//go:generate stringer -type=Kind
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sync/atomic"
|
||||
|
||||
"go.opentelemetry.io/otel/internal"
|
||||
)
|
||||
|
||||
// Kind describes the data type of the Number.
|
||||
type Kind int8
|
||||
|
||||
const (
|
||||
// Int64Kind means that the Number stores int64.
|
||||
Int64Kind Kind = iota
|
||||
// Float64Kind means that the Number stores float64.
|
||||
Float64Kind
|
||||
)
|
||||
|
||||
// Zero returns a zero value for a given Kind.
|
||||
func (k Kind) Zero() Number {
|
||||
switch k {
|
||||
case Int64Kind:
|
||||
return NewInt64Number(0)
|
||||
case Float64Kind:
|
||||
return NewFloat64Number(0.)
|
||||
default:
|
||||
return Number(0)
|
||||
}
|
||||
}
|
||||
|
||||
// Minimum returns the minimum representable value
|
||||
// for a given Kind.
|
||||
func (k Kind) Minimum() Number {
|
||||
switch k {
|
||||
case Int64Kind:
|
||||
return NewInt64Number(math.MinInt64)
|
||||
case Float64Kind:
|
||||
return NewFloat64Number(-1. * math.MaxFloat64)
|
||||
default:
|
||||
return Number(0)
|
||||
}
|
||||
}
|
||||
|
||||
// Maximum returns the maximum representable value
|
||||
// for a given Kind.
|
||||
func (k Kind) Maximum() Number {
|
||||
switch k {
|
||||
case Int64Kind:
|
||||
return NewInt64Number(math.MaxInt64)
|
||||
case Float64Kind:
|
||||
return NewFloat64Number(math.MaxFloat64)
|
||||
default:
|
||||
return Number(0)
|
||||
}
|
||||
}
|
||||
|
||||
// Number represents either an integral or a floating point value. It
|
||||
// needs to be accompanied with a source of Kind that describes
|
||||
// the actual type of the value stored within Number.
|
||||
type Number uint64
|
||||
|
||||
// - constructors
|
||||
|
||||
// NewNumberFromRaw creates a new Number from a raw value.
|
||||
func NewNumberFromRaw(r uint64) Number {
|
||||
return Number(r)
|
||||
}
|
||||
|
||||
// NewInt64Number creates an integral Number.
|
||||
func NewInt64Number(i int64) Number {
|
||||
return NewNumberFromRaw(internal.Int64ToRaw(i))
|
||||
}
|
||||
|
||||
// NewFloat64Number creates a floating point Number.
|
||||
func NewFloat64Number(f float64) Number {
|
||||
return NewNumberFromRaw(internal.Float64ToRaw(f))
|
||||
}
|
||||
|
||||
// NewNumberSignChange returns a number with the same magnitude and
|
||||
// the opposite sign. `kind` must describe the kind of number in `nn`.
|
||||
func NewNumberSignChange(kind Kind, nn Number) Number {
|
||||
switch kind {
|
||||
case Int64Kind:
|
||||
return NewInt64Number(-nn.AsInt64())
|
||||
case Float64Kind:
|
||||
return NewFloat64Number(-nn.AsFloat64())
|
||||
}
|
||||
return nn
|
||||
}
|
||||
|
||||
// - as x
|
||||
|
||||
// AsNumber gets the Number.
|
||||
func (n *Number) AsNumber() Number {
|
||||
return *n
|
||||
}
|
||||
|
||||
// AsRaw gets the uninterpreted raw value. Might be useful for some
|
||||
// atomic operations.
|
||||
func (n *Number) AsRaw() uint64 {
|
||||
return uint64(*n)
|
||||
}
|
||||
|
||||
// AsInt64 assumes that the value contains an int64 and returns it as
|
||||
// such.
|
||||
func (n *Number) AsInt64() int64 {
|
||||
return internal.RawToInt64(n.AsRaw())
|
||||
}
|
||||
|
||||
// AsFloat64 assumes that the measurement value contains a float64 and
|
||||
// returns it as such.
|
||||
func (n *Number) AsFloat64() float64 {
|
||||
return internal.RawToFloat64(n.AsRaw())
|
||||
}
|
||||
|
||||
// - as x atomic
|
||||
|
||||
// AsNumberAtomic gets the Number atomically.
|
||||
func (n *Number) AsNumberAtomic() Number {
|
||||
return NewNumberFromRaw(n.AsRawAtomic())
|
||||
}
|
||||
|
||||
// AsRawAtomic gets the uninterpreted raw value atomically. Might be
|
||||
// useful for some atomic operations.
|
||||
func (n *Number) AsRawAtomic() uint64 {
|
||||
return atomic.LoadUint64(n.AsRawPtr())
|
||||
}
|
||||
|
||||
// AsInt64Atomic assumes that the number contains an int64 and returns
|
||||
// it as such atomically.
|
||||
func (n *Number) AsInt64Atomic() int64 {
|
||||
return atomic.LoadInt64(n.AsInt64Ptr())
|
||||
}
|
||||
|
||||
// AsFloat64Atomic assumes that the measurement value contains a
|
||||
// float64 and returns it as such atomically.
|
||||
func (n *Number) AsFloat64Atomic() float64 {
|
||||
return internal.RawToFloat64(n.AsRawAtomic())
|
||||
}
|
||||
|
||||
// - as x ptr
|
||||
|
||||
// AsRawPtr gets the pointer to the raw, uninterpreted raw
|
||||
// value. Might be useful for some atomic operations.
|
||||
func (n *Number) AsRawPtr() *uint64 {
|
||||
return (*uint64)(n)
|
||||
}
|
||||
|
||||
// AsInt64Ptr assumes that the number contains an int64 and returns a
|
||||
// pointer to it.
|
||||
func (n *Number) AsInt64Ptr() *int64 {
|
||||
return internal.RawPtrToInt64Ptr(n.AsRawPtr())
|
||||
}
|
||||
|
||||
// AsFloat64Ptr assumes that the number contains a float64 and returns a
|
||||
// pointer to it.
|
||||
func (n *Number) AsFloat64Ptr() *float64 {
|
||||
return internal.RawPtrToFloat64Ptr(n.AsRawPtr())
|
||||
}
|
||||
|
||||
// - coerce
|
||||
|
||||
// CoerceToInt64 casts the number to int64. May result in
|
||||
// data/precision loss.
|
||||
func (n *Number) CoerceToInt64(kind Kind) int64 {
|
||||
switch kind {
|
||||
case Int64Kind:
|
||||
return n.AsInt64()
|
||||
case Float64Kind:
|
||||
return int64(n.AsFloat64())
|
||||
default:
|
||||
// you get what you deserve
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// CoerceToFloat64 casts the number to float64. May result in
|
||||
// data/precision loss.
|
||||
func (n *Number) CoerceToFloat64(kind Kind) float64 {
|
||||
switch kind {
|
||||
case Int64Kind:
|
||||
return float64(n.AsInt64())
|
||||
case Float64Kind:
|
||||
return n.AsFloat64()
|
||||
default:
|
||||
// you get what you deserve
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// - set
|
||||
|
||||
// SetNumber sets the number to the passed number. Both should be of
|
||||
// the same kind.
|
||||
func (n *Number) SetNumber(nn Number) {
|
||||
*n.AsRawPtr() = nn.AsRaw()
|
||||
}
|
||||
|
||||
// SetRaw sets the number to the passed raw value. Both number and the
|
||||
// raw number should represent the same kind.
|
||||
func (n *Number) SetRaw(r uint64) {
|
||||
*n.AsRawPtr() = r
|
||||
}
|
||||
|
||||
// SetInt64 assumes that the number contains an int64 and sets it to
|
||||
// the passed value.
|
||||
func (n *Number) SetInt64(i int64) {
|
||||
*n.AsInt64Ptr() = i
|
||||
}
|
||||
|
||||
// SetFloat64 assumes that the number contains a float64 and sets it
|
||||
// to the passed value.
|
||||
func (n *Number) SetFloat64(f float64) {
|
||||
*n.AsFloat64Ptr() = f
|
||||
}
|
||||
|
||||
// - set atomic
|
||||
|
||||
// SetNumberAtomic sets the number to the passed number
|
||||
// atomically. Both should be of the same kind.
|
||||
func (n *Number) SetNumberAtomic(nn Number) {
|
||||
atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw())
|
||||
}
|
||||
|
||||
// SetRawAtomic sets the number to the passed raw value
|
||||
// atomically. Both number and the raw number should represent the
|
||||
// same kind.
|
||||
func (n *Number) SetRawAtomic(r uint64) {
|
||||
atomic.StoreUint64(n.AsRawPtr(), r)
|
||||
}
|
||||
|
||||
// SetInt64Atomic assumes that the number contains an int64 and sets
|
||||
// it to the passed value atomically.
|
||||
func (n *Number) SetInt64Atomic(i int64) {
|
||||
atomic.StoreInt64(n.AsInt64Ptr(), i)
|
||||
}
|
||||
|
||||
// SetFloat64Atomic assumes that the number contains a float64 and
|
||||
// sets it to the passed value atomically.
|
||||
func (n *Number) SetFloat64Atomic(f float64) {
|
||||
atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f))
|
||||
}
|
||||
|
||||
// - swap
|
||||
|
||||
// SwapNumber sets the number to the passed number and returns the old
|
||||
// number. Both this number and the passed number should be of the
|
||||
// same kind.
|
||||
func (n *Number) SwapNumber(nn Number) Number {
|
||||
old := *n
|
||||
n.SetNumber(nn)
|
||||
return old
|
||||
}
|
||||
|
||||
// SwapRaw sets the number to the passed raw value and returns the old
|
||||
// raw value. Both number and the raw number should represent the same
|
||||
// kind.
|
||||
func (n *Number) SwapRaw(r uint64) uint64 {
|
||||
old := n.AsRaw()
|
||||
n.SetRaw(r)
|
||||
return old
|
||||
}
|
||||
|
||||
// SwapInt64 assumes that the number contains an int64, sets it to the
|
||||
// passed value and returns the old int64 value.
|
||||
func (n *Number) SwapInt64(i int64) int64 {
|
||||
old := n.AsInt64()
|
||||
n.SetInt64(i)
|
||||
return old
|
||||
}
|
||||
|
||||
// SwapFloat64 assumes that the number contains an float64, sets it to
|
||||
// the passed value and returns the old float64 value.
|
||||
func (n *Number) SwapFloat64(f float64) float64 {
|
||||
old := n.AsFloat64()
|
||||
n.SetFloat64(f)
|
||||
return old
|
||||
}
|
||||
|
||||
// - swap atomic
|
||||
|
||||
// SwapNumberAtomic sets the number to the passed number and returns
|
||||
// the old number atomically. Both this number and the passed number
|
||||
// should be of the same kind.
|
||||
func (n *Number) SwapNumberAtomic(nn Number) Number {
|
||||
return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw()))
|
||||
}
|
||||
|
||||
// SwapRawAtomic sets the number to the passed raw value and returns
|
||||
// the old raw value atomically. Both number and the raw number should
|
||||
// represent the same kind.
|
||||
func (n *Number) SwapRawAtomic(r uint64) uint64 {
|
||||
return atomic.SwapUint64(n.AsRawPtr(), r)
|
||||
}
|
||||
|
||||
// SwapInt64Atomic assumes that the number contains an int64, sets it
|
||||
// to the passed value and returns the old int64 value atomically.
|
||||
func (n *Number) SwapInt64Atomic(i int64) int64 {
|
||||
return atomic.SwapInt64(n.AsInt64Ptr(), i)
|
||||
}
|
||||
|
||||
// SwapFloat64Atomic assumes that the number contains an float64, sets
|
||||
// it to the passed value and returns the old float64 value
|
||||
// atomically.
|
||||
func (n *Number) SwapFloat64Atomic(f float64) float64 {
|
||||
return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f)))
|
||||
}
|
||||
|
||||
// - add
|
||||
|
||||
// AddNumber assumes that this and the passed number are of the passed
|
||||
// kind and adds the passed number to this number.
|
||||
func (n *Number) AddNumber(kind Kind, nn Number) {
|
||||
switch kind {
|
||||
case Int64Kind:
|
||||
n.AddInt64(nn.AsInt64())
|
||||
case Float64Kind:
|
||||
n.AddFloat64(nn.AsFloat64())
|
||||
}
|
||||
}
|
||||
|
||||
// AddRaw assumes that this number and the passed raw value are of the
|
||||
// passed kind and adds the passed raw value to this number.
|
||||
func (n *Number) AddRaw(kind Kind, r uint64) {
|
||||
n.AddNumber(kind, NewNumberFromRaw(r))
|
||||
}
|
||||
|
||||
// AddInt64 assumes that the number contains an int64 and adds the
|
||||
// passed int64 to it.
|
||||
func (n *Number) AddInt64(i int64) {
|
||||
*n.AsInt64Ptr() += i
|
||||
}
|
||||
|
||||
// AddFloat64 assumes that the number contains a float64 and adds the
|
||||
// passed float64 to it.
|
||||
func (n *Number) AddFloat64(f float64) {
|
||||
*n.AsFloat64Ptr() += f
|
||||
}
|
||||
|
||||
// - add atomic
|
||||
|
||||
// AddNumberAtomic assumes that this and the passed number are of the
|
||||
// passed kind and adds the passed number to this number atomically.
|
||||
func (n *Number) AddNumberAtomic(kind Kind, nn Number) {
|
||||
switch kind {
|
||||
case Int64Kind:
|
||||
n.AddInt64Atomic(nn.AsInt64())
|
||||
case Float64Kind:
|
||||
n.AddFloat64Atomic(nn.AsFloat64())
|
||||
}
|
||||
}
|
||||
|
||||
// AddRawAtomic assumes that this number and the passed raw value are
|
||||
// of the passed kind and adds the passed raw value to this number
|
||||
// atomically.
|
||||
func (n *Number) AddRawAtomic(kind Kind, r uint64) {
|
||||
n.AddNumberAtomic(kind, NewNumberFromRaw(r))
|
||||
}
|
||||
|
||||
// AddInt64Atomic assumes that the number contains an int64 and adds
|
||||
// the passed int64 to it atomically.
|
||||
func (n *Number) AddInt64Atomic(i int64) {
|
||||
atomic.AddInt64(n.AsInt64Ptr(), i)
|
||||
}
|
||||
|
||||
// AddFloat64Atomic assumes that the number contains a float64 and
|
||||
// adds the passed float64 to it atomically.
|
||||
func (n *Number) AddFloat64Atomic(f float64) {
|
||||
for {
|
||||
o := n.AsFloat64Atomic()
|
||||
if n.CompareAndSwapFloat64(o, o+f) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// - compare and swap (atomic only)
|
||||
|
||||
// CompareAndSwapNumber does the atomic CAS operation on this
|
||||
// number. This number and passed old and new numbers should be of the
|
||||
// same kind.
|
||||
func (n *Number) CompareAndSwapNumber(on, nn Number) bool {
|
||||
return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw())
|
||||
}
|
||||
|
||||
// CompareAndSwapRaw does the atomic CAS operation on this
|
||||
// number. This number and passed old and new raw values should be of
|
||||
// the same kind.
|
||||
func (n *Number) CompareAndSwapRaw(or, nr uint64) bool {
|
||||
return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr)
|
||||
}
|
||||
|
||||
// CompareAndSwapInt64 assumes that this number contains an int64 and
|
||||
// does the atomic CAS operation on it.
|
||||
func (n *Number) CompareAndSwapInt64(oi, ni int64) bool {
|
||||
return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni)
|
||||
}
|
||||
|
||||
// CompareAndSwapFloat64 assumes that this number contains a float64 and
|
||||
// does the atomic CAS operation on it.
|
||||
func (n *Number) CompareAndSwapFloat64(of, nf float64) bool {
|
||||
return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf))
|
||||
}
|
||||
|
||||
// - compare
|
||||
|
||||
// CompareNumber compares two Numbers given their kind. Both numbers
|
||||
// should have the same kind. This returns:
|
||||
// 0 if the numbers are equal
|
||||
// -1 if the subject `n` is less than the argument `nn`
|
||||
// +1 if the subject `n` is greater than the argument `nn`
|
||||
func (n *Number) CompareNumber(kind Kind, nn Number) int {
|
||||
switch kind {
|
||||
case Int64Kind:
|
||||
return n.CompareInt64(nn.AsInt64())
|
||||
case Float64Kind:
|
||||
return n.CompareFloat64(nn.AsFloat64())
|
||||
default:
|
||||
// you get what you deserve
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// CompareRaw compares two numbers, where one is input as a raw
|
||||
// uint64, interpreting both values as a `kind` of number.
|
||||
func (n *Number) CompareRaw(kind Kind, r uint64) int {
|
||||
return n.CompareNumber(kind, NewNumberFromRaw(r))
|
||||
}
|
||||
|
||||
// CompareInt64 assumes that the Number contains an int64 and performs
|
||||
// a comparison between the value and the other value. It returns the
|
||||
// typical result of the compare function: -1 if the value is less
|
||||
// than the other, 0 if both are equal, 1 if the value is greater than
|
||||
// the other.
|
||||
func (n *Number) CompareInt64(i int64) int {
|
||||
this := n.AsInt64()
|
||||
if this < i {
|
||||
return -1
|
||||
} else if this > i {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// CompareFloat64 assumes that the Number contains a float64 and
|
||||
// performs a comparison between the value and the other value. It
|
||||
// returns the typical result of the compare function: -1 if the value
|
||||
// is less than the other, 0 if both are equal, 1 if the value is
|
||||
// greater than the other.
|
||||
//
|
||||
// Do not compare NaN values.
|
||||
func (n *Number) CompareFloat64(f float64) int {
|
||||
this := n.AsFloat64()
|
||||
if this < f {
|
||||
return -1
|
||||
} else if this > f {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// - relations to zero
|
||||
|
||||
// IsPositive returns true if the actual value is greater than zero.
|
||||
func (n *Number) IsPositive(kind Kind) bool {
|
||||
return n.compareWithZero(kind) > 0
|
||||
}
|
||||
|
||||
// IsNegative returns true if the actual value is less than zero.
|
||||
func (n *Number) IsNegative(kind Kind) bool {
|
||||
return n.compareWithZero(kind) < 0
|
||||
}
|
||||
|
||||
// IsZero returns true if the actual value is equal to zero.
|
||||
func (n *Number) IsZero(kind Kind) bool {
|
||||
return n.compareWithZero(kind) == 0
|
||||
}
|
||||
|
||||
// - misc
|
||||
|
||||
// Emit returns a string representation of the raw value of the
|
||||
// Number. A %d is used for integral values, %f for floating point
|
||||
// values.
|
||||
func (n *Number) Emit(kind Kind) string {
|
||||
switch kind {
|
||||
case Int64Kind:
|
||||
return fmt.Sprintf("%d", n.AsInt64())
|
||||
case Float64Kind:
|
||||
return fmt.Sprintf("%f", n.AsFloat64())
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// AsInterface returns the number as an interface{}, typically used
|
||||
// for Kind-correct JSON conversion.
|
||||
func (n *Number) AsInterface(kind Kind) interface{} {
|
||||
switch kind {
|
||||
case Int64Kind:
|
||||
return n.AsInt64()
|
||||
case Float64Kind:
|
||||
return n.AsFloat64()
|
||||
default:
|
||||
return math.NaN()
|
||||
}
|
||||
}
|
||||
|
||||
// - private stuff
|
||||
|
||||
func (n *Number) compareWithZero(kind Kind) int {
|
||||
switch kind {
|
||||
case Int64Kind:
|
||||
return n.CompareInt64(0)
|
||||
case Float64Kind:
|
||||
return n.CompareFloat64(0.)
|
||||
default:
|
||||
// you get what you deserve
|
||||
return 0
|
||||
}
|
||||
}
|
292
vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
generated
vendored
Normal file
292
vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
generated
vendored
Normal file
@ -0,0 +1,292 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/metric/view"
|
||||
)
|
||||
|
||||
// Default periodic reader timing.
|
||||
const (
|
||||
defaultTimeout = time.Millisecond * 30000
|
||||
defaultInterval = time.Millisecond * 60000
|
||||
)
|
||||
|
||||
// periodicReaderConfig contains configuration options for a PeriodicReader.
|
||||
type periodicReaderConfig struct {
|
||||
interval time.Duration
|
||||
timeout time.Duration
|
||||
temporalitySelector TemporalitySelector
|
||||
aggregationSelector AggregationSelector
|
||||
}
|
||||
|
||||
// newPeriodicReaderConfig returns a periodicReaderConfig configured with
|
||||
// options.
|
||||
func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig {
|
||||
c := periodicReaderConfig{
|
||||
interval: defaultInterval,
|
||||
timeout: defaultTimeout,
|
||||
temporalitySelector: DefaultTemporalitySelector,
|
||||
aggregationSelector: DefaultAggregationSelector,
|
||||
}
|
||||
for _, o := range options {
|
||||
c = o.applyPeriodic(c)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// PeriodicReaderOption applies a configuration option value to a PeriodicReader.
|
||||
type PeriodicReaderOption interface {
|
||||
applyPeriodic(periodicReaderConfig) periodicReaderConfig
|
||||
}
|
||||
|
||||
// periodicReaderOptionFunc applies a set of options to a periodicReaderConfig.
|
||||
type periodicReaderOptionFunc func(periodicReaderConfig) periodicReaderConfig
|
||||
|
||||
// applyPeriodic returns a periodicReaderConfig with option(s) applied.
|
||||
func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) periodicReaderConfig {
|
||||
return o(conf)
|
||||
}
|
||||
|
||||
// WithTimeout configures the time a PeriodicReader waits for an export to
|
||||
// complete before canceling it.
|
||||
//
|
||||
// If this option is not used or d is less than or equal to zero, 30 seconds
|
||||
// is used as the default.
|
||||
func WithTimeout(d time.Duration) PeriodicReaderOption {
|
||||
return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
|
||||
if d <= 0 {
|
||||
return conf
|
||||
}
|
||||
conf.timeout = d
|
||||
return conf
|
||||
})
|
||||
}
|
||||
|
||||
// WithInterval configures the intervening time between exports for a
|
||||
// PeriodicReader.
|
||||
//
|
||||
// If this option is not used or d is less than or equal to zero, 60 seconds
|
||||
// is used as the default.
|
||||
func WithInterval(d time.Duration) PeriodicReaderOption {
|
||||
return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
|
||||
if d <= 0 {
|
||||
return conf
|
||||
}
|
||||
conf.interval = d
|
||||
return conf
|
||||
})
|
||||
}
|
||||
|
||||
// NewPeriodicReader returns a Reader that collects and exports metric data to
|
||||
// the exporter at a defined interval. By default, the returned Reader will
|
||||
// collect and export data every 60 seconds, and will cancel export attempts
|
||||
// that exceed 30 seconds. The export time is not counted towards the interval
|
||||
// between attempts.
|
||||
//
|
||||
// The Collect method of the returned Reader continues to gather and return
|
||||
// metric data to the user. It will not automatically send that data to the
|
||||
// exporter. That is left to the user to accomplish.
|
||||
func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) Reader {
|
||||
conf := newPeriodicReaderConfig(options)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
r := &periodicReader{
|
||||
timeout: conf.timeout,
|
||||
exporter: exporter,
|
||||
flushCh: make(chan chan error),
|
||||
cancel: cancel,
|
||||
done: make(chan struct{}),
|
||||
|
||||
temporalitySelector: conf.temporalitySelector,
|
||||
aggregationSelector: conf.aggregationSelector,
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() { close(r.done) }()
|
||||
r.run(ctx, conf.interval)
|
||||
}()
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// periodicReader is a Reader that continuously collects and exports metric
|
||||
// data at a set interval.
|
||||
type periodicReader struct {
|
||||
producer atomic.Value
|
||||
|
||||
timeout time.Duration
|
||||
exporter Exporter
|
||||
flushCh chan chan error
|
||||
|
||||
temporalitySelector TemporalitySelector
|
||||
aggregationSelector AggregationSelector
|
||||
|
||||
done chan struct{}
|
||||
cancel context.CancelFunc
|
||||
shutdownOnce sync.Once
|
||||
}
|
||||
|
||||
// Compile time check the periodicReader implements Reader and is comparable.
|
||||
var _ = map[Reader]struct{}{&periodicReader{}: {}}
|
||||
|
||||
// newTicker allows testing override.
|
||||
var newTicker = time.NewTicker
|
||||
|
||||
// run continuously collects and exports metric data at the specified
|
||||
// interval. This will run until ctx is canceled or times out.
|
||||
func (r *periodicReader) run(ctx context.Context, interval time.Duration) {
|
||||
ticker := newTicker(interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
err := r.collectAndExport(ctx)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
case errCh := <-r.flushCh:
|
||||
errCh <- r.collectAndExport(ctx)
|
||||
ticker.Reset(interval)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// register registers p as the producer of this reader.
|
||||
func (r *periodicReader) register(p producer) {
|
||||
// Only register once. If producer is already set, do nothing.
|
||||
if !r.producer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
|
||||
msg := "did not register periodic reader"
|
||||
global.Error(errDuplicateRegister, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// temporality reports the Temporality for the instrument kind provided.
|
||||
func (r *periodicReader) temporality(kind view.InstrumentKind) metricdata.Temporality {
|
||||
return r.temporalitySelector(kind)
|
||||
}
|
||||
|
||||
// aggregation returns what Aggregation to use for kind.
|
||||
func (r *periodicReader) aggregation(kind view.InstrumentKind) aggregation.Aggregation { // nolint:revive // import-shadow for method scoped by type.
|
||||
return r.aggregationSelector(kind)
|
||||
}
|
||||
|
||||
// collectAndExport gather all metric data related to the periodicReader r from
|
||||
// the SDK and exports it with r's exporter.
|
||||
func (r *periodicReader) collectAndExport(ctx context.Context) error {
|
||||
m, err := r.Collect(ctx)
|
||||
if err == nil {
|
||||
err = r.export(ctx, m)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Collect gathers and returns all metric data related to the Reader from
|
||||
// the SDK. The returned metric data is not exported to the configured
|
||||
// exporter, it is left to the caller to handle that if desired.
|
||||
//
|
||||
// An error is returned if this is called after Shutdown.
|
||||
func (r *periodicReader) Collect(ctx context.Context) (metricdata.ResourceMetrics, error) {
|
||||
return r.collect(ctx, r.producer.Load())
|
||||
}
|
||||
|
||||
// collect unwraps p as a produceHolder and returns its produce results.
|
||||
func (r *periodicReader) collect(ctx context.Context, p interface{}) (metricdata.ResourceMetrics, error) {
|
||||
if p == nil {
|
||||
return metricdata.ResourceMetrics{}, ErrReaderNotRegistered
|
||||
}
|
||||
|
||||
ph, ok := p.(produceHolder)
|
||||
if !ok {
|
||||
// The atomic.Value is entirely in the periodicReader's control so
|
||||
// this should never happen. In the unforeseen case that this does
|
||||
// happen, return an error instead of panicking so a users code does
|
||||
// not halt in the processes.
|
||||
err := fmt.Errorf("periodic reader: invalid producer: %T", p)
|
||||
return metricdata.ResourceMetrics{}, err
|
||||
}
|
||||
return ph.produce(ctx)
|
||||
}
|
||||
|
||||
// export exports metric data m using r's exporter.
|
||||
func (r *periodicReader) export(ctx context.Context, m metricdata.ResourceMetrics) error {
|
||||
c, cancel := context.WithTimeout(ctx, r.timeout)
|
||||
defer cancel()
|
||||
return r.exporter.Export(c, m)
|
||||
}
|
||||
|
||||
// ForceFlush flushes pending telemetry.
|
||||
func (r *periodicReader) ForceFlush(ctx context.Context) error {
|
||||
errCh := make(chan error, 1)
|
||||
select {
|
||||
case r.flushCh <- errCh:
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
close(errCh)
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
case <-r.done:
|
||||
return ErrReaderShutdown
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
return r.exporter.ForceFlush(ctx)
|
||||
}
|
||||
|
||||
// Shutdown flushes pending telemetry and then stops the export pipeline.
|
||||
func (r *periodicReader) Shutdown(ctx context.Context) error {
|
||||
err := ErrReaderShutdown
|
||||
r.shutdownOnce.Do(func() {
|
||||
// Stop the run loop.
|
||||
r.cancel()
|
||||
<-r.done
|
||||
|
||||
// Any future call to Collect will now return ErrReaderShutdown.
|
||||
ph := r.producer.Swap(produceHolder{
|
||||
produce: shutdownProducer{}.produce,
|
||||
})
|
||||
|
||||
if ph != nil { // Reader was registered.
|
||||
// Flush pending telemetry.
|
||||
var m metricdata.ResourceMetrics
|
||||
m, err = r.collect(ctx, ph)
|
||||
if err == nil {
|
||||
err = r.export(ctx, m)
|
||||
}
|
||||
}
|
||||
|
||||
sErr := r.exporter.Shutdown(ctx)
|
||||
if err == nil || err == ErrReaderShutdown {
|
||||
err = sErr
|
||||
}
|
||||
})
|
||||
return err
|
||||
}
|
485
vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
generated
vendored
Normal file
485
vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
generated
vendored
Normal file
@ -0,0 +1,485 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/metric/unit"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/metric/view"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
var (
|
||||
errCreatingAggregators = errors.New("could not create all aggregators")
|
||||
errIncompatibleAggregation = errors.New("incompatible aggregation")
|
||||
errUnknownAggregation = errors.New("unrecognized aggregation")
|
||||
errUnknownTemporality = errors.New("unrecognized temporality")
|
||||
)
|
||||
|
||||
type aggregator interface {
|
||||
Aggregation() metricdata.Aggregation
|
||||
}
|
||||
|
||||
// instrumentSync is a synchronization point between a pipeline and an
|
||||
// instrument's Aggregators.
|
||||
type instrumentSync struct {
|
||||
name string
|
||||
description string
|
||||
unit unit.Unit
|
||||
aggregator aggregator
|
||||
}
|
||||
|
||||
func newPipeline(res *resource.Resource, reader Reader, views []view.View) *pipeline {
|
||||
if res == nil {
|
||||
res = resource.Empty()
|
||||
}
|
||||
return &pipeline{
|
||||
resource: res,
|
||||
reader: reader,
|
||||
views: views,
|
||||
aggregations: make(map[instrumentation.Scope][]instrumentSync),
|
||||
}
|
||||
}
|
||||
|
||||
// pipeline connects all of the instruments created by a meter provider to a Reader.
|
||||
// This is the object that will be `Reader.register()` when a meter provider is created.
|
||||
//
|
||||
// As instruments are created the instrument should be checked if it exists in the
|
||||
// views of a the Reader, and if so each aggregator should be added to the pipeline.
|
||||
type pipeline struct {
|
||||
resource *resource.Resource
|
||||
|
||||
reader Reader
|
||||
views []view.View
|
||||
|
||||
sync.Mutex
|
||||
aggregations map[instrumentation.Scope][]instrumentSync
|
||||
callbacks []func(context.Context)
|
||||
}
|
||||
|
||||
// addSync adds the instrumentSync to pipeline p with scope. This method is not
|
||||
// idempotent. Duplicate calls will result in duplicate additions, it is the
|
||||
// callers responsibility to ensure this is called with unique values.
|
||||
func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
if p.aggregations == nil {
|
||||
p.aggregations = map[instrumentation.Scope][]instrumentSync{
|
||||
scope: {iSync},
|
||||
}
|
||||
return
|
||||
}
|
||||
p.aggregations[scope] = append(p.aggregations[scope], iSync)
|
||||
}
|
||||
|
||||
// addCallback registers a callback to be run when `produce()` is called.
|
||||
func (p *pipeline) addCallback(callback func(context.Context)) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
p.callbacks = append(p.callbacks, callback)
|
||||
}
|
||||
|
||||
// callbackKey is a context key type used to identify context that came from the SDK.
|
||||
type callbackKey int
|
||||
|
||||
// produceKey is the context key to tell if a Observe is called within a callback.
|
||||
// Its value of zero is arbitrary. If this package defined other context keys,
|
||||
// they would have different integer values.
|
||||
const produceKey callbackKey = 0
|
||||
|
||||
// produce returns aggregated metrics from a single collection.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (p *pipeline) produce(ctx context.Context) (metricdata.ResourceMetrics, error) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
ctx = context.WithValue(ctx, produceKey, struct{}{})
|
||||
|
||||
for _, callback := range p.callbacks {
|
||||
// TODO make the callbacks parallel. ( #3034 )
|
||||
callback(ctx)
|
||||
if err := ctx.Err(); err != nil {
|
||||
// This means the context expired before we finished running callbacks.
|
||||
return metricdata.ResourceMetrics{}, err
|
||||
}
|
||||
}
|
||||
|
||||
sm := make([]metricdata.ScopeMetrics, 0, len(p.aggregations))
|
||||
for scope, instruments := range p.aggregations {
|
||||
metrics := make([]metricdata.Metrics, 0, len(instruments))
|
||||
for _, inst := range instruments {
|
||||
data := inst.aggregator.Aggregation()
|
||||
if data != nil {
|
||||
metrics = append(metrics, metricdata.Metrics{
|
||||
Name: inst.name,
|
||||
Description: inst.description,
|
||||
Unit: inst.unit,
|
||||
Data: data,
|
||||
})
|
||||
}
|
||||
}
|
||||
if len(metrics) > 0 {
|
||||
sm = append(sm, metricdata.ScopeMetrics{
|
||||
Scope: scope,
|
||||
Metrics: metrics,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return metricdata.ResourceMetrics{
|
||||
Resource: p.resource,
|
||||
ScopeMetrics: sm,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// inserter facilitates inserting of new instruments into a pipeline.
|
||||
type inserter[N int64 | float64] struct {
|
||||
cache instrumentCache[N]
|
||||
pipeline *pipeline
|
||||
}
|
||||
|
||||
func newInserter[N int64 | float64](p *pipeline, c instrumentCache[N]) *inserter[N] {
|
||||
return &inserter[N]{cache: c, pipeline: p}
|
||||
}
|
||||
|
||||
// Instrument inserts the instrument inst with instUnit into a pipeline. All
|
||||
// views the pipeline contains are matched against, and any matching view that
|
||||
// creates a unique Aggregator will be inserted into the pipeline and included
|
||||
// in the returned slice.
|
||||
//
|
||||
// The returned Aggregators are ensured to be deduplicated and unique. If
|
||||
// another view in another pipeline that is cached by this inserter's cache has
|
||||
// already inserted the same Aggregator for the same instrument, that
|
||||
// Aggregator instance is returned.
|
||||
//
|
||||
// If another instrument has already been inserted by this inserter, or any
|
||||
// other using the same cache, and it conflicts with the instrument being
|
||||
// inserted in this call, an Aggregator matching the arguments will still be
|
||||
// returned but an Info level log message will also be logged to the OTel
|
||||
// global logger.
|
||||
//
|
||||
// If the passed instrument would result in an incompatible Aggregator, an
|
||||
// error is returned and that Aggregator is not inserted or returned.
|
||||
//
|
||||
// If an instrument is determined to use a Drop aggregation, that instrument is
|
||||
// not inserted nor returned.
|
||||
func (i *inserter[N]) Instrument(inst view.Instrument, instUnit unit.Unit) ([]internal.Aggregator[N], error) {
|
||||
var (
|
||||
matched bool
|
||||
aggs []internal.Aggregator[N]
|
||||
)
|
||||
|
||||
errs := &multierror{wrapped: errCreatingAggregators}
|
||||
// The cache will return the same Aggregator instance. Use this fact to
|
||||
// compare pointer addresses to deduplicate Aggregators.
|
||||
seen := make(map[internal.Aggregator[N]]struct{})
|
||||
for _, v := range i.pipeline.views {
|
||||
inst, match := v.TransformInstrument(inst)
|
||||
if !match {
|
||||
continue
|
||||
}
|
||||
matched = true
|
||||
|
||||
agg, err := i.cachedAggregator(inst, instUnit)
|
||||
if err != nil {
|
||||
errs.append(err)
|
||||
}
|
||||
if agg == nil { // Drop aggregator.
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[agg]; ok {
|
||||
// This aggregator has already been added.
|
||||
continue
|
||||
}
|
||||
seen[agg] = struct{}{}
|
||||
aggs = append(aggs, agg)
|
||||
}
|
||||
|
||||
if matched {
|
||||
return aggs, errs.errorOrNil()
|
||||
}
|
||||
|
||||
// Apply implicit default view if no explicit matched.
|
||||
agg, err := i.cachedAggregator(inst, instUnit)
|
||||
if err != nil {
|
||||
errs.append(err)
|
||||
}
|
||||
if agg != nil {
|
||||
// Ensured to have not seen given matched was false.
|
||||
aggs = append(aggs, agg)
|
||||
}
|
||||
return aggs, errs.errorOrNil()
|
||||
}
|
||||
|
||||
// cachedAggregator returns the appropriate Aggregator for an instrument
|
||||
// configuration. If the exact instrument has been created within the
|
||||
// inst.Scope, that Aggregator instance will be returned. Otherwise, a new
|
||||
// computed Aggregator will be cached and returned.
|
||||
//
|
||||
// If the instrument configuration conflicts with an instrument that has
|
||||
// already been created (e.g. description, unit, data type) a warning will be
|
||||
// logged at the "Info" level with the global OTel logger. A valid new
|
||||
// Aggregator for the instrument configuration will still be returned without
|
||||
// an error.
|
||||
//
|
||||
// If the instrument defines an unknown or incompatible aggregation, an error
|
||||
// is returned.
|
||||
func (i *inserter[N]) cachedAggregator(inst view.Instrument, u unit.Unit) (internal.Aggregator[N], error) {
|
||||
switch inst.Aggregation.(type) {
|
||||
case nil, aggregation.Default:
|
||||
// Undefined, nil, means to use the default from the reader.
|
||||
inst.Aggregation = i.pipeline.reader.aggregation(inst.Kind)
|
||||
}
|
||||
|
||||
if err := isAggregatorCompatible(inst.Kind, inst.Aggregation); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"creating aggregator with instrumentKind: %d, aggregation %v: %w",
|
||||
inst.Kind, inst.Aggregation, err,
|
||||
)
|
||||
}
|
||||
|
||||
id := i.instrumentID(inst, u)
|
||||
// If there is a conflict, the specification says the view should
|
||||
// still be applied and a warning should be logged.
|
||||
i.logConflict(id)
|
||||
return i.cache.LookupAggregator(id, func() (internal.Aggregator[N], error) {
|
||||
agg, err := i.aggregator(inst.Aggregation, inst.Kind, id.Temporality, id.Monotonic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if agg == nil { // Drop aggregator.
|
||||
return nil, nil
|
||||
}
|
||||
i.pipeline.addSync(inst.Scope, instrumentSync{
|
||||
name: inst.Name,
|
||||
description: inst.Description,
|
||||
unit: u,
|
||||
aggregator: agg,
|
||||
})
|
||||
return agg, err
|
||||
})
|
||||
}
|
||||
|
||||
// logConflict validates if an instrument with the same name as id has already
|
||||
// been created. If that instrument conflicts with id, a warning is logged.
|
||||
func (i *inserter[N]) logConflict(id instrumentID) {
|
||||
existing, unique := i.cache.Unique(id)
|
||||
if unique {
|
||||
return
|
||||
}
|
||||
|
||||
global.Info(
|
||||
"duplicate metric stream definitions",
|
||||
"names", fmt.Sprintf("%q, %q", existing.Name, id.Name),
|
||||
"descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description),
|
||||
"units", fmt.Sprintf("%s, %s", existing.Unit, id.Unit),
|
||||
"numbers", fmt.Sprintf("%s, %s", existing.Number, id.Number),
|
||||
"aggregations", fmt.Sprintf("%s, %s", existing.Aggregation, id.Aggregation),
|
||||
"monotonics", fmt.Sprintf("%t, %t", existing.Monotonic, id.Monotonic),
|
||||
"temporalities", fmt.Sprintf("%s, %s", existing.Temporality.String(), id.Temporality.String()),
|
||||
)
|
||||
}
|
||||
|
||||
func (i *inserter[N]) instrumentID(vi view.Instrument, u unit.Unit) instrumentID {
|
||||
var zero N
|
||||
id := instrumentID{
|
||||
Name: vi.Name,
|
||||
Description: vi.Description,
|
||||
Unit: u,
|
||||
Aggregation: fmt.Sprintf("%T", vi.Aggregation),
|
||||
Temporality: i.pipeline.reader.temporality(vi.Kind),
|
||||
Number: fmt.Sprintf("%T", zero),
|
||||
}
|
||||
|
||||
switch vi.Kind {
|
||||
case view.AsyncCounter, view.SyncCounter, view.SyncHistogram:
|
||||
id.Monotonic = true
|
||||
}
|
||||
|
||||
return id
|
||||
}
|
||||
|
||||
// aggregator returns a new Aggregator matching agg, kind, temporality, and
|
||||
// monotonic. If the agg is unknown or temporality is invalid, an error is
|
||||
// returned.
|
||||
func (i *inserter[N]) aggregator(agg aggregation.Aggregation, kind view.InstrumentKind, temporality metricdata.Temporality, monotonic bool) (internal.Aggregator[N], error) {
|
||||
switch a := agg.(type) {
|
||||
case aggregation.Drop:
|
||||
return nil, nil
|
||||
case aggregation.LastValue:
|
||||
return internal.NewLastValue[N](), nil
|
||||
case aggregation.Sum:
|
||||
switch kind {
|
||||
case view.AsyncCounter, view.AsyncUpDownCounter:
|
||||
// Asynchronous counters and up-down-counters are defined to record
|
||||
// the absolute value of the count:
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#asynchronous-counter-creation
|
||||
switch temporality {
|
||||
case metricdata.CumulativeTemporality:
|
||||
return internal.NewPrecomputedCumulativeSum[N](monotonic), nil
|
||||
case metricdata.DeltaTemporality:
|
||||
return internal.NewPrecomputedDeltaSum[N](monotonic), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: %s(%d)", errUnknownTemporality, temporality.String(), temporality)
|
||||
}
|
||||
}
|
||||
|
||||
switch temporality {
|
||||
case metricdata.CumulativeTemporality:
|
||||
return internal.NewCumulativeSum[N](monotonic), nil
|
||||
case metricdata.DeltaTemporality:
|
||||
return internal.NewDeltaSum[N](monotonic), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: %s(%d)", errUnknownTemporality, temporality.String(), temporality)
|
||||
}
|
||||
case aggregation.ExplicitBucketHistogram:
|
||||
switch temporality {
|
||||
case metricdata.CumulativeTemporality:
|
||||
return internal.NewCumulativeHistogram[N](a), nil
|
||||
case metricdata.DeltaTemporality:
|
||||
return internal.NewDeltaHistogram[N](a), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: %s(%d)", errUnknownTemporality, temporality.String(), temporality)
|
||||
}
|
||||
}
|
||||
return nil, errUnknownAggregation
|
||||
}
|
||||
|
||||
// isAggregatorCompatible checks if the aggregation can be used by the instrument.
|
||||
// Current compatibility:
|
||||
//
|
||||
// | Instrument Kind | Drop | LastValue | Sum | Histogram | Exponential Histogram |
|
||||
// |----------------------|------|-----------|-----|-----------|-----------------------|
|
||||
// | Sync Counter | X | | X | X | X |
|
||||
// | Sync UpDown Counter | X | | X | | |
|
||||
// | Sync Histogram | X | | X | X | X |
|
||||
// | Async Counter | X | | X | | |
|
||||
// | Async UpDown Counter | X | | X | | |
|
||||
// | Async Gauge | X | X | | | |.
|
||||
func isAggregatorCompatible(kind view.InstrumentKind, agg aggregation.Aggregation) error {
|
||||
switch agg.(type) {
|
||||
case aggregation.ExplicitBucketHistogram:
|
||||
if kind == view.SyncCounter || kind == view.SyncHistogram {
|
||||
return nil
|
||||
}
|
||||
// TODO: review need for aggregation check after
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/issues/2710
|
||||
return errIncompatibleAggregation
|
||||
case aggregation.Sum:
|
||||
switch kind {
|
||||
case view.AsyncCounter, view.AsyncUpDownCounter, view.SyncCounter, view.SyncHistogram, view.SyncUpDownCounter:
|
||||
return nil
|
||||
default:
|
||||
// TODO: review need for aggregation check after
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/issues/2710
|
||||
return errIncompatibleAggregation
|
||||
}
|
||||
case aggregation.LastValue:
|
||||
if kind == view.AsyncGauge {
|
||||
return nil
|
||||
}
|
||||
// TODO: review need for aggregation check after
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/issues/2710
|
||||
return errIncompatibleAggregation
|
||||
case aggregation.Drop:
|
||||
return nil
|
||||
default:
|
||||
// This is used passed checking for default, it should be an error at this point.
|
||||
return fmt.Errorf("%w: %v", errUnknownAggregation, agg)
|
||||
}
|
||||
}
|
||||
|
||||
// pipelines is the group of pipelines connecting Readers with instrument
|
||||
// measurement.
|
||||
type pipelines []*pipeline
|
||||
|
||||
func newPipelines(res *resource.Resource, readers map[Reader][]view.View) pipelines {
|
||||
pipes := make([]*pipeline, 0, len(readers))
|
||||
for r, v := range readers {
|
||||
p := &pipeline{
|
||||
resource: res,
|
||||
reader: r,
|
||||
views: v,
|
||||
}
|
||||
r.register(p)
|
||||
pipes = append(pipes, p)
|
||||
}
|
||||
return pipes
|
||||
}
|
||||
|
||||
// TODO (#3053) Only register callbacks if any instrument matches in a view.
|
||||
func (p pipelines) registerCallback(fn func(context.Context)) {
|
||||
for _, pipe := range p {
|
||||
pipe.addCallback(fn)
|
||||
}
|
||||
}
|
||||
|
||||
// resolver facilitates resolving Aggregators an instrument needs to aggregate
|
||||
// measurements with while updating all pipelines that need to pull from those
|
||||
// aggregations.
|
||||
type resolver[N int64 | float64] struct {
|
||||
inserters []*inserter[N]
|
||||
}
|
||||
|
||||
func newResolver[N int64 | float64](p pipelines, c instrumentCache[N]) resolver[N] {
|
||||
in := make([]*inserter[N], len(p))
|
||||
for i := range in {
|
||||
in[i] = newInserter(p[i], c)
|
||||
}
|
||||
return resolver[N]{in}
|
||||
}
|
||||
|
||||
// Aggregators returns the Aggregators instrument inst needs to update when it
|
||||
// makes a measurement.
|
||||
func (r resolver[N]) Aggregators(inst view.Instrument, instUnit unit.Unit) ([]internal.Aggregator[N], error) {
|
||||
var aggs []internal.Aggregator[N]
|
||||
|
||||
errs := &multierror{}
|
||||
for _, i := range r.inserters {
|
||||
a, err := i.Instrument(inst, instUnit)
|
||||
if err != nil {
|
||||
errs.append(err)
|
||||
}
|
||||
aggs = append(aggs, a...)
|
||||
}
|
||||
return aggs, errs.errorOrNil()
|
||||
}
|
||||
|
||||
type multierror struct {
|
||||
wrapped error
|
||||
errors []string
|
||||
}
|
||||
|
||||
func (m *multierror) errorOrNil() error {
|
||||
if len(m.errors) == 0 {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; "))
|
||||
}
|
||||
|
||||
func (m *multierror) append(err error) {
|
||||
m.errors = append(m.errors, err.Error())
|
||||
}
|
383
vendor/go.opentelemetry.io/otel/sdk/metric/processor/basic/basic.go
generated
vendored
383
vendor/go.opentelemetry.io/otel/sdk/metric/processor/basic/basic.go
generated
vendored
@ -1,383 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package basic // import "go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
)
|
||||
|
||||
type (
|
||||
// Processor is a basic metric processor.
|
||||
Processor struct {
|
||||
aggregation.TemporalitySelector
|
||||
export.AggregatorSelector
|
||||
|
||||
state
|
||||
}
|
||||
|
||||
stateKey struct {
|
||||
// TODO: This code is organized to support multiple
|
||||
// accumulators which could theoretically produce the
|
||||
// data for the same instrument, and this code has
|
||||
// logic to combine data properly from multiple
|
||||
// accumulators. However, the use of
|
||||
// *sdkapi.Descriptor in the stateKey makes such
|
||||
// combination impossible, because each accumulator
|
||||
// allocates its own instruments. This can be fixed
|
||||
// by using the instrument name and kind instead of
|
||||
// the descriptor pointer. See
|
||||
// https://github.com/open-telemetry/opentelemetry-go/issues/862.
|
||||
descriptor *sdkapi.Descriptor
|
||||
distinct attribute.Distinct
|
||||
}
|
||||
|
||||
stateValue struct {
|
||||
// attrs corresponds to the stateKey.distinct field.
|
||||
attrs *attribute.Set
|
||||
|
||||
// updated indicates the last sequence number when this value had
|
||||
// Process() called by an accumulator.
|
||||
updated int64
|
||||
|
||||
// stateful indicates that a cumulative aggregation is
|
||||
// being maintained, taken from the process start time.
|
||||
stateful bool
|
||||
|
||||
// currentOwned indicates that "current" was allocated
|
||||
// by the processor in order to merge results from
|
||||
// multiple Accumulators during a single collection
|
||||
// round, which may happen either because:
|
||||
// (1) multiple Accumulators output the same Accumulation.
|
||||
// (2) one Accumulator is configured with dimensionality reduction.
|
||||
currentOwned bool
|
||||
|
||||
// current refers to the output from a single Accumulator
|
||||
// (if !currentOwned) or it refers to an Aggregator
|
||||
// owned by the processor used to accumulate multiple
|
||||
// values in a single collection round.
|
||||
current aggregator.Aggregator
|
||||
|
||||
// cumulative, if non-nil, refers to an Aggregator owned
|
||||
// by the processor used to store the last cumulative
|
||||
// value.
|
||||
cumulative aggregator.Aggregator
|
||||
}
|
||||
|
||||
state struct {
|
||||
config config
|
||||
|
||||
// RWMutex implements locking for the `Reader` interface.
|
||||
sync.RWMutex
|
||||
values map[stateKey]*stateValue
|
||||
|
||||
processStart time.Time
|
||||
intervalStart time.Time
|
||||
intervalEnd time.Time
|
||||
|
||||
// startedCollection and finishedCollection are the
|
||||
// number of StartCollection() and FinishCollection()
|
||||
// calls, used to ensure that the sequence of starts
|
||||
// and finishes are correctly balanced.
|
||||
|
||||
startedCollection int64
|
||||
finishedCollection int64
|
||||
}
|
||||
)
|
||||
|
||||
var _ export.Processor = &Processor{}
|
||||
var _ export.Checkpointer = &Processor{}
|
||||
var _ export.Reader = &state{}
|
||||
|
||||
// ErrInconsistentState is returned when the sequence of collection's starts and finishes are incorrectly balanced.
|
||||
var ErrInconsistentState = fmt.Errorf("inconsistent processor state")
|
||||
|
||||
// ErrInvalidTemporality is returned for unknown metric.Temporality.
|
||||
var ErrInvalidTemporality = fmt.Errorf("invalid aggregation temporality")
|
||||
|
||||
// New returns a basic Processor that is also a Checkpointer using the provided
|
||||
// AggregatorSelector to select Aggregators. The TemporalitySelector
|
||||
// is consulted to determine the kind(s) of exporter that will consume
|
||||
// data, so that this Processor can prepare to compute Cumulative Aggregations
|
||||
// as needed.
|
||||
func New(aselector export.AggregatorSelector, tselector aggregation.TemporalitySelector, opts ...Option) *Processor {
|
||||
return NewFactory(aselector, tselector, opts...).NewCheckpointer().(*Processor)
|
||||
}
|
||||
|
||||
type factory struct {
|
||||
aselector export.AggregatorSelector
|
||||
tselector aggregation.TemporalitySelector
|
||||
config config
|
||||
}
|
||||
|
||||
// NewFactory returns a new basic CheckpointerFactory.
|
||||
func NewFactory(aselector export.AggregatorSelector, tselector aggregation.TemporalitySelector, opts ...Option) export.CheckpointerFactory {
|
||||
var config config
|
||||
for _, opt := range opts {
|
||||
config = opt.applyProcessor(config)
|
||||
}
|
||||
return factory{
|
||||
aselector: aselector,
|
||||
tselector: tselector,
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
var _ export.CheckpointerFactory = factory{}
|
||||
|
||||
func (f factory) NewCheckpointer() export.Checkpointer {
|
||||
now := time.Now()
|
||||
p := &Processor{
|
||||
AggregatorSelector: f.aselector,
|
||||
TemporalitySelector: f.tselector,
|
||||
state: state{
|
||||
values: map[stateKey]*stateValue{},
|
||||
processStart: now,
|
||||
intervalStart: now,
|
||||
config: f.config,
|
||||
},
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// Process implements export.Processor.
|
||||
func (b *Processor) Process(accum export.Accumulation) error {
|
||||
if b.startedCollection != b.finishedCollection+1 {
|
||||
return ErrInconsistentState
|
||||
}
|
||||
desc := accum.Descriptor()
|
||||
key := stateKey{
|
||||
descriptor: desc,
|
||||
distinct: accum.Attributes().Equivalent(),
|
||||
}
|
||||
agg := accum.Aggregator()
|
||||
|
||||
// Check if there is an existing value.
|
||||
value, ok := b.state.values[key]
|
||||
if !ok {
|
||||
stateful := b.TemporalityFor(desc, agg.Aggregation().Kind()).MemoryRequired(desc.InstrumentKind())
|
||||
|
||||
newValue := &stateValue{
|
||||
attrs: accum.Attributes(),
|
||||
updated: b.state.finishedCollection,
|
||||
stateful: stateful,
|
||||
current: agg,
|
||||
}
|
||||
if stateful {
|
||||
if desc.InstrumentKind().PrecomputedSum() {
|
||||
// To convert precomputed sums to
|
||||
// deltas requires two aggregators to
|
||||
// be allocated, one for the prior
|
||||
// value and one for the output delta.
|
||||
// This functionality was removed from
|
||||
// the basic processor in PR #2350.
|
||||
return aggregation.ErrNoCumulativeToDelta
|
||||
}
|
||||
// In this case allocate one aggregator to
|
||||
// save the current state.
|
||||
b.AggregatorFor(desc, &newValue.cumulative)
|
||||
}
|
||||
b.state.values[key] = newValue
|
||||
return nil
|
||||
}
|
||||
|
||||
// Advance the update sequence number.
|
||||
sameCollection := b.state.finishedCollection == value.updated
|
||||
value.updated = b.state.finishedCollection
|
||||
|
||||
// At this point in the code, we have located an existing
|
||||
// value for some stateKey. This can be because:
|
||||
//
|
||||
// (a) stateful aggregation is being used, the entry was
|
||||
// entered during a prior collection, and this is the first
|
||||
// time processing an accumulation for this stateKey in the
|
||||
// current collection. Since this is the first time
|
||||
// processing an accumulation for this stateKey during this
|
||||
// collection, we don't know yet whether there are multiple
|
||||
// accumulators at work. If there are multiple accumulators,
|
||||
// they'll hit case (b) the second time through.
|
||||
//
|
||||
// (b) multiple accumulators are being used, whether stateful
|
||||
// or not.
|
||||
//
|
||||
// Case (a) occurs when the instrument and the exporter
|
||||
// require memory to work correctly, either because the
|
||||
// instrument reports a PrecomputedSum to a DeltaExporter or
|
||||
// the reverse, a non-PrecomputedSum instrument with a
|
||||
// CumulativeExporter. This logic is encapsulated in
|
||||
// Temporality.MemoryRequired(InstrumentKind).
|
||||
//
|
||||
// Case (b) occurs when the variable `sameCollection` is true,
|
||||
// indicating that the stateKey for Accumulation has already
|
||||
// been seen in the same collection. When this happens, it
|
||||
// implies that multiple Accumulators are being used, or that
|
||||
// a single Accumulator has been configured with a attribute key
|
||||
// filter.
|
||||
|
||||
if !sameCollection {
|
||||
if !value.currentOwned {
|
||||
// This is the first Accumulation we've seen
|
||||
// for this stateKey during this collection.
|
||||
// Just keep a reference to the Accumulator's
|
||||
// Aggregator. All the other cases copy
|
||||
// Aggregator state.
|
||||
value.current = agg
|
||||
return nil
|
||||
}
|
||||
return agg.SynchronizedMove(value.current, desc)
|
||||
}
|
||||
|
||||
// If the current is not owned, take ownership of a copy
|
||||
// before merging below.
|
||||
if !value.currentOwned {
|
||||
tmp := value.current
|
||||
b.AggregatorSelector.AggregatorFor(desc, &value.current)
|
||||
value.currentOwned = true
|
||||
if err := tmp.SynchronizedMove(value.current, desc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Combine this Accumulation with the prior Accumulation.
|
||||
return value.current.Merge(agg, desc)
|
||||
}
|
||||
|
||||
// Reader returns the associated Reader. Use the
|
||||
// Reader Locker interface to synchronize access to this
|
||||
// object. The Reader.ForEach() method cannot be called
|
||||
// concurrently with Process().
|
||||
func (b *Processor) Reader() export.Reader {
|
||||
return &b.state
|
||||
}
|
||||
|
||||
// StartCollection signals to the Processor one or more Accumulators
|
||||
// will begin calling Process() calls during collection.
|
||||
func (b *Processor) StartCollection() {
|
||||
if b.startedCollection != 0 {
|
||||
b.intervalStart = b.intervalEnd
|
||||
}
|
||||
b.startedCollection++
|
||||
}
|
||||
|
||||
// FinishCollection signals to the Processor that a complete
|
||||
// collection has finished and that ForEach will be called to access
|
||||
// the Reader.
|
||||
func (b *Processor) FinishCollection() error {
|
||||
b.intervalEnd = time.Now()
|
||||
if b.startedCollection != b.finishedCollection+1 {
|
||||
return ErrInconsistentState
|
||||
}
|
||||
defer func() { b.finishedCollection++ }()
|
||||
|
||||
for key, value := range b.values {
|
||||
mkind := key.descriptor.InstrumentKind()
|
||||
stale := value.updated != b.finishedCollection
|
||||
stateless := !value.stateful
|
||||
|
||||
// The following branch updates stateful aggregators. Skip
|
||||
// these updates if the aggregator is not stateful or if the
|
||||
// aggregator is stale.
|
||||
if stale || stateless {
|
||||
// If this processor does not require memeory,
|
||||
// stale, stateless entries can be removed.
|
||||
// This implies that they were not updated
|
||||
// over the previous full collection interval.
|
||||
if stale && stateless && !b.config.Memory {
|
||||
delete(b.values, key)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// The only kind of aggregators that are not stateless
|
||||
// are the ones needing delta to cumulative
|
||||
// conversion. Merge aggregator state in this case.
|
||||
if !mkind.PrecomputedSum() {
|
||||
// This line is equivalent to:
|
||||
// value.cumulative = value.cumulative + value.current
|
||||
if err := value.cumulative.Merge(value.current, key.descriptor); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForEach iterates through the Reader, passing an
|
||||
// export.Record with the appropriate Cumulative or Delta aggregation
|
||||
// to an exporter.
|
||||
func (b *state) ForEach(exporter aggregation.TemporalitySelector, f func(export.Record) error) error {
|
||||
if b.startedCollection != b.finishedCollection {
|
||||
return ErrInconsistentState
|
||||
}
|
||||
for key, value := range b.values {
|
||||
mkind := key.descriptor.InstrumentKind()
|
||||
|
||||
var agg aggregation.Aggregation
|
||||
var start time.Time
|
||||
|
||||
aggTemp := exporter.TemporalityFor(key.descriptor, value.current.Aggregation().Kind())
|
||||
|
||||
switch aggTemp {
|
||||
case aggregation.CumulativeTemporality:
|
||||
// If stateful, the sum has been computed. If stateless, the
|
||||
// input was already cumulative. Either way, use the checkpointed
|
||||
// value:
|
||||
if value.stateful {
|
||||
agg = value.cumulative.Aggregation()
|
||||
} else {
|
||||
agg = value.current.Aggregation()
|
||||
}
|
||||
start = b.processStart
|
||||
|
||||
case aggregation.DeltaTemporality:
|
||||
// Precomputed sums are a special case.
|
||||
if mkind.PrecomputedSum() {
|
||||
// This functionality was removed from
|
||||
// the basic processor in PR #2350.
|
||||
return aggregation.ErrNoCumulativeToDelta
|
||||
}
|
||||
agg = value.current.Aggregation()
|
||||
start = b.intervalStart
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%v: %w", aggTemp, ErrInvalidTemporality)
|
||||
}
|
||||
|
||||
// If the processor does not have Config.Memory and it was not updated
|
||||
// in the prior round, do not visit this value.
|
||||
if !b.config.Memory && value.updated != (b.finishedCollection-1) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := f(export.NewRecord(
|
||||
key.descriptor,
|
||||
value.attrs,
|
||||
agg,
|
||||
start,
|
||||
b.intervalEnd,
|
||||
)); err != nil && !errors.Is(err, aggregation.ErrNoData) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
43
vendor/go.opentelemetry.io/otel/sdk/metric/processor/basic/config.go
generated
vendored
43
vendor/go.opentelemetry.io/otel/sdk/metric/processor/basic/config.go
generated
vendored
@ -1,43 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package basic // import "go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||
|
||||
// config contains the options for configuring a basic metric processor.
|
||||
type config struct {
|
||||
// Memory controls whether the processor remembers metric instruments and
|
||||
// attribute sets that were previously reported. When Memory is true,
|
||||
// Reader.ForEach() will visit metrics that were not updated in the most
|
||||
// recent interval.
|
||||
Memory bool
|
||||
}
|
||||
|
||||
// Option configures a basic processor configuration.
|
||||
type Option interface {
|
||||
applyProcessor(config) config
|
||||
}
|
||||
|
||||
// WithMemory sets the memory behavior of a Processor. If this is true, the
|
||||
// processor will report metric instruments and attribute sets that were
|
||||
// previously reported but not updated in the most recent interval.
|
||||
func WithMemory(memory bool) Option {
|
||||
return memoryOption(memory)
|
||||
}
|
||||
|
||||
type memoryOption bool
|
||||
|
||||
func (m memoryOption) applyProcessor(cfg config) config {
|
||||
cfg.Memory = bool(m)
|
||||
return cfg
|
||||
}
|
115
vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
generated
vendored
Normal file
115
vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
generated
vendored
Normal file
@ -0,0 +1,115 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
)
|
||||
|
||||
// MeterProvider handles the creation and coordination of Meters. All Meters
|
||||
// created by a MeterProvider will be associated with the same Resource, have
|
||||
// the same Views applied to them, and have their produced metric telemetry
|
||||
// passed to the configured Readers.
|
||||
type MeterProvider struct {
|
||||
pipes pipelines
|
||||
meters cache[instrumentation.Scope, *meter]
|
||||
|
||||
forceFlush, shutdown func(context.Context) error
|
||||
}
|
||||
|
||||
// Compile-time check MeterProvider implements metric.MeterProvider.
|
||||
var _ metric.MeterProvider = (*MeterProvider)(nil)
|
||||
|
||||
// NewMeterProvider returns a new and configured MeterProvider.
|
||||
//
|
||||
// By default, the returned MeterProvider is configured with the default
|
||||
// Resource and no Readers. Readers cannot be added after a MeterProvider is
|
||||
// created. This means the returned MeterProvider, one created with no
|
||||
// Readers, will perform no operations.
|
||||
func NewMeterProvider(options ...Option) *MeterProvider {
|
||||
conf := newConfig(options)
|
||||
flush, sdown := conf.readerSignals()
|
||||
return &MeterProvider{
|
||||
pipes: newPipelines(conf.res, conf.readers),
|
||||
forceFlush: flush,
|
||||
shutdown: sdown,
|
||||
}
|
||||
}
|
||||
|
||||
// Meter returns a Meter with the given name and configured with options.
|
||||
//
|
||||
// The name should be the name of the instrumentation scope creating
|
||||
// telemetry. This name may be the same as the instrumented code only if that
|
||||
// code provides built-in instrumentation.
|
||||
//
|
||||
// If name is empty, the default (go.opentelemetry.io/otel/sdk/meter) will be
|
||||
// used.
|
||||
//
|
||||
// Calls to the Meter method after Shutdown has been called will return Meters
|
||||
// that perform no operations.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metric.Meter {
|
||||
c := metric.NewMeterConfig(options...)
|
||||
s := instrumentation.Scope{
|
||||
Name: name,
|
||||
Version: c.InstrumentationVersion(),
|
||||
SchemaURL: c.SchemaURL(),
|
||||
}
|
||||
return mp.meters.Lookup(s, func() *meter {
|
||||
return newMeter(s, mp.pipes)
|
||||
})
|
||||
}
|
||||
|
||||
// ForceFlush flushes all pending telemetry.
|
||||
//
|
||||
// This method honors the deadline or cancellation of ctx. An appropriate
|
||||
// error will be returned in these situations. There is no guaranteed that all
|
||||
// telemetry be flushed or all resources have been released in these
|
||||
// situations.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (mp *MeterProvider) ForceFlush(ctx context.Context) error {
|
||||
if mp.forceFlush != nil {
|
||||
return mp.forceFlush(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown shuts down the MeterProvider flushing all pending telemetry and
|
||||
// releasing any held computational resources.
|
||||
//
|
||||
// This call is idempotent. The first call will perform all flush and
|
||||
// releasing operations. Subsequent calls will perform no action and will
|
||||
// return an error stating this.
|
||||
//
|
||||
// Measurements made by instruments from meters this MeterProvider created
|
||||
// will not be exported after Shutdown is called.
|
||||
//
|
||||
// This method honors the deadline or cancellation of ctx. An appropriate
|
||||
// error will be returned in these situations. There is no guaranteed that all
|
||||
// telemetry be flushed or all resources have been released in these
|
||||
// situations.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (mp *MeterProvider) Shutdown(ctx context.Context) error {
|
||||
if mp.shutdown != nil {
|
||||
return mp.shutdown(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
213
vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
generated
vendored
Normal file
213
vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
generated
vendored
Normal file
@ -0,0 +1,213 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/metric/view"
|
||||
)
|
||||
|
||||
// errDuplicateRegister is logged by a Reader when an attempt to registered it
|
||||
// more than once occurs.
|
||||
var errDuplicateRegister = fmt.Errorf("duplicate reader registration")
|
||||
|
||||
// ErrReaderNotRegistered is returned if Collect or Shutdown are called before
|
||||
// the reader is registered with a MeterProvider.
|
||||
var ErrReaderNotRegistered = fmt.Errorf("reader is not registered")
|
||||
|
||||
// ErrReaderShutdown is returned if Collect or Shutdown are called after a
|
||||
// reader has been Shutdown once.
|
||||
var ErrReaderShutdown = fmt.Errorf("reader is shutdown")
|
||||
|
||||
// Reader is the interface used between the SDK and an
|
||||
// exporter. Control flow is bi-directional through the
|
||||
// Reader, since the SDK initiates ForceFlush and Shutdown
|
||||
// while the initiates collection. The Register() method here
|
||||
// informs the Reader that it can begin reading, signaling the
|
||||
// start of bi-directional control flow.
|
||||
//
|
||||
// Typically, push-based exporters that are periodic will
|
||||
// implement PeroidicExporter themselves and construct a
|
||||
// PeriodicReader to satisfy this interface.
|
||||
//
|
||||
// Pull-based exporters will typically implement Register
|
||||
// themselves, since they read on demand.
|
||||
type Reader interface {
|
||||
// register registers a Reader with a MeterProvider.
|
||||
// The producer argument allows the Reader to signal the sdk to collect
|
||||
// and send aggregated metric measurements.
|
||||
register(producer)
|
||||
|
||||
// temporality reports the Temporality for the instrument kind provided.
|
||||
temporality(view.InstrumentKind) metricdata.Temporality
|
||||
|
||||
// aggregation returns what Aggregation to use for an instrument kind.
|
||||
aggregation(view.InstrumentKind) aggregation.Aggregation // nolint:revive // import-shadow for method scoped by type.
|
||||
|
||||
// Collect gathers and returns all metric data related to the Reader from
|
||||
// the SDK. An error is returned if this is called after Shutdown.
|
||||
Collect(context.Context) (metricdata.ResourceMetrics, error)
|
||||
|
||||
// ForceFlush flushes all metric measurements held in an export pipeline.
|
||||
//
|
||||
// This deadline or cancellation of the passed context are honored. An appropriate
|
||||
// error will be returned in these situations. There is no guaranteed that all
|
||||
// telemetry be flushed or all resources have been released in these
|
||||
// situations.
|
||||
ForceFlush(context.Context) error
|
||||
|
||||
// Shutdown flushes all metric measurements held in an export pipeline and releases any
|
||||
// held computational resources.
|
||||
//
|
||||
// This deadline or cancellation of the passed context are honored. An appropriate
|
||||
// error will be returned in these situations. There is no guaranteed that all
|
||||
// telemetry be flushed or all resources have been released in these
|
||||
// situations.
|
||||
//
|
||||
// After Shutdown is called, calls to Collect will perform no operation and instead will return
|
||||
// an error indicating the shutdown state.
|
||||
Shutdown(context.Context) error
|
||||
}
|
||||
|
||||
// producer produces metrics for a Reader.
|
||||
type producer interface {
|
||||
// produce returns aggregated metrics from a single collection.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
produce(context.Context) (metricdata.ResourceMetrics, error)
|
||||
}
|
||||
|
||||
// produceHolder is used as an atomic.Value to wrap the non-concrete producer
|
||||
// type.
|
||||
type produceHolder struct {
|
||||
produce func(context.Context) (metricdata.ResourceMetrics, error)
|
||||
}
|
||||
|
||||
// shutdownProducer produces an ErrReaderShutdown error always.
|
||||
type shutdownProducer struct{}
|
||||
|
||||
// produce returns an ErrReaderShutdown error.
|
||||
func (p shutdownProducer) produce(context.Context) (metricdata.ResourceMetrics, error) {
|
||||
return metricdata.ResourceMetrics{}, ErrReaderShutdown
|
||||
}
|
||||
|
||||
// ReaderOption applies a configuration option value to either a ManualReader or
|
||||
// a PeriodicReader.
|
||||
type ReaderOption interface {
|
||||
ManualReaderOption
|
||||
PeriodicReaderOption
|
||||
}
|
||||
|
||||
// TemporalitySelector selects the temporality to use based on the InstrumentKind.
|
||||
type TemporalitySelector func(view.InstrumentKind) metricdata.Temporality
|
||||
|
||||
// DefaultTemporalitySelector is the default TemporalitySelector used if
|
||||
// WithTemporalitySelector is not provided. CumulativeTemporality will be used
|
||||
// for all instrument kinds if this TemporalitySelector is used.
|
||||
func DefaultTemporalitySelector(view.InstrumentKind) metricdata.Temporality {
|
||||
return metricdata.CumulativeTemporality
|
||||
}
|
||||
|
||||
// WithTemporalitySelector sets the TemporalitySelector a reader will use to
|
||||
// determine the Temporality of an instrument based on its kind. If this
|
||||
// option is not used, the reader will use the DefaultTemporalitySelector.
|
||||
func WithTemporalitySelector(selector TemporalitySelector) ReaderOption {
|
||||
return temporalitySelectorOption{selector: selector}
|
||||
}
|
||||
|
||||
type temporalitySelectorOption struct {
|
||||
selector func(instrument view.InstrumentKind) metricdata.Temporality
|
||||
}
|
||||
|
||||
// applyManual returns a manualReaderConfig with option applied.
|
||||
func (t temporalitySelectorOption) applyManual(mrc manualReaderConfig) manualReaderConfig {
|
||||
mrc.temporalitySelector = t.selector
|
||||
return mrc
|
||||
}
|
||||
|
||||
// applyPeriodic returns a periodicReaderConfig with option applied.
|
||||
func (t temporalitySelectorOption) applyPeriodic(prc periodicReaderConfig) periodicReaderConfig {
|
||||
prc.temporalitySelector = t.selector
|
||||
return prc
|
||||
}
|
||||
|
||||
// AggregationSelector selects the aggregation and the parameters to use for
|
||||
// that aggregation based on the InstrumentKind.
|
||||
type AggregationSelector func(view.InstrumentKind) aggregation.Aggregation
|
||||
|
||||
// DefaultAggregationSelector returns the default aggregation and parameters
|
||||
// that will be used to summarize measurement made from an instrument of
|
||||
// InstrumentKind. This AggregationSelector using the following selection
|
||||
// mapping: Counter ⇨ Sum, Asynchronous Counter ⇨ Sum, UpDownCounter ⇨ Sum,
|
||||
// Asynchronous UpDownCounter ⇨ Sum, Asynchronous Gauge ⇨ LastValue,
|
||||
// Histogram ⇨ ExplicitBucketHistogram.
|
||||
func DefaultAggregationSelector(ik view.InstrumentKind) aggregation.Aggregation {
|
||||
switch ik {
|
||||
case view.SyncCounter, view.SyncUpDownCounter, view.AsyncCounter, view.AsyncUpDownCounter:
|
||||
return aggregation.Sum{}
|
||||
case view.AsyncGauge:
|
||||
return aggregation.LastValue{}
|
||||
case view.SyncHistogram:
|
||||
return aggregation.ExplicitBucketHistogram{
|
||||
Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
|
||||
NoMinMax: false,
|
||||
}
|
||||
}
|
||||
panic("unknown instrument kind")
|
||||
}
|
||||
|
||||
// WithAggregationSelector sets the AggregationSelector a reader will use to
|
||||
// determine the aggregation to use for an instrument based on its kind. If
|
||||
// this option is not used, the reader will use the DefaultAggregationSelector
|
||||
// or the aggregation explicitly passed for a view matching an instrument.
|
||||
func WithAggregationSelector(selector AggregationSelector) ReaderOption {
|
||||
// Deep copy and validate before using.
|
||||
wrapped := func(ik view.InstrumentKind) aggregation.Aggregation {
|
||||
a := selector(ik)
|
||||
cpA := a.Copy()
|
||||
if err := cpA.Err(); err != nil {
|
||||
cpA = DefaultAggregationSelector(ik)
|
||||
global.Error(
|
||||
err, "using default aggregation instead",
|
||||
"aggregation", a,
|
||||
"replacement", cpA,
|
||||
)
|
||||
}
|
||||
return cpA
|
||||
}
|
||||
|
||||
return aggregationSelectorOption{selector: wrapped}
|
||||
}
|
||||
|
||||
type aggregationSelectorOption struct {
|
||||
selector AggregationSelector
|
||||
}
|
||||
|
||||
// applyManual returns a manualReaderConfig with option applied.
|
||||
func (t aggregationSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig {
|
||||
c.aggregationSelector = t.selector
|
||||
return c
|
||||
}
|
||||
|
||||
// applyPeriodic returns a periodicReaderConfig with option applied.
|
||||
func (t aggregationSelectorOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig {
|
||||
c.aggregationSelector = t.selector
|
||||
return c
|
||||
}
|
59
vendor/go.opentelemetry.io/otel/sdk/metric/refcount_mapped.go
generated
vendored
59
vendor/go.opentelemetry.io/otel/sdk/metric/refcount_mapped.go
generated
vendored
@ -1,59 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// refcountMapped atomically counts the number of references (usages) of an entry
|
||||
// while also keeping a state of mapped/unmapped into a different data structure
|
||||
// (an external map or list for example).
|
||||
//
|
||||
// refcountMapped uses an atomic value where the least significant bit is used to
|
||||
// keep the state of mapping ('1' is used for unmapped and '0' is for mapped) and
|
||||
// the rest of the bits are used for refcounting.
|
||||
type refcountMapped struct {
|
||||
// refcount has to be aligned for 64-bit atomic operations.
|
||||
value int64
|
||||
}
|
||||
|
||||
// ref returns true if the entry is still mapped and increases the
|
||||
// reference usages, if unmapped returns false.
|
||||
func (rm *refcountMapped) ref() bool {
|
||||
// Check if this entry was marked as unmapped between the moment
|
||||
// we got a reference to it (or will be removed very soon) and here.
|
||||
return atomic.AddInt64(&rm.value, 2)&1 == 0
|
||||
}
|
||||
|
||||
func (rm *refcountMapped) unref() {
|
||||
atomic.AddInt64(&rm.value, -2)
|
||||
}
|
||||
|
||||
// tryUnmap flips the mapped bit to "unmapped" state and returns true if both of the
|
||||
// following conditions are true upon entry to this function:
|
||||
// * There are no active references;
|
||||
// * The mapped bit is in "mapped" state.
|
||||
// Otherwise no changes are done to mapped bit and false is returned.
|
||||
func (rm *refcountMapped) tryUnmap() bool {
|
||||
if atomic.LoadInt64(&rm.value) != 0 {
|
||||
return false
|
||||
}
|
||||
return atomic.CompareAndSwapInt64(
|
||||
&rm.value,
|
||||
0,
|
||||
1,
|
||||
)
|
||||
}
|
139
vendor/go.opentelemetry.io/otel/sdk/metric/registry/registry.go
generated
vendored
139
vendor/go.opentelemetry.io/otel/sdk/metric/registry/registry.go
generated
vendored
@ -1,139 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package registry // import "go.opentelemetry.io/otel/sdk/metric/registry"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
)
|
||||
|
||||
// UniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding
|
||||
// uniqueness checking for instrument descriptors.
|
||||
type UniqueInstrumentMeterImpl struct {
|
||||
lock sync.Mutex
|
||||
impl sdkapi.MeterImpl
|
||||
state map[string]sdkapi.InstrumentImpl
|
||||
}
|
||||
|
||||
var _ sdkapi.MeterImpl = (*UniqueInstrumentMeterImpl)(nil)
|
||||
|
||||
// ErrMetricKindMismatch is the standard error for mismatched metric
|
||||
// instrument definitions.
|
||||
var ErrMetricKindMismatch = fmt.Errorf(
|
||||
"a metric was already registered by this name with another kind or number type")
|
||||
|
||||
// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl
|
||||
// with the addition of instrument name uniqueness checking.
|
||||
func NewUniqueInstrumentMeterImpl(impl sdkapi.MeterImpl) *UniqueInstrumentMeterImpl {
|
||||
return &UniqueInstrumentMeterImpl{
|
||||
impl: impl,
|
||||
state: map[string]sdkapi.InstrumentImpl{},
|
||||
}
|
||||
}
|
||||
|
||||
// MeterImpl gives the caller access to the underlying MeterImpl
|
||||
// used by this UniqueInstrumentMeterImpl.
|
||||
func (u *UniqueInstrumentMeterImpl) MeterImpl() sdkapi.MeterImpl {
|
||||
return u.impl
|
||||
}
|
||||
|
||||
// NewMetricKindMismatchError formats an error that describes a
|
||||
// mismatched metric instrument definition.
|
||||
func NewMetricKindMismatchError(desc sdkapi.Descriptor) error {
|
||||
return fmt.Errorf("metric %s registered as %s %s: %w",
|
||||
desc.Name(),
|
||||
desc.NumberKind(),
|
||||
desc.InstrumentKind(),
|
||||
ErrMetricKindMismatch)
|
||||
}
|
||||
|
||||
// Compatible determines whether two sdkapi.Descriptors are considered
|
||||
// the same for the purpose of uniqueness checking.
|
||||
func Compatible(candidate, existing sdkapi.Descriptor) bool {
|
||||
return candidate.InstrumentKind() == existing.InstrumentKind() &&
|
||||
candidate.NumberKind() == existing.NumberKind()
|
||||
}
|
||||
|
||||
// checkUniqueness returns an ErrMetricKindMismatch error if there is
|
||||
// a conflict between a descriptor that was already registered and the
|
||||
// `descriptor` argument. If there is an existing compatible
|
||||
// registration, this returns the already-registered instrument. If
|
||||
// there is no conflict and no prior registration, returns (nil, nil).
|
||||
func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor sdkapi.Descriptor) (sdkapi.InstrumentImpl, error) {
|
||||
impl, ok := u.state[descriptor.Name()]
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if !Compatible(descriptor, impl.Descriptor()) {
|
||||
return nil, NewMetricKindMismatchError(impl.Descriptor())
|
||||
}
|
||||
|
||||
return impl, nil
|
||||
}
|
||||
|
||||
// NewSyncInstrument implements sdkapi.MeterImpl.
|
||||
func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) {
|
||||
u.lock.Lock()
|
||||
defer u.lock.Unlock()
|
||||
|
||||
impl, err := u.checkUniqueness(descriptor)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if impl != nil {
|
||||
return impl.(sdkapi.SyncImpl), nil
|
||||
}
|
||||
|
||||
syncInst, err := u.impl.NewSyncInstrument(descriptor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
u.state[descriptor.Name()] = syncInst
|
||||
return syncInst, nil
|
||||
}
|
||||
|
||||
// NewAsyncInstrument implements sdkapi.MeterImpl.
|
||||
func (u *UniqueInstrumentMeterImpl) NewAsyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.AsyncImpl, error) {
|
||||
u.lock.Lock()
|
||||
defer u.lock.Unlock()
|
||||
|
||||
impl, err := u.checkUniqueness(descriptor)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if impl != nil {
|
||||
return impl.(sdkapi.AsyncImpl), nil
|
||||
}
|
||||
|
||||
asyncInst, err := u.impl.NewAsyncInstrument(descriptor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
u.state[descriptor.Name()] = asyncInst
|
||||
return asyncInst, nil
|
||||
}
|
||||
|
||||
// RegisterCallback registers callback with insts.
|
||||
func (u *UniqueInstrumentMeterImpl) RegisterCallback(insts []instrument.Asynchronous, callback func(context.Context)) error {
|
||||
u.lock.Lock()
|
||||
defer u.lock.Unlock()
|
||||
|
||||
return u.impl.RegisterCallback(insts, callback)
|
||||
}
|
423
vendor/go.opentelemetry.io/otel/sdk/metric/sdk.go
generated
vendored
423
vendor/go.opentelemetry.io/otel/sdk/metric/sdk.go
generated
vendored
@ -1,423 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
)
|
||||
|
||||
type (
|
||||
// Accumulator implements the OpenTelemetry Meter API. The
|
||||
// Accumulator is bound to a single export.Processor in
|
||||
// `NewAccumulator()`.
|
||||
//
|
||||
// The Accumulator supports a Collect() API to gather and export
|
||||
// current data. Collect() should be arranged according to
|
||||
// the processor model. Push-based processors will setup a
|
||||
// timer to call Collect() periodically. Pull-based processors
|
||||
// will call Collect() when a pull request arrives.
|
||||
Accumulator struct {
|
||||
// current maps `mapkey` to *record.
|
||||
current sync.Map
|
||||
|
||||
callbackLock sync.Mutex
|
||||
callbacks map[*callback]struct{}
|
||||
|
||||
// currentEpoch is the current epoch number. It is
|
||||
// incremented in `Collect()`.
|
||||
currentEpoch int64
|
||||
|
||||
// processor is the configured processor+configuration.
|
||||
processor export.Processor
|
||||
|
||||
// collectLock prevents simultaneous calls to Collect().
|
||||
collectLock sync.Mutex
|
||||
}
|
||||
|
||||
callback struct {
|
||||
insts map[*asyncInstrument]struct{}
|
||||
f func(context.Context)
|
||||
}
|
||||
|
||||
asyncContextKey struct{}
|
||||
|
||||
asyncInstrument struct {
|
||||
baseInstrument
|
||||
instrument.Asynchronous
|
||||
}
|
||||
|
||||
syncInstrument struct {
|
||||
baseInstrument
|
||||
instrument.Synchronous
|
||||
}
|
||||
|
||||
// mapkey uniquely describes a metric instrument in terms of its
|
||||
// InstrumentID and the encoded form of its attributes.
|
||||
mapkey struct {
|
||||
descriptor *sdkapi.Descriptor
|
||||
ordered attribute.Distinct
|
||||
}
|
||||
|
||||
// record maintains the state of one metric instrument. Due
|
||||
// the use of lock-free algorithms, there may be more than one
|
||||
// `record` in existence at a time, although at most one can
|
||||
// be referenced from the `Accumulator.current` map.
|
||||
record struct {
|
||||
// refMapped keeps track of refcounts and the mapping state to the
|
||||
// Accumulator.current map.
|
||||
refMapped refcountMapped
|
||||
|
||||
// updateCount is incremented on every Update.
|
||||
updateCount int64
|
||||
|
||||
// collectedCount is set to updateCount on collection,
|
||||
// supports checking for no updates during a round.
|
||||
collectedCount int64
|
||||
|
||||
// attrs is the stored attribute set for this record, except in cases
|
||||
// where a attribute set is shared due to batch recording.
|
||||
attrs attribute.Set
|
||||
|
||||
// sortSlice has a single purpose - as a temporary place for sorting
|
||||
// during attributes creation to avoid allocation.
|
||||
sortSlice attribute.Sortable
|
||||
|
||||
// inst is a pointer to the corresponding instrument.
|
||||
inst *baseInstrument
|
||||
|
||||
// current implements the actual RecordOne() API,
|
||||
// depending on the type of aggregation. If nil, the
|
||||
// metric was disabled by the exporter.
|
||||
current aggregator.Aggregator
|
||||
checkpoint aggregator.Aggregator
|
||||
}
|
||||
|
||||
baseInstrument struct {
|
||||
meter *Accumulator
|
||||
descriptor sdkapi.Descriptor
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
_ sdkapi.MeterImpl = &Accumulator{}
|
||||
|
||||
// ErrUninitializedInstrument is returned when an instrument is used when uninitialized.
|
||||
ErrUninitializedInstrument = fmt.Errorf("use of an uninitialized instrument")
|
||||
|
||||
// ErrBadInstrument is returned when an instrument from another SDK is
|
||||
// attempted to be registered with this SDK.
|
||||
ErrBadInstrument = fmt.Errorf("use of a instrument from another SDK")
|
||||
)
|
||||
|
||||
func (b *baseInstrument) Descriptor() sdkapi.Descriptor {
|
||||
return b.descriptor
|
||||
}
|
||||
|
||||
func (a *asyncInstrument) Implementation() interface{} {
|
||||
return a
|
||||
}
|
||||
|
||||
func (s *syncInstrument) Implementation() interface{} {
|
||||
return s
|
||||
}
|
||||
|
||||
// acquireHandle gets or creates a `*record` corresponding to `kvs`,
|
||||
// the input attributes.
|
||||
func (b *baseInstrument) acquireHandle(kvs []attribute.KeyValue) *record {
|
||||
// This memory allocation may not be used, but it's
|
||||
// needed for the `sortSlice` field, to avoid an
|
||||
// allocation while sorting.
|
||||
rec := &record{}
|
||||
rec.attrs = attribute.NewSetWithSortable(kvs, &rec.sortSlice)
|
||||
|
||||
// Create lookup key for sync.Map (one allocation, as this
|
||||
// passes through an interface{})
|
||||
mk := mapkey{
|
||||
descriptor: &b.descriptor,
|
||||
ordered: rec.attrs.Equivalent(),
|
||||
}
|
||||
|
||||
if actual, ok := b.meter.current.Load(mk); ok {
|
||||
// Existing record case.
|
||||
existingRec := actual.(*record)
|
||||
if existingRec.refMapped.ref() {
|
||||
// At this moment it is guaranteed that the entry is in
|
||||
// the map and will not be removed.
|
||||
return existingRec
|
||||
}
|
||||
// This entry is no longer mapped, try to add a new entry.
|
||||
}
|
||||
|
||||
rec.refMapped = refcountMapped{value: 2}
|
||||
rec.inst = b
|
||||
|
||||
b.meter.processor.AggregatorFor(&b.descriptor, &rec.current, &rec.checkpoint)
|
||||
|
||||
for {
|
||||
// Load/Store: there's a memory allocation to place `mk` into
|
||||
// an interface here.
|
||||
if actual, loaded := b.meter.current.LoadOrStore(mk, rec); loaded {
|
||||
// Existing record case. Cannot change rec here because if fail
|
||||
// will try to add rec again to avoid new allocations.
|
||||
oldRec := actual.(*record)
|
||||
if oldRec.refMapped.ref() {
|
||||
// At this moment it is guaranteed that the entry is in
|
||||
// the map and will not be removed.
|
||||
return oldRec
|
||||
}
|
||||
// This loaded entry is marked as unmapped (so Collect will remove
|
||||
// it from the map immediately), try again - this is a busy waiting
|
||||
// strategy to wait until Collect() removes this entry from the map.
|
||||
//
|
||||
// This can be improved by having a list of "Unmapped" entries for
|
||||
// one time only usages, OR we can make this a blocking path and use
|
||||
// a Mutex that protects the delete operation (delete only if the old
|
||||
// record is associated with the key).
|
||||
|
||||
// Let collector get work done to remove the entry from the map.
|
||||
runtime.Gosched()
|
||||
continue
|
||||
}
|
||||
// The new entry was added to the map, good to go.
|
||||
return rec
|
||||
}
|
||||
}
|
||||
|
||||
// RecordOne captures a single synchronous metric event.
|
||||
//
|
||||
// The order of the input array `kvs` may be sorted after the function is called.
|
||||
func (s *syncInstrument) RecordOne(ctx context.Context, num number.Number, kvs []attribute.KeyValue) {
|
||||
h := s.acquireHandle(kvs)
|
||||
defer h.unbind()
|
||||
h.captureOne(ctx, num)
|
||||
}
|
||||
|
||||
// ObserveOne captures a single asynchronous metric event.
|
||||
|
||||
// The order of the input array `kvs` may be sorted after the function is called.
|
||||
func (a *asyncInstrument) ObserveOne(ctx context.Context, num number.Number, attrs []attribute.KeyValue) {
|
||||
h := a.acquireHandle(attrs)
|
||||
defer h.unbind()
|
||||
h.captureOne(ctx, num)
|
||||
}
|
||||
|
||||
// NewAccumulator constructs a new Accumulator for the given
|
||||
// processor. This Accumulator supports only a single processor.
|
||||
//
|
||||
// The Accumulator does not start any background process to collect itself
|
||||
// periodically, this responsibility lies with the processor, typically,
|
||||
// depending on the type of export. For example, a pull-based
|
||||
// processor will call Collect() when it receives a request to scrape
|
||||
// current metric values. A push-based processor should configure its
|
||||
// own periodic collection.
|
||||
func NewAccumulator(processor export.Processor) *Accumulator {
|
||||
return &Accumulator{
|
||||
processor: processor,
|
||||
callbacks: map[*callback]struct{}{},
|
||||
}
|
||||
}
|
||||
|
||||
var _ sdkapi.MeterImpl = &Accumulator{}
|
||||
|
||||
// NewSyncInstrument implements sdkapi.MetricImpl.
|
||||
func (m *Accumulator) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) {
|
||||
return &syncInstrument{
|
||||
baseInstrument: baseInstrument{
|
||||
descriptor: descriptor,
|
||||
meter: m,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewAsyncInstrument implements sdkapi.MetricImpl.
|
||||
func (m *Accumulator) NewAsyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.AsyncImpl, error) {
|
||||
a := &asyncInstrument{
|
||||
baseInstrument: baseInstrument{
|
||||
descriptor: descriptor,
|
||||
meter: m,
|
||||
},
|
||||
}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// RegisterCallback registers f to be called for insts.
|
||||
func (m *Accumulator) RegisterCallback(insts []instrument.Asynchronous, f func(context.Context)) error {
|
||||
cb := &callback{
|
||||
insts: map[*asyncInstrument]struct{}{},
|
||||
f: f,
|
||||
}
|
||||
for _, inst := range insts {
|
||||
impl, ok := inst.(sdkapi.AsyncImpl)
|
||||
if !ok {
|
||||
return ErrBadInstrument
|
||||
}
|
||||
|
||||
ai, err := m.fromAsync(impl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cb.insts[ai] = struct{}{}
|
||||
}
|
||||
|
||||
m.callbackLock.Lock()
|
||||
defer m.callbackLock.Unlock()
|
||||
m.callbacks[cb] = struct{}{}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect traverses the list of active records and observers and
|
||||
// exports data for each active instrument. Collect() may not be
|
||||
// called concurrently.
|
||||
//
|
||||
// During the collection pass, the export.Processor will receive
|
||||
// one Export() call per current aggregation.
|
||||
//
|
||||
// Returns the number of records that were checkpointed.
|
||||
func (m *Accumulator) Collect(ctx context.Context) int {
|
||||
m.collectLock.Lock()
|
||||
defer m.collectLock.Unlock()
|
||||
|
||||
m.runAsyncCallbacks(ctx)
|
||||
checkpointed := m.collectInstruments()
|
||||
m.currentEpoch++
|
||||
|
||||
return checkpointed
|
||||
}
|
||||
|
||||
func (m *Accumulator) collectInstruments() int {
|
||||
checkpointed := 0
|
||||
|
||||
m.current.Range(func(key interface{}, value interface{}) bool {
|
||||
// Note: always continue to iterate over the entire
|
||||
// map by returning `true` in this function.
|
||||
inuse := value.(*record)
|
||||
|
||||
mods := atomic.LoadInt64(&inuse.updateCount)
|
||||
coll := inuse.collectedCount
|
||||
|
||||
if mods != coll {
|
||||
// Updates happened in this interval,
|
||||
// checkpoint and continue.
|
||||
checkpointed += m.checkpointRecord(inuse)
|
||||
inuse.collectedCount = mods
|
||||
return true
|
||||
}
|
||||
|
||||
// Having no updates since last collection, try to unmap:
|
||||
if unmapped := inuse.refMapped.tryUnmap(); !unmapped {
|
||||
// The record is referenced by a binding, continue.
|
||||
return true
|
||||
}
|
||||
|
||||
// If any other goroutines are now trying to re-insert this
|
||||
// entry in the map, they are busy calling Gosched() awaiting
|
||||
// this deletion:
|
||||
m.current.Delete(inuse.mapkey())
|
||||
|
||||
// There's a potential race between `LoadInt64` and
|
||||
// `tryUnmap` in this function. Since this is the
|
||||
// last we'll see of this record, checkpoint
|
||||
mods = atomic.LoadInt64(&inuse.updateCount)
|
||||
if mods != coll {
|
||||
checkpointed += m.checkpointRecord(inuse)
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
return checkpointed
|
||||
}
|
||||
|
||||
func (m *Accumulator) runAsyncCallbacks(ctx context.Context) {
|
||||
m.callbackLock.Lock()
|
||||
defer m.callbackLock.Unlock()
|
||||
|
||||
ctx = context.WithValue(ctx, asyncContextKey{}, m)
|
||||
|
||||
for cb := range m.callbacks {
|
||||
cb.f(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Accumulator) checkpointRecord(r *record) int {
|
||||
if r.current == nil {
|
||||
return 0
|
||||
}
|
||||
err := r.current.SynchronizedMove(r.checkpoint, &r.inst.descriptor)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
return 0
|
||||
}
|
||||
|
||||
a := export.NewAccumulation(&r.inst.descriptor, &r.attrs, r.checkpoint)
|
||||
err = m.processor.Process(a)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func (r *record) captureOne(ctx context.Context, num number.Number) {
|
||||
if r.current == nil {
|
||||
// The instrument is disabled according to the AggregatorSelector.
|
||||
return
|
||||
}
|
||||
if err := aggregator.RangeTest(num, &r.inst.descriptor); err != nil {
|
||||
otel.Handle(err)
|
||||
return
|
||||
}
|
||||
if err := r.current.Update(ctx, num, &r.inst.descriptor); err != nil {
|
||||
otel.Handle(err)
|
||||
return
|
||||
}
|
||||
// Record was modified, inform the Collect() that things need
|
||||
// to be collected while the record is still mapped.
|
||||
atomic.AddInt64(&r.updateCount, 1)
|
||||
}
|
||||
|
||||
func (r *record) unbind() {
|
||||
r.refMapped.unref()
|
||||
}
|
||||
|
||||
func (r *record) mapkey() mapkey {
|
||||
return mapkey{
|
||||
descriptor: &r.inst.descriptor,
|
||||
ordered: r.attrs.Equivalent(),
|
||||
}
|
||||
}
|
||||
|
||||
// fromSync gets an async implementation object, checking for
|
||||
// uninitialized instruments and instruments created by another SDK.
|
||||
func (m *Accumulator) fromAsync(async sdkapi.AsyncImpl) (*asyncInstrument, error) {
|
||||
if async == nil {
|
||||
return nil, ErrUninitializedInstrument
|
||||
}
|
||||
inst, ok := async.Implementation().(*asyncInstrument)
|
||||
if !ok {
|
||||
return nil, ErrBadInstrument
|
||||
}
|
||||
return inst, nil
|
||||
}
|
70
vendor/go.opentelemetry.io/otel/sdk/metric/sdkapi/descriptor.go
generated
vendored
70
vendor/go.opentelemetry.io/otel/sdk/metric/sdkapi/descriptor.go
generated
vendored
@ -1,70 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/metric/unit"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
)
|
||||
|
||||
// Descriptor contains all the settings that describe an instrument,
|
||||
// including its name, metric kind, number kind, and the configurable
|
||||
// options.
|
||||
type Descriptor struct {
|
||||
name string
|
||||
instrumentKind InstrumentKind
|
||||
numberKind number.Kind
|
||||
description string
|
||||
unit unit.Unit
|
||||
}
|
||||
|
||||
// NewDescriptor returns a Descriptor with the given contents.
|
||||
func NewDescriptor(name string, ikind InstrumentKind, nkind number.Kind, description string, u unit.Unit) Descriptor {
|
||||
return Descriptor{
|
||||
name: name,
|
||||
instrumentKind: ikind,
|
||||
numberKind: nkind,
|
||||
description: description,
|
||||
unit: u,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the metric instrument's name.
|
||||
func (d Descriptor) Name() string {
|
||||
return d.name
|
||||
}
|
||||
|
||||
// InstrumentKind returns the specific kind of instrument.
|
||||
func (d Descriptor) InstrumentKind() InstrumentKind {
|
||||
return d.instrumentKind
|
||||
}
|
||||
|
||||
// Description provides a human-readable description of the metric
|
||||
// instrument.
|
||||
func (d Descriptor) Description() string {
|
||||
return d.description
|
||||
}
|
||||
|
||||
// Unit describes the units of the metric instrument. Unitless
|
||||
// metrics return the empty string.
|
||||
func (d Descriptor) Unit() unit.Unit {
|
||||
return d.unit
|
||||
}
|
||||
|
||||
// NumberKind returns whether this instrument is declared over int64,
|
||||
// float64, or uint64 values.
|
||||
func (d Descriptor) NumberKind() number.Kind {
|
||||
return d.numberKind
|
||||
}
|
80
vendor/go.opentelemetry.io/otel/sdk/metric/sdkapi/instrumentkind.go
generated
vendored
80
vendor/go.opentelemetry.io/otel/sdk/metric/sdkapi/instrumentkind.go
generated
vendored
@ -1,80 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:generate stringer -type=InstrumentKind
|
||||
|
||||
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
|
||||
// InstrumentKind describes the kind of instrument.
|
||||
type InstrumentKind int8
|
||||
|
||||
const (
|
||||
// HistogramInstrumentKind indicates a Histogram instrument.
|
||||
HistogramInstrumentKind InstrumentKind = iota
|
||||
// GaugeObserverInstrumentKind indicates an GaugeObserver instrument.
|
||||
GaugeObserverInstrumentKind
|
||||
|
||||
// CounterInstrumentKind indicates a Counter instrument.
|
||||
CounterInstrumentKind
|
||||
// UpDownCounterInstrumentKind indicates a UpDownCounter instrument.
|
||||
UpDownCounterInstrumentKind
|
||||
|
||||
// CounterObserverInstrumentKind indicates a CounterObserver instrument.
|
||||
CounterObserverInstrumentKind
|
||||
// UpDownCounterObserverInstrumentKind indicates a UpDownCounterObserver
|
||||
// instrument.
|
||||
UpDownCounterObserverInstrumentKind
|
||||
)
|
||||
|
||||
// Synchronous returns whether this is a synchronous kind of instrument.
|
||||
func (k InstrumentKind) Synchronous() bool {
|
||||
switch k {
|
||||
case CounterInstrumentKind, UpDownCounterInstrumentKind, HistogramInstrumentKind:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Asynchronous returns whether this is an asynchronous kind of instrument.
|
||||
func (k InstrumentKind) Asynchronous() bool {
|
||||
return !k.Synchronous()
|
||||
}
|
||||
|
||||
// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping).
|
||||
func (k InstrumentKind) Adding() bool {
|
||||
switch k {
|
||||
case CounterInstrumentKind, UpDownCounterInstrumentKind, CounterObserverInstrumentKind, UpDownCounterObserverInstrumentKind:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding).
|
||||
func (k InstrumentKind) Grouping() bool {
|
||||
return !k.Adding()
|
||||
}
|
||||
|
||||
// Monotonic returns whether this kind of instrument exposes a non-decreasing sum.
|
||||
func (k InstrumentKind) Monotonic() bool {
|
||||
switch k {
|
||||
case CounterInstrumentKind, CounterObserverInstrumentKind:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PrecomputedSum returns whether this kind of instrument receives precomputed sums.
|
||||
func (k InstrumentKind) PrecomputedSum() bool {
|
||||
return k.Adding() && k.Asynchronous()
|
||||
}
|
28
vendor/go.opentelemetry.io/otel/sdk/metric/sdkapi/instrumentkind_string.go
generated
vendored
28
vendor/go.opentelemetry.io/otel/sdk/metric/sdkapi/instrumentkind_string.go
generated
vendored
@ -1,28 +0,0 @@
|
||||
// Code generated by "stringer -type=InstrumentKind"; DO NOT EDIT.
|
||||
|
||||
package sdkapi
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[HistogramInstrumentKind-0]
|
||||
_ = x[GaugeObserverInstrumentKind-1]
|
||||
_ = x[CounterInstrumentKind-2]
|
||||
_ = x[UpDownCounterInstrumentKind-3]
|
||||
_ = x[CounterObserverInstrumentKind-4]
|
||||
_ = x[UpDownCounterObserverInstrumentKind-5]
|
||||
}
|
||||
|
||||
const _InstrumentKind_name = "HistogramInstrumentKindGaugeObserverInstrumentKindCounterInstrumentKindUpDownCounterInstrumentKindCounterObserverInstrumentKindUpDownCounterObserverInstrumentKind"
|
||||
|
||||
var _InstrumentKind_index = [...]uint8{0, 23, 50, 71, 98, 127, 162}
|
||||
|
||||
func (i InstrumentKind) String() string {
|
||||
if i < 0 || i >= InstrumentKind(len(_InstrumentKind_index)-1) {
|
||||
return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]]
|
||||
}
|
83
vendor/go.opentelemetry.io/otel/sdk/metric/sdkapi/noop.go
generated
vendored
83
vendor/go.opentelemetry.io/otel/sdk/metric/sdkapi/noop.go
generated
vendored
@ -1,83 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
) // import (
|
||||
// "context"
|
||||
|
||||
// "go.opentelemetry.io/otel/attribute"
|
||||
// "go.opentelemetry.io/otel/sdk/metric/number"
|
||||
// )
|
||||
|
||||
type noopInstrument struct {
|
||||
descriptor Descriptor
|
||||
}
|
||||
type noopSyncInstrument struct {
|
||||
noopInstrument
|
||||
|
||||
instrument.Synchronous
|
||||
}
|
||||
type noopAsyncInstrument struct {
|
||||
noopInstrument
|
||||
|
||||
instrument.Asynchronous
|
||||
}
|
||||
|
||||
var _ SyncImpl = noopSyncInstrument{}
|
||||
var _ AsyncImpl = noopAsyncInstrument{}
|
||||
|
||||
// NewNoopSyncInstrument returns a No-op implementation of the
|
||||
// synchronous instrument interface.
|
||||
func NewNoopSyncInstrument() SyncImpl {
|
||||
return noopSyncInstrument{
|
||||
noopInstrument: noopInstrument{
|
||||
descriptor: Descriptor{
|
||||
instrumentKind: CounterInstrumentKind,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewNoopAsyncInstrument returns a No-op implementation of the
|
||||
// asynchronous instrument interface.
|
||||
func NewNoopAsyncInstrument() AsyncImpl {
|
||||
return noopAsyncInstrument{
|
||||
noopInstrument: noopInstrument{
|
||||
descriptor: Descriptor{
|
||||
instrumentKind: CounterObserverInstrumentKind,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (noopInstrument) Implementation() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n noopInstrument) Descriptor() Descriptor {
|
||||
return n.descriptor
|
||||
}
|
||||
|
||||
func (noopSyncInstrument) RecordOne(context.Context, number.Number, []attribute.KeyValue) {
|
||||
}
|
||||
|
||||
func (noopAsyncInstrument) ObserveOne(context.Context, number.Number, []attribute.KeyValue) {
|
||||
}
|
162
vendor/go.opentelemetry.io/otel/sdk/metric/sdkapi/sdkapi.go
generated
vendored
162
vendor/go.opentelemetry.io/otel/sdk/metric/sdkapi/sdkapi.go
generated
vendored
@ -1,162 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
)
|
||||
|
||||
// MeterImpl is the interface an SDK must implement to supply a Meter
|
||||
// implementation.
|
||||
type MeterImpl interface {
|
||||
// NewSyncInstrument returns a newly constructed
|
||||
// synchronous instrument implementation or an error, should
|
||||
// one occur.
|
||||
NewSyncInstrument(descriptor Descriptor) (SyncImpl, error)
|
||||
|
||||
// NewAsyncInstrument returns a newly constructed
|
||||
// asynchronous instrument implementation or an error, should
|
||||
// one occur.
|
||||
NewAsyncInstrument(descriptor Descriptor) (AsyncImpl, error)
|
||||
|
||||
// Etc.
|
||||
RegisterCallback(insts []instrument.Asynchronous, callback func(context.Context)) error
|
||||
}
|
||||
|
||||
// InstrumentImpl is a common interface for synchronous and
|
||||
// asynchronous instruments.
|
||||
type InstrumentImpl interface {
|
||||
// Implementation returns the underlying implementation of the
|
||||
// instrument, which allows the implementation to gain access
|
||||
// to its own representation especially from a `Measurement`.
|
||||
Implementation() interface{}
|
||||
|
||||
// Descriptor returns a copy of the instrument's Descriptor.
|
||||
Descriptor() Descriptor
|
||||
}
|
||||
|
||||
// SyncImpl is the implementation-level interface to a generic
|
||||
// synchronous instrument (e.g., Histogram and Counter instruments).
|
||||
type SyncImpl interface {
|
||||
InstrumentImpl
|
||||
instrument.Synchronous
|
||||
|
||||
// RecordOne captures a single synchronous metric event.
|
||||
RecordOne(ctx context.Context, n number.Number, attrs []attribute.KeyValue)
|
||||
}
|
||||
|
||||
// AsyncImpl is an implementation-level interface to an
|
||||
// asynchronous instrument (e.g., Observer instruments).
|
||||
type AsyncImpl interface {
|
||||
InstrumentImpl
|
||||
instrument.Asynchronous
|
||||
|
||||
// ObserveOne captures a single synchronous metric event.
|
||||
ObserveOne(ctx context.Context, n number.Number, attrs []attribute.KeyValue)
|
||||
}
|
||||
|
||||
// AsyncRunner is expected to convert into an AsyncSingleRunner or an
|
||||
// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner
|
||||
// does not satisfy one of these interfaces.
|
||||
type AsyncRunner interface {
|
||||
// AnyRunner is a non-exported method with no functional use
|
||||
// other than to make this a non-empty interface.
|
||||
AnyRunner()
|
||||
}
|
||||
|
||||
// AsyncSingleRunner is an interface implemented by single-observer
|
||||
// callbacks.
|
||||
type AsyncSingleRunner interface {
|
||||
// Run accepts a single instrument and function for capturing
|
||||
// observations of that instrument. Each call to the function
|
||||
// receives one captured observation. (The function accepts
|
||||
// multiple observations so the same implementation can be
|
||||
// used for batch runners.)
|
||||
Run(ctx context.Context, single AsyncImpl, capture func([]attribute.KeyValue, ...Observation))
|
||||
|
||||
AsyncRunner
|
||||
}
|
||||
|
||||
// AsyncBatchRunner is an interface implemented by batch-observer
|
||||
// callbacks.
|
||||
type AsyncBatchRunner interface {
|
||||
// Run accepts a function for capturing observations of
|
||||
// multiple instruments.
|
||||
Run(ctx context.Context, capture func([]attribute.KeyValue, ...Observation))
|
||||
|
||||
AsyncRunner
|
||||
}
|
||||
|
||||
// NewMeasurement constructs a single observation, a binding between
|
||||
// an asynchronous instrument and a number.
|
||||
func NewMeasurement(inst SyncImpl, n number.Number) Measurement {
|
||||
return Measurement{
|
||||
instrument: inst,
|
||||
number: n,
|
||||
}
|
||||
}
|
||||
|
||||
// Measurement is a low-level type used with synchronous instruments
|
||||
// as a direct interface to the SDK via `RecordBatch`.
|
||||
type Measurement struct {
|
||||
// number needs to be aligned for 64-bit atomic operations.
|
||||
number number.Number
|
||||
instrument SyncImpl
|
||||
}
|
||||
|
||||
// SyncImpl returns the instrument that created this measurement.
|
||||
// This returns an implementation-level object for use by the SDK,
|
||||
// users should not refer to this.
|
||||
func (m Measurement) SyncImpl() SyncImpl {
|
||||
return m.instrument
|
||||
}
|
||||
|
||||
// Number returns a number recorded in this measurement.
|
||||
func (m Measurement) Number() number.Number {
|
||||
return m.number
|
||||
}
|
||||
|
||||
// NewObservation constructs a single observation, a binding between
|
||||
// an asynchronous instrument and a number.
|
||||
func NewObservation(inst AsyncImpl, n number.Number) Observation {
|
||||
return Observation{
|
||||
instrument: inst,
|
||||
number: n,
|
||||
}
|
||||
}
|
||||
|
||||
// Observation is a low-level type used with asynchronous instruments
|
||||
// as a direct interface to the SDK via `BatchObserver`.
|
||||
type Observation struct {
|
||||
// number needs to be aligned for 64-bit atomic operations.
|
||||
number number.Number
|
||||
instrument AsyncImpl
|
||||
}
|
||||
|
||||
// AsyncImpl returns the instrument that created this observation.
|
||||
// This returns an implementation-level object for use by the SDK,
|
||||
// users should not refer to this.
|
||||
func (m Observation) AsyncImpl() AsyncImpl {
|
||||
return m.instrument
|
||||
}
|
||||
|
||||
// Number returns a number recorded in this observation.
|
||||
func (m Observation) Number() number.Number {
|
||||
return m.number
|
||||
}
|
183
vendor/go.opentelemetry.io/otel/sdk/metric/sdkapi/wrap.go
generated
vendored
183
vendor/go.opentelemetry.io/otel/sdk/metric/sdkapi/wrap.go
generated
vendored
@ -1,183 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
"go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
|
||||
"go.opentelemetry.io/otel/metric/instrument/asyncint64"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncint64"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
)
|
||||
|
||||
type (
|
||||
meter struct{ MeterImpl }
|
||||
sfMeter struct{ meter }
|
||||
siMeter struct{ meter }
|
||||
afMeter struct{ meter }
|
||||
aiMeter struct{ meter }
|
||||
|
||||
iAdder struct{ SyncImpl }
|
||||
fAdder struct{ SyncImpl }
|
||||
iRecorder struct{ SyncImpl }
|
||||
fRecorder struct{ SyncImpl }
|
||||
iObserver struct{ AsyncImpl }
|
||||
fObserver struct{ AsyncImpl }
|
||||
)
|
||||
|
||||
// WrapMeterImpl wraps impl to be a full implementation of a Meter.
|
||||
func WrapMeterImpl(impl MeterImpl) metric.Meter {
|
||||
return meter{impl}
|
||||
}
|
||||
|
||||
// UnwrapMeterImpl unwraps the Meter to its bare MeterImpl.
|
||||
func UnwrapMeterImpl(m metric.Meter) MeterImpl {
|
||||
mm, ok := m.(meter)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return mm.MeterImpl
|
||||
}
|
||||
|
||||
func (m meter) AsyncFloat64() asyncfloat64.InstrumentProvider {
|
||||
return afMeter{m}
|
||||
}
|
||||
|
||||
func (m meter) AsyncInt64() asyncint64.InstrumentProvider {
|
||||
return aiMeter{m}
|
||||
}
|
||||
|
||||
func (m meter) SyncFloat64() syncfloat64.InstrumentProvider {
|
||||
return sfMeter{m}
|
||||
}
|
||||
|
||||
func (m meter) SyncInt64() syncint64.InstrumentProvider {
|
||||
return siMeter{m}
|
||||
}
|
||||
|
||||
func (m meter) RegisterCallback(insts []instrument.Asynchronous, cb func(ctx context.Context)) error {
|
||||
return m.MeterImpl.RegisterCallback(insts, cb)
|
||||
}
|
||||
|
||||
func (m meter) newSync(name string, ikind InstrumentKind, nkind number.Kind, opts []instrument.Option) (SyncImpl, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
return m.NewSyncInstrument(NewDescriptor(name, ikind, nkind, cfg.Description(), cfg.Unit()))
|
||||
}
|
||||
|
||||
func (m meter) newAsync(name string, ikind InstrumentKind, nkind number.Kind, opts []instrument.Option) (AsyncImpl, error) {
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
return m.NewAsyncInstrument(NewDescriptor(name, ikind, nkind, cfg.Description(), cfg.Unit()))
|
||||
}
|
||||
|
||||
func (m afMeter) Counter(name string, opts ...instrument.Option) (asyncfloat64.Counter, error) {
|
||||
inst, err := m.newAsync(name, CounterObserverInstrumentKind, number.Float64Kind, opts)
|
||||
return fObserver{inst}, err
|
||||
}
|
||||
|
||||
func (m afMeter) UpDownCounter(name string, opts ...instrument.Option) (asyncfloat64.UpDownCounter, error) {
|
||||
inst, err := m.newAsync(name, UpDownCounterObserverInstrumentKind, number.Float64Kind, opts)
|
||||
return fObserver{inst}, err
|
||||
}
|
||||
|
||||
func (m afMeter) Gauge(name string, opts ...instrument.Option) (asyncfloat64.Gauge, error) {
|
||||
inst, err := m.newAsync(name, GaugeObserverInstrumentKind, number.Float64Kind, opts)
|
||||
return fObserver{inst}, err
|
||||
}
|
||||
|
||||
func (m aiMeter) Counter(name string, opts ...instrument.Option) (asyncint64.Counter, error) {
|
||||
inst, err := m.newAsync(name, CounterObserverInstrumentKind, number.Int64Kind, opts)
|
||||
return iObserver{inst}, err
|
||||
}
|
||||
|
||||
func (m aiMeter) UpDownCounter(name string, opts ...instrument.Option) (asyncint64.UpDownCounter, error) {
|
||||
inst, err := m.newAsync(name, UpDownCounterObserverInstrumentKind, number.Int64Kind, opts)
|
||||
return iObserver{inst}, err
|
||||
}
|
||||
|
||||
func (m aiMeter) Gauge(name string, opts ...instrument.Option) (asyncint64.Gauge, error) {
|
||||
inst, err := m.newAsync(name, GaugeObserverInstrumentKind, number.Int64Kind, opts)
|
||||
return iObserver{inst}, err
|
||||
}
|
||||
|
||||
func (m sfMeter) Counter(name string, opts ...instrument.Option) (syncfloat64.Counter, error) {
|
||||
inst, err := m.newSync(name, CounterInstrumentKind, number.Float64Kind, opts)
|
||||
return fAdder{inst}, err
|
||||
}
|
||||
|
||||
func (m sfMeter) UpDownCounter(name string, opts ...instrument.Option) (syncfloat64.UpDownCounter, error) {
|
||||
inst, err := m.newSync(name, UpDownCounterInstrumentKind, number.Float64Kind, opts)
|
||||
return fAdder{inst}, err
|
||||
}
|
||||
|
||||
func (m sfMeter) Histogram(name string, opts ...instrument.Option) (syncfloat64.Histogram, error) {
|
||||
inst, err := m.newSync(name, HistogramInstrumentKind, number.Float64Kind, opts)
|
||||
return fRecorder{inst}, err
|
||||
}
|
||||
|
||||
func (m siMeter) Counter(name string, opts ...instrument.Option) (syncint64.Counter, error) {
|
||||
inst, err := m.newSync(name, CounterInstrumentKind, number.Int64Kind, opts)
|
||||
return iAdder{inst}, err
|
||||
}
|
||||
|
||||
func (m siMeter) UpDownCounter(name string, opts ...instrument.Option) (syncint64.UpDownCounter, error) {
|
||||
inst, err := m.newSync(name, UpDownCounterInstrumentKind, number.Int64Kind, opts)
|
||||
return iAdder{inst}, err
|
||||
}
|
||||
|
||||
func (m siMeter) Histogram(name string, opts ...instrument.Option) (syncint64.Histogram, error) {
|
||||
inst, err := m.newSync(name, HistogramInstrumentKind, number.Int64Kind, opts)
|
||||
return iRecorder{inst}, err
|
||||
}
|
||||
|
||||
func (a fAdder) Add(ctx context.Context, value float64, attrs ...attribute.KeyValue) {
|
||||
if a.SyncImpl != nil {
|
||||
a.SyncImpl.RecordOne(ctx, number.NewFloat64Number(value), attrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (a iAdder) Add(ctx context.Context, value int64, attrs ...attribute.KeyValue) {
|
||||
if a.SyncImpl != nil {
|
||||
a.SyncImpl.RecordOne(ctx, number.NewInt64Number(value), attrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (a fRecorder) Record(ctx context.Context, value float64, attrs ...attribute.KeyValue) {
|
||||
if a.SyncImpl != nil {
|
||||
a.SyncImpl.RecordOne(ctx, number.NewFloat64Number(value), attrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (a iRecorder) Record(ctx context.Context, value int64, attrs ...attribute.KeyValue) {
|
||||
if a.SyncImpl != nil {
|
||||
a.SyncImpl.RecordOne(ctx, number.NewInt64Number(value), attrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (a fObserver) Observe(ctx context.Context, value float64, attrs ...attribute.KeyValue) {
|
||||
if a.AsyncImpl != nil {
|
||||
a.AsyncImpl.ObserveOne(ctx, number.NewFloat64Number(value), attrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (a iObserver) Observe(ctx context.Context, value int64, attrs ...attribute.KeyValue) {
|
||||
if a.AsyncImpl != nil {
|
||||
a.AsyncImpl.ObserveOne(ctx, number.NewInt64Number(value), attrs)
|
||||
}
|
||||
}
|
94
vendor/go.opentelemetry.io/otel/sdk/metric/selector/simple/simple.go
generated
vendored
94
vendor/go.opentelemetry.io/otel/sdk/metric/selector/simple/simple.go
generated
vendored
@ -1,94 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package simple // import "go.opentelemetry.io/otel/sdk/metric/selector/simple"
|
||||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
)
|
||||
|
||||
type (
|
||||
selectorInexpensive struct{}
|
||||
selectorHistogram struct {
|
||||
options []histogram.Option
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
_ export.AggregatorSelector = selectorInexpensive{}
|
||||
_ export.AggregatorSelector = selectorHistogram{}
|
||||
)
|
||||
|
||||
// NewWithInexpensiveDistribution returns a simple aggregator selector
|
||||
// that uses minmaxsumcount aggregators for `Histogram`
|
||||
// instruments. This selector is faster and uses less memory than the
|
||||
// others in this package because minmaxsumcount aggregators maintain
|
||||
// the least information about the distribution among these choices.
|
||||
func NewWithInexpensiveDistribution() export.AggregatorSelector {
|
||||
return selectorInexpensive{}
|
||||
}
|
||||
|
||||
// NewWithHistogramDistribution returns a simple aggregator selector
|
||||
// that uses histogram aggregators for `Histogram` instruments.
|
||||
// This selector is a good default choice for most metric exporters.
|
||||
func NewWithHistogramDistribution(options ...histogram.Option) export.AggregatorSelector {
|
||||
return selectorHistogram{options: options}
|
||||
}
|
||||
|
||||
func sumAggs(aggPtrs []*aggregator.Aggregator) {
|
||||
aggs := sum.New(len(aggPtrs))
|
||||
for i := range aggPtrs {
|
||||
*aggPtrs[i] = &aggs[i]
|
||||
}
|
||||
}
|
||||
|
||||
func lastValueAggs(aggPtrs []*aggregator.Aggregator) {
|
||||
aggs := lastvalue.New(len(aggPtrs))
|
||||
for i := range aggPtrs {
|
||||
*aggPtrs[i] = &aggs[i]
|
||||
}
|
||||
}
|
||||
|
||||
func (selectorInexpensive) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*aggregator.Aggregator) {
|
||||
switch descriptor.InstrumentKind() {
|
||||
case sdkapi.GaugeObserverInstrumentKind:
|
||||
lastValueAggs(aggPtrs)
|
||||
case sdkapi.HistogramInstrumentKind:
|
||||
aggs := sum.New(len(aggPtrs))
|
||||
for i := range aggPtrs {
|
||||
*aggPtrs[i] = &aggs[i]
|
||||
}
|
||||
default:
|
||||
sumAggs(aggPtrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (s selectorHistogram) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*aggregator.Aggregator) {
|
||||
switch descriptor.InstrumentKind() {
|
||||
case sdkapi.GaugeObserverInstrumentKind:
|
||||
lastValueAggs(aggPtrs)
|
||||
case sdkapi.HistogramInstrumentKind:
|
||||
aggs := histogram.New(len(aggPtrs), descriptor, s.options...)
|
||||
for i := range aggPtrs {
|
||||
*aggPtrs[i] = &aggs[i]
|
||||
}
|
||||
default:
|
||||
sumAggs(aggPtrs)
|
||||
}
|
||||
}
|
20
vendor/go.opentelemetry.io/otel/sdk/metric/view/doc.go
generated
vendored
Normal file
20
vendor/go.opentelemetry.io/otel/sdk/metric/view/doc.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package view provides types and functionality that customize the metric
|
||||
// telemetry an SDK will produce. The View type is used when a Reader is
|
||||
// registered with a MeterProvider in the go.opentelemetry.io/otel/sdk/metric
|
||||
// package. See the WithReader option in that package for more information on
|
||||
// how this registration takes place.
|
||||
package view // import "go.opentelemetry.io/otel/sdk/metric/view"
|
@ -12,17 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metrictransform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform"
|
||||
package view // import "go.opentelemetry.io/otel/sdk/metric/view"
|
||||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||
)
|
||||
|
||||
// Resource transforms a Resource into an OTLP Resource.
|
||||
func Resource(r *resource.Resource) *resourcepb.Resource {
|
||||
if r == nil {
|
||||
return nil
|
||||
}
|
||||
return &resourcepb.Resource{Attributes: ResourceAttributes(r)}
|
||||
// Instrument uniquely identifies an instrument within a meter.
|
||||
type Instrument struct {
|
||||
Scope instrumentation.Scope
|
||||
|
||||
Name string
|
||||
Description string
|
||||
Kind InstrumentKind
|
||||
Aggregation aggregation.Aggregation
|
||||
}
|
43
vendor/go.opentelemetry.io/otel/sdk/metric/view/instrumentkind.go
generated
vendored
Normal file
43
vendor/go.opentelemetry.io/otel/sdk/metric/view/instrumentkind.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package view // import "go.opentelemetry.io/otel/sdk/metric/view"
|
||||
|
||||
// InstrumentKind describes the kind of instrument a Meter can create.
|
||||
type InstrumentKind uint8
|
||||
|
||||
// These are all the instrument kinds supported by the SDK.
|
||||
const (
|
||||
// undefinedInstrument is an uninitialized instrument kind, should not be used.
|
||||
//nolint:deadcode,varcheck
|
||||
undefinedInstrument InstrumentKind = iota
|
||||
// SyncCounter is an instrument kind that records increasing values
|
||||
// synchronously in application code.
|
||||
SyncCounter
|
||||
// SyncUpDownCounter is an instrument kind that records increasing and
|
||||
// decreasing values synchronously in application code.
|
||||
SyncUpDownCounter
|
||||
// SyncHistogram is an instrument kind that records a distribution of
|
||||
// values synchronously in application code.
|
||||
SyncHistogram
|
||||
// AsyncCounter is an instrument kind that records increasing values in an
|
||||
// asynchronous callback.
|
||||
AsyncCounter
|
||||
// AsyncUpDownCounter is an instrument kind that records increasing and
|
||||
// decreasing values in an asynchronous callback.
|
||||
AsyncUpDownCounter
|
||||
// AsyncGauge is an instrument kind that records current values in an
|
||||
// asynchronous callback.
|
||||
AsyncGauge
|
||||
)
|
232
vendor/go.opentelemetry.io/otel/sdk/metric/view/view.go
generated
vendored
Normal file
232
vendor/go.opentelemetry.io/otel/sdk/metric/view/view.go
generated
vendored
Normal file
@ -0,0 +1,232 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package view // import "go.opentelemetry.io/otel/sdk/metric/view"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||
)
|
||||
|
||||
// View provides users with the flexibility to customize the metrics that are
|
||||
// output by the SDK. A View can be used to ignore, change the name,
|
||||
// description, and aggregation of, and customize which attribute(s) are to be
|
||||
// reported by Instruments.
|
||||
//
|
||||
// An empty View will match all instruments, and do no transformations.
|
||||
type View struct {
|
||||
instrumentName *regexp.Regexp
|
||||
hasWildcard bool
|
||||
scope instrumentation.Scope
|
||||
instrumentKind InstrumentKind
|
||||
|
||||
filter attribute.Filter
|
||||
name string
|
||||
description string
|
||||
agg aggregation.Aggregation
|
||||
}
|
||||
|
||||
// New returns a new configured View. If there are any duplicate Options passed,
|
||||
// the last one passed will take precedence. The unique, de-duplicated,
|
||||
// Options are all applied to the View. An instrument needs to match all of
|
||||
// the match Options passed for the View to be applied to it. Similarly, all
|
||||
// transform operation Options are applied to matched Instruments.
|
||||
func New(opts ...Option) (View, error) {
|
||||
v := View{}
|
||||
|
||||
for _, opt := range opts {
|
||||
v = opt.apply(v)
|
||||
}
|
||||
|
||||
emptyScope := instrumentation.Scope{}
|
||||
if v.instrumentName == nil &&
|
||||
v.scope == emptyScope &&
|
||||
v.instrumentKind == undefinedInstrument {
|
||||
return View{}, fmt.Errorf("must provide at least 1 match option")
|
||||
}
|
||||
|
||||
if v.hasWildcard && v.name != "" {
|
||||
return View{}, fmt.Errorf("invalid view: view name specified for multiple instruments")
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// TransformInstrument will check if an instrument matches this view
|
||||
// and will convert it if it does.
|
||||
func (v View) TransformInstrument(inst Instrument) (transformed Instrument, match bool) {
|
||||
if !v.match(inst) {
|
||||
return Instrument{}, false
|
||||
}
|
||||
if v.name != "" {
|
||||
inst.Name = v.name
|
||||
}
|
||||
if v.description != "" {
|
||||
inst.Description = v.description
|
||||
}
|
||||
if v.agg != nil {
|
||||
inst.Aggregation = v.agg
|
||||
}
|
||||
return inst, true
|
||||
}
|
||||
|
||||
// AttributeFilter returns a function that returns only attributes specified by
|
||||
// WithFilterAttributes. If no filter was provided nil is returned.
|
||||
func (v View) AttributeFilter() func(attribute.Set) attribute.Set {
|
||||
if v.filter == nil {
|
||||
return nil
|
||||
}
|
||||
return func(input attribute.Set) attribute.Set {
|
||||
out, _ := input.Filter(v.filter)
|
||||
return out
|
||||
}
|
||||
}
|
||||
|
||||
func (v View) matchName(name string) bool {
|
||||
return v.instrumentName == nil || v.instrumentName.MatchString(name)
|
||||
}
|
||||
|
||||
func (v View) matchScopeName(name string) bool {
|
||||
return v.scope.Name == "" || name == v.scope.Name
|
||||
}
|
||||
|
||||
func (v View) matchScopeVersion(version string) bool {
|
||||
return v.scope.Version == "" || version == v.scope.Version
|
||||
}
|
||||
|
||||
func (v View) matchScopeSchemaURL(schemaURL string) bool {
|
||||
return v.scope.SchemaURL == "" || schemaURL == v.scope.SchemaURL
|
||||
}
|
||||
|
||||
func (v View) matchInstrumentKind(kind InstrumentKind) bool {
|
||||
return v.instrumentKind == undefinedInstrument || kind == v.instrumentKind
|
||||
}
|
||||
|
||||
func (v View) match(i Instrument) bool {
|
||||
return v.matchName(i.Name) &&
|
||||
v.matchScopeName(i.Scope.Name) &&
|
||||
v.matchScopeSchemaURL(i.Scope.SchemaURL) &&
|
||||
v.matchScopeVersion(i.Scope.Version) &&
|
||||
v.matchInstrumentKind(i.Kind)
|
||||
}
|
||||
|
||||
// Option applies a configuration option value to a View.
|
||||
type Option interface {
|
||||
apply(View) View
|
||||
}
|
||||
|
||||
type optionFunc func(View) View
|
||||
|
||||
func (f optionFunc) apply(v View) View {
|
||||
return f(v)
|
||||
}
|
||||
|
||||
// MatchInstrumentName will match an instrument based on the its name.
|
||||
// This will accept wildcards of * for zero or more characters, and ? for
|
||||
// exactly one character. A name of "*" (default) will match all instruments.
|
||||
func MatchInstrumentName(name string) Option {
|
||||
return optionFunc(func(v View) View {
|
||||
if strings.ContainsAny(name, "*?") {
|
||||
v.hasWildcard = true
|
||||
}
|
||||
name = regexp.QuoteMeta(name)
|
||||
name = "^" + name + "$"
|
||||
name = strings.ReplaceAll(name, "\\?", ".")
|
||||
name = strings.ReplaceAll(name, "\\*", ".*")
|
||||
v.instrumentName = regexp.MustCompile(name)
|
||||
return v
|
||||
})
|
||||
}
|
||||
|
||||
// MatchInstrumentKind with match an instrument based on the instrument's kind.
|
||||
// The default is to match all instrument kinds.
|
||||
func MatchInstrumentKind(kind InstrumentKind) Option {
|
||||
return optionFunc(func(v View) View {
|
||||
v.instrumentKind = kind
|
||||
return v
|
||||
})
|
||||
}
|
||||
|
||||
// MatchInstrumentationScope will do an exact match on any
|
||||
// instrumentation.Scope field that is non-empty (""). The default is to match all
|
||||
// instrumentation scopes.
|
||||
func MatchInstrumentationScope(scope instrumentation.Scope) Option {
|
||||
return optionFunc(func(v View) View {
|
||||
v.scope = scope
|
||||
return v
|
||||
})
|
||||
}
|
||||
|
||||
// WithRename will rename the instrument the view matches. If not used or empty the
|
||||
// instrument name will not be changed. Must be used with a non-wildcard
|
||||
// instrument name match. The default does not change the instrument name.
|
||||
func WithRename(name string) Option {
|
||||
return optionFunc(func(v View) View {
|
||||
v.name = name
|
||||
return v
|
||||
})
|
||||
}
|
||||
|
||||
// WithSetDescription will change the description of the instruments the view
|
||||
// matches to desc. If not used or empty the description will not be changed.
|
||||
func WithSetDescription(desc string) Option {
|
||||
return optionFunc(func(v View) View {
|
||||
v.description = desc
|
||||
return v
|
||||
})
|
||||
}
|
||||
|
||||
// WithFilterAttributes will select attributes that have a matching key. If not used
|
||||
// or empty no filter will be applied.
|
||||
func WithFilterAttributes(keys ...attribute.Key) Option {
|
||||
return optionFunc(func(v View) View {
|
||||
if len(keys) == 0 {
|
||||
return v
|
||||
}
|
||||
filterKeys := map[attribute.Key]struct{}{}
|
||||
for _, key := range keys {
|
||||
filterKeys[key] = struct{}{}
|
||||
}
|
||||
|
||||
v.filter = attribute.Filter(func(kv attribute.KeyValue) bool {
|
||||
_, ok := filterKeys[kv.Key]
|
||||
return ok
|
||||
})
|
||||
return v
|
||||
})
|
||||
}
|
||||
|
||||
// WithSetAggregation will use the aggregation a for matching instruments. If
|
||||
// this option is not provided, the reader defined aggregation for the
|
||||
// instrument will be used.
|
||||
//
|
||||
// If a is misconfigured, it will not be used and an error will be logged.
|
||||
func WithSetAggregation(a aggregation.Aggregation) Option {
|
||||
cpA := a.Copy()
|
||||
if err := cpA.Err(); err != nil {
|
||||
global.Error(err, "not using aggregation with view", "aggregation", a)
|
||||
return optionFunc(func(v View) View { return v })
|
||||
}
|
||||
|
||||
return optionFunc(func(v View) View {
|
||||
v.agg = cpA
|
||||
return v
|
||||
})
|
||||
}
|
11
vendor/go.opentelemetry.io/otel/sdk/resource/env.go
generated
vendored
11
vendor/go.opentelemetry.io/otel/sdk/resource/env.go
generated
vendored
@ -17,9 +17,11 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource"
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
|
||||
)
|
||||
@ -88,7 +90,14 @@ func constructOTResources(s string) (*Resource, error) {
|
||||
invalid = append(invalid, p)
|
||||
continue
|
||||
}
|
||||
k, v := strings.TrimSpace(field[0]), strings.TrimSpace(field[1])
|
||||
k := strings.TrimSpace(field[0])
|
||||
v, err := url.QueryUnescape(strings.TrimSpace(field[1]))
|
||||
if err != nil {
|
||||
// Retain original value if decoding fails, otherwise it will be
|
||||
// an empty string.
|
||||
v = field[1]
|
||||
otel.Handle(err)
|
||||
}
|
||||
attrs = append(attrs, attribute.String(k, v))
|
||||
}
|
||||
var err error
|
||||
|
61
vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
generated
vendored
61
vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
generated
vendored
@ -90,10 +90,10 @@ var _ trace.TracerProvider = &TracerProvider{}
|
||||
// NewTracerProvider returns a new and configured TracerProvider.
|
||||
//
|
||||
// By default the returned TracerProvider is configured with:
|
||||
// - a ParentBased(AlwaysSample) Sampler
|
||||
// - a random number IDGenerator
|
||||
// - the resource.Default() Resource
|
||||
// - the default SpanLimits.
|
||||
// - a ParentBased(AlwaysSample) Sampler
|
||||
// - a random number IDGenerator
|
||||
// - the resource.Default() Resource
|
||||
// - the default SpanLimits.
|
||||
//
|
||||
// The passed opts are used to override these default values and configure the
|
||||
// returned TracerProvider appropriately.
|
||||
@ -116,12 +116,13 @@ func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
|
||||
spanLimits: o.spanLimits,
|
||||
resource: o.resource,
|
||||
}
|
||||
|
||||
global.Info("TracerProvider created", "config", o)
|
||||
|
||||
spss := spanProcessorStates{}
|
||||
for _, sp := range o.processors {
|
||||
tp.RegisterSpanProcessor(sp)
|
||||
spss = append(spss, newSpanProcessorState(sp))
|
||||
}
|
||||
tp.spanProcessors.Store(spss)
|
||||
|
||||
return tp
|
||||
}
|
||||
@ -159,44 +160,38 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
|
||||
}
|
||||
|
||||
// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
|
||||
func (p *TracerProvider) RegisterSpanProcessor(s SpanProcessor) {
|
||||
func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
new := spanProcessorStates{}
|
||||
if old, ok := p.spanProcessors.Load().(spanProcessorStates); ok {
|
||||
new = append(new, old...)
|
||||
}
|
||||
newSpanSync := &spanProcessorState{
|
||||
sp: s,
|
||||
state: &sync.Once{},
|
||||
}
|
||||
new = append(new, newSpanSync)
|
||||
p.spanProcessors.Store(new)
|
||||
newSPS := spanProcessorStates{}
|
||||
newSPS = append(newSPS, p.spanProcessors.Load().(spanProcessorStates)...)
|
||||
newSPS = append(newSPS, newSpanProcessorState(sp))
|
||||
p.spanProcessors.Store(newSPS)
|
||||
}
|
||||
|
||||
// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors.
|
||||
func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) {
|
||||
func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
spss := spanProcessorStates{}
|
||||
old, ok := p.spanProcessors.Load().(spanProcessorStates)
|
||||
if !ok || len(old) == 0 {
|
||||
old := p.spanProcessors.Load().(spanProcessorStates)
|
||||
if len(old) == 0 {
|
||||
return
|
||||
}
|
||||
spss := spanProcessorStates{}
|
||||
spss = append(spss, old...)
|
||||
|
||||
// stop the span processor if it is started and remove it from the list
|
||||
var stopOnce *spanProcessorState
|
||||
var idx int
|
||||
for i, sps := range spss {
|
||||
if sps.sp == s {
|
||||
if sps.sp == sp {
|
||||
stopOnce = sps
|
||||
idx = i
|
||||
}
|
||||
}
|
||||
if stopOnce != nil {
|
||||
stopOnce.state.Do(func() {
|
||||
if err := s.Shutdown(context.Background()); err != nil {
|
||||
if err := sp.Shutdown(context.Background()); err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
})
|
||||
@ -213,10 +208,7 @@ func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) {
|
||||
// ForceFlush immediately exports all spans that have not yet been exported for
|
||||
// all the registered span processors.
|
||||
func (p *TracerProvider) ForceFlush(ctx context.Context) error {
|
||||
spss, ok := p.spanProcessors.Load().(spanProcessorStates)
|
||||
if !ok {
|
||||
return fmt.Errorf("failed to load span processors")
|
||||
}
|
||||
spss := p.spanProcessors.Load().(spanProcessorStates)
|
||||
if len(spss) == 0 {
|
||||
return nil
|
||||
}
|
||||
@ -237,14 +229,12 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error {
|
||||
|
||||
// Shutdown shuts down the span processors in the order they were registered.
|
||||
func (p *TracerProvider) Shutdown(ctx context.Context) error {
|
||||
spss, ok := p.spanProcessors.Load().(spanProcessorStates)
|
||||
if !ok {
|
||||
return fmt.Errorf("failed to load span processors")
|
||||
}
|
||||
spss := p.spanProcessors.Load().(spanProcessorStates)
|
||||
if len(spss) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var retErr error
|
||||
for _, sps := range spss {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@ -257,10 +247,15 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error {
|
||||
err = sps.sp.Shutdown(ctx)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
if retErr == nil {
|
||||
retErr = err
|
||||
} else {
|
||||
// Poor man's list of errors
|
||||
retErr = fmt.Errorf("%v; %v", retErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return retErr
|
||||
}
|
||||
|
||||
// TracerProviderOption configures a TracerProvider.
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user