use asynq as job handler (#182)

Reviewed-on: https://git.ptzo.gdn/feditools/relay/pulls/182
Co-authored-by: Tyr Mactire <tyr@pettingzoo.co>
Co-committed-by: Tyr Mactire <tyr@pettingzoo.co>
Tyr Mactire 2023-03-10 07:05:22 +00:00 committed by PettingZoo Gitea
parent 52e7a747fe
commit cd97960f32
76 changed files with 14719 additions and 30 deletions
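At a glance, this change swaps the Faktory-backed job runner for hibiken/asynq, using Redis as the task broker. A minimal, hedged sketch of the client/server pattern the diff below implements (the task type, retry count, and Redis settings are taken from the diff; the relay's actual code wires config, tracing, and its logic layer through a Runner struct):

package main

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	redisOpt := asynq.RedisClientOpt{Addr: "localhost:6380", Password: "test2", DB: 0}

	// producer side (compare the Enqueue* methods below): push a task onto a queue
	client := asynq.NewClient(redisOpt)
	task := asynq.NewTask("activity:inbox", []byte(`{}`), asynq.MaxRetry(5))
	if _, err := client.Enqueue(task); err != nil {
		log.Fatal(err)
	}

	// consumer side (compare Runner.Start below): route task types to handlers
	srv := asynq.NewServer(redisOpt, asynq.Config{Concurrency: 10})
	mux := asynq.NewServeMux()
	mux.HandleFunc("activity:inbox", func(ctx context.Context, t *asynq.Task) error { return nil })
	log.Fatal(srv.Run(mux))
}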

View File

@@ -13,6 +13,7 @@ clean:
@find . -name ".DS_Store" -exec rm -v {} \;
docker-pull:
docker-compose --project-name ${PROJECT_NAME} -f deployments/docker-compose-test.yaml build --pull
docker-compose --project-name ${PROJECT_NAME} -f deployments/docker-compose-test.yaml pull
docker-restart: docker-stop docker-start

View File

@@ -15,7 +15,7 @@ import (
"git.ptzo.gdn/feditools/relay/internal/language"
"git.ptzo.gdn/feditools/relay/internal/logic/logic1"
"git.ptzo.gdn/feditools/relay/internal/metrics"
"git.ptzo.gdn/feditools/relay/internal/runner/faktory"
"git.ptzo.gdn/feditools/relay/internal/runner/asynq"
"git.ptzo.gdn/feditools/relay/internal/scheduler"
"git.ptzo.gdn/feditools/relay/internal/token"
"github.com/spf13/viper"
@@ -160,15 +160,31 @@ var Start action.Action = func(topCtx context.Context) error {
logicMod.SetNotifier(notifier)
// create runner
runnerMod, err := faktory.New(logicMod)
l.Debug("creating runner module")
runnerAddr := viper.GetString(config.Keys.RedisAddress)
runnerPassword := viper.GetString(config.Keys.RedisPassword)
runnerDB := viper.GetInt(config.Keys.RedisDB)
if viper.GetString(config.Keys.RunnerAddress) != "" {
runnerAddr = viper.GetString(config.Keys.RunnerAddress)
runnerPassword = viper.GetString(config.Keys.RunnerPassword)
runnerDB = viper.GetInt(config.Keys.RunnerDB)
}
runnerMod, err := asynq.New(&asynq.Config{
Logic: logicMod,
Address: runnerAddr,
Password: runnerPassword,
DB: runnerDB,
})
if err != nil {
l.Errorf("runner: %s", err.Error())
l.Errorf("runner server: %s", err.Error())
cancel()
return err
}
logicMod.SetRunner(runnerMod)
runnerMod.Start(ctx)
// create scheduler
schedulerMod, err := scheduler.New(logicMod, runnerMod)

View File

@@ -0,0 +1,30 @@
package worker
import (
"git.ptzo.gdn/feditools/relay/internal/logic"
"git.ptzo.gdn/feditools/relay/internal/notification/manager"
"git.ptzo.gdn/feditools/relay/internal/notification/telegram"
)
func newNotifier(
logicMod logic.Logic,
) (*manager.Manager, error) {
l := logger.WithField("func", "newNotifier")
newManager, err := manager.New()
if err != nil {
l.Errorf("notification manager: %s", err.Error())
return nil, err
}
telegramMod, err := telegram.New(logicMod)
if err != nil {
l.Errorf("notification telegram: %s", err.Error())
return nil, err
}
newManager.AddService(telegramMod)
return newManager, nil
}

View File

@@ -12,7 +12,7 @@ import (
"git.ptzo.gdn/feditools/relay/internal/kv/redis"
"git.ptzo.gdn/feditools/relay/internal/logic/logic1"
"git.ptzo.gdn/feditools/relay/internal/metrics"
"git.ptzo.gdn/feditools/relay/internal/runner/faktory"
"git.ptzo.gdn/feditools/relay/internal/runner/asynq"
"git.ptzo.gdn/feditools/relay/internal/token"
"github.com/spf13/viper"
"github.com/uptrace/uptrace-go/uptrace"
@@ -127,15 +127,50 @@ var Start action.Action = func(ctx context.Context) error {
}
// create runner
runnerMod, err := faktory.New(logicMod)
// create runner
l.Debug("creating runner module")
runnerAddr := viper.GetString(config.Keys.RedisAddress)
runnerPassword := viper.GetString(config.Keys.RedisPassword)
runnerDB := viper.GetInt(config.Keys.RedisDB)
if viper.GetString(config.Keys.RunnerAddress) != "" {
runnerAddr = viper.GetString(config.Keys.RunnerAddress)
runnerPassword = viper.GetString(config.Keys.RunnerPassword)
runnerDB = viper.GetInt(config.Keys.RunnerDB)
}
runnerMod, err := asynq.New(&asynq.Config{
Logic: logicMod,
Concurrency: viper.GetInt(config.Keys.RunnerConcurrency),
Address: runnerAddr,
Password: runnerPassword,
DB: runnerDB,
})
if err != nil {
l.Errorf("runner: %s", err.Error())
l.Errorf("runner server: %s", err.Error())
cancel()
return err
}
defer func() {
l.Debug("closing runner")
err := runnerMod.Stop()
if err != nil {
l.Errorf("closing runner: %s", err.Error())
}
}()
logicMod.SetRunner(runnerMod)
runnerMod.Start(ctx)
// create language module
notifier, err := newNotifier(logicMod)
if err != nil {
l.Errorf("notifier: %s", err.Error())
cancel()
return err
}
logicMod.SetNotifier(notifier)
// ** start application **
errChan := make(chan error)
@@ -153,6 +188,15 @@ var Start action.Action = func(ctx context.Context) error {
}
}(metricsServer, errChan)
// start runner server
go func(m *asynq.Runner, errChan chan error) {
l.Debug("starting runner server")
err := m.Start(ctx)
if err != nil {
errChan <- fmt.Errorf("runner server: %s", err.Error())
}
}(runnerMod, errChan)
// wait for event
select {
case sig := <-stopSigChan:

View File

@@ -9,4 +9,7 @@ import (
func Runner(cmd *cobra.Command, values config.Values) {
cmd.PersistentFlags().Int(config.Keys.RunnerConcurrency, values.RunnerConcurrency, usage.RunnerConcurrency)
cmd.PersistentFlags().Int(config.Keys.RunnerPoolSize, values.RunnerPoolSize, usage.RunnerPoolSize)
cmd.PersistentFlags().String(config.Keys.RunnerAddress, values.RunnerAddress, usage.RunnerAddress)
cmd.PersistentFlags().Int(config.Keys.RunnerDB, values.RunnerDB, usage.RunnerDB)
cmd.PersistentFlags().String(config.Keys.RunnerPassword, values.RunnerPassword, usage.RunnerPassword)
}

View File

@@ -2,26 +2,52 @@
version: '3'
services:
postgres:
image: postgres:14
image: postgres:15
ports:
- 127.0.0.1:5432:5432/tcp
- "127.0.0.1:5432:5432/tcp"
environment:
- POSTGRES_PASSWORD=test
- POSTGRES_USER=test
- POSTGRES_DB=test
restart: always
faktory:
image: contribsys/faktory:1.6.1
command: /faktory -b :7419 -w :7420 -e production
ports:
- 127.0.0.1:7419:7419/tcp
- 127.0.0.1:7420:7420/tcp
environment:
- FAKTORY_PASSWORD=test
restart: always
# faktory:
# image: contribsys/faktory:1.6.1
# command: /faktory -b :7419 -w :7420 -e production
# ports:
# - 127.0.0.1:7419:7419/tcp
# - 127.0.0.1:7420:7420/tcp
# environment:
# - FAKTORY_PASSWORD=test
# restart: always
redis:
image: redis:6
command: redis-server --requirepass test
ports:
- "127.0.0.1:6379:6379/tcp"
restart: always
redis-runner:
image: redis:6
command: redis-server --requirepass test2
ports:
- "127.0.0.1:6380:6379/tcp"
restart: always
asynqmon:
image: hibiken/asynqmon:latest
ports:
- "127.0.0.1:8080:8080/tcp"
environment:
- "REDIS_ADDR=redis-runner:6379"
- "REDIS_PASSWORD=test2"
- "ENABLE_METRICS_EXPORTER=true"
- "PROMETHEUS_ADDR=http://prometheus:9090"
prometheus:
build:
context: ./prometheus
ports:
- "127.0.0.1:9090:9090/tcp"
grafana:
build:
context: ./grafana
ports:
- "127.0.0.1:3000:3000/tcp"

View File

@@ -0,0 +1,4 @@
FROM grafana/grafana:latest
ADD grafana.ini /etc/grafana/
ADD datasource.yaml /etc/grafana/provisioning/datasources/

View File

@@ -0,0 +1,9 @@
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
url: http://prometheus:9090
isDefault: true
editable: true

File diff suppressed because it is too large

View File

@@ -0,0 +1,3 @@
FROM prom/prometheus
ADD prometheus.yml /etc/prometheus/

View File

@@ -0,0 +1,11 @@
global:
scrape_interval: 15s
evaluation_interval: 15s
scrape_configs:
- job_name: "prometheus"
static_configs:
- targets: ["localhost:9090"]
- job_name: "asynq"
static_configs:
- targets: ["asynqmon:8080"]

2
go.mod
View File

@@ -18,6 +18,7 @@ require (
github.com/gorilla/mux v1.8.0
github.com/gorilla/sessions v1.2.1
github.com/hashicorp/golang-lru v0.5.4
github.com/hibiken/asynq v0.24.0
github.com/jackc/pgconn v1.13.0
github.com/jackc/pgx/v4 v4.17.2
github.com/jellydator/ttlcache/v3 v3.0.0
@@ -121,6 +122,7 @@ require (
golang.org/x/crypto v0.3.0 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/sys v0.2.0 // indirect
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
golang.org/x/tools v0.3.0 // indirect
google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect
google.golang.org/grpc v1.51.0 // indirect

15
go.sum
View File

@@ -155,6 +155,7 @@ github.com/go-redis/redis/extra/rediscmd/v8 v8.11.5/go.mod h1:s9f/6bSbS5r/jC2ozp
github.com/go-redis/redis/extra/redisotel/v8 v8.11.5 h1:BqyYJgvdSr2S/6O2l7zmCj26ocUTxDLgagsGIRfkS+Q=
github.com/go-redis/redis/extra/redisotel/v8 v8.11.5/go.mod h1:LlDT9RRdBgOrMGvFjT/m1+GrZAmRlBaMcM3UXHPWf8g=
github.com/go-redis/redis/v8 v8.3.3/go.mod h1:jszGxBCez8QA1HWSmQxJO9Y82kNibbUmeYhKWrBejTU=
github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -228,6 +229,7 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@@ -254,6 +256,8 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hibiken/asynq v0.24.0 h1:r1CiSVYCy1vGq9REKGI/wdB2D5n/QmtzihYHHXOuBUs=
github.com/hibiken/asynq v0.24.0/go.mod h1:FVnRfUTm6gcoDkM/EjF4OIh5/06ergCPUO6pS2B2y+w=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -388,6 +392,7 @@ github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
@@ -395,6 +400,7 @@ github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
@@ -469,6 +475,7 @@ github.com/speps/go-hashids/v2 v2.0.1 h1:ViWOEqWES/pdOSq+C1SLVa8/Tnsd52XC34RY7lt
github.com/speps/go-hashids/v2 v2.0.1/go.mod h1:47LKunwvDZki/uRVD6NImtyk712yFzIs3UF3KlHohGw=
github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
@@ -536,6 +543,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -582,6 +590,7 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
@@ -642,6 +651,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -678,6 +688,7 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@@ -712,6 +723,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180525142821-c11f84a56e43/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -800,6 +812,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U=
golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -853,6 +867,7 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -37,6 +37,9 @@ type KeyNames struct {
// runner
RunnerConcurrency string
RunnerPoolSize string
RunnerAddress string
RunnerDB string
RunnerPassword string
// server
ServerExternalHostname string
@@ -101,6 +104,9 @@ var Keys = KeyNames{
// runner
RunnerConcurrency: "runner-concurrency",
RunnerPoolSize: "runner-pool-size",
RunnerAddress: "runner-address",
RunnerDB: "runner-db",
RunnerPassword: "runner-password",
// server
ServerExternalHostname: "external-hostname",

View File

@@ -37,6 +37,9 @@ type Values struct {
// runner
RunnerConcurrency int
RunnerPoolSize int
RunnerAddress string
RunnerDB int
RunnerPassword string
// server
ServerExternalHostname string
@@ -95,7 +98,7 @@ var Defaults = Values{
RedisDB: 0,
// runner
RunnerConcurrency: 4,
RunnerConcurrency: 10,
RunnerPoolSize: 4,
// server

View File

@@ -0,0 +1,59 @@
package asynq
import (
"context"
"encoding/json"
"fmt"
"git.ptzo.gdn/feditools/go-lib/fedihelper"
"git.ptzo.gdn/feditools/relay/internal/runner"
"github.com/hibiken/asynq"
)
const TypeDeliverActivity = "activity:deliver"
type inboxDeliverPayload struct {
InstanceID int64
Activity fedihelper.Activity
}
func (r *Runner) EnqueueDeliverActivity(ctx context.Context, instanceID int64, activity fedihelper.Activity) error {
_, span := r.tracer.Start(ctx, "EnqueueDeliverActivity")
defer span.End()
l := logger.WithField("func", "EnqueueDeliverActivity")
payload, err := json.Marshal(inboxDeliverPayload{
InstanceID: instanceID,
Activity: activity,
})
if err != nil {
return err
}
task := asynq.NewTask(TypeDeliverActivity, payload, asynq.Queue(runner.QueueDelivery))
info, err := r.client.Enqueue(task, asynq.MaxRetry(5))
if err != nil {
l.Debugf("can't enqueue: %s", err.Error())
return err
}
l.Debugf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
return nil
}
func (r *Runner) handleDeliverActivity(ctx context.Context, t *asynq.Task) error {
_, span := r.tracer.Start(ctx, "handleDeliverActivity")
defer span.End()
//l := logger.WithField("func", "handleDeliverActivity")
var p inboxDeliverPayload
if err := json.Unmarshal(t.Payload(), &p); err != nil {
return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
}
// process activity
return r.logic.DeliverActivity(ctx, "", p.InstanceID, p.Activity, false)
}

View File

@@ -0,0 +1,68 @@
package asynq
import (
"context"
"encoding/json"
"fmt"
"git.ptzo.gdn/feditools/go-lib/fedihelper"
"github.com/hibiken/asynq"
"net/url"
)
const TypeInboxActivity = "activity:inbox"
type inboxActivityPayload struct {
InstanceID int64
ActorIRI *url.URL
Activity fedihelper.Activity
}
func (r *Runner) EnqueueInboxActivity(ctx context.Context, instanceID int64, actorIRI string, activity fedihelper.Activity) error {
_, span := r.tracer.Start(ctx, "EnqueueInboxAction")
defer span.End()
l := logger.WithField("func", "EnqueueInboxActivity")
actorURL, err := url.Parse(actorIRI)
if err != nil {
l.Errorf("cant parse url from argument 1: %s", err.Error())
return fmt.Errorf("cant parse url from argument 1: %s", err.Error())
}
payload, err := json.Marshal(inboxActivityPayload{
InstanceID: instanceID,
ActorIRI: actorURL,
Activity: activity,
})
if err != nil {
return err
}
task := asynq.NewTask(TypeInboxActivity, payload)
info, err := r.client.Enqueue(task, asynq.MaxRetry(5))
if err != nil {
l.Debugf("can't enqueue: %s", err.Error())
return err
}
l.Debugf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
return nil
}
func (r *Runner) handleInboxActivity(ctx context.Context, t *asynq.Task) error {
_, span := r.tracer.Start(ctx, "handleInboxActivity")
defer span.End()
//l := logger.WithField("func", "handleInboxAction")
var p inboxActivityPayload
if err := json.Unmarshal(t.Payload(), &p); err != nil {
return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
}
// process activity
return r.logic.ProcessActivity(ctx, "", p.InstanceID, p.ActorIRI, p.Activity)
}

View File

@@ -0,0 +1,55 @@
package asynq
import (
"context"
"encoding/json"
"fmt"
"github.com/hibiken/asynq"
)
const TypeProcessBlockAdd = "block:add"
type processBlockAddPayload struct {
BlockID int64
}
func (r *Runner) EnqueueProcessBlockAdd(ctx context.Context, blockID int64) error {
_, span := r.tracer.Start(ctx, "EnqueueProcessBlockAdd")
defer span.End()
l := logger.WithField("func", "EnqueueProcessBlockAdd")
payload, err := json.Marshal(processBlockAddPayload{
BlockID: blockID,
})
if err != nil {
return err
}
task := asynq.NewTask(TypeProcessBlockAdd, payload)
info, err := r.client.Enqueue(task)
if err != nil {
l.Debugf("can't enqueue: %s", err.Error())
return err
}
l.Debugf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
return nil
}
func (r *Runner) handleProcessBlockAdd(ctx context.Context, t *asynq.Task) error {
_, span := r.tracer.Start(ctx, "handleProcessBlockAdd")
defer span.End()
//l := logger.WithField("func", "handleProcessBlockAdd")
var p processBlockAddPayload
if err := json.Unmarshal(t.Payload(), &p); err != nil {
return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
}
// process block add
return r.logic.ProcessBlockAdd(ctx, p.BlockID)
}

View File

@@ -0,0 +1,55 @@
package asynq
import (
"context"
"encoding/json"
"fmt"
"github.com/hibiken/asynq"
)
const TypeProcessBlockDelete = "block:delete"
type processBlockDeletePayload struct {
BlockID int64
}
func (r *Runner) EnqueueProcessBlockDelete(ctx context.Context, blockID int64) error {
_, span := r.tracer.Start(ctx, "EnqueueProcessBlockDelete")
defer span.End()
l := logger.WithField("func", "EnqueueProcessBlockDelete")
payload, err := json.Marshal(processBlockDeletePayload{
BlockID: blockID,
})
if err != nil {
return err
}
task := asynq.NewTask(TypeProcessBlockDelete, payload)
info, err := r.client.Enqueue(task)
if err != nil {
l.Debugf("can't enqueue: %s", err.Error())
return err
}
l.Debugf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
return nil
}
func (r *Runner) handleProcessBlockDelete(ctx context.Context, t *asynq.Task) error {
_, span := r.tracer.Start(ctx, "handleProcessBlockDelete")
defer span.End()
//l := logger.WithField("func", "handleProcessBlockDelete")
var p processBlockDeletePayload
if err := json.Unmarshal(t.Payload(), &p); err != nil {
return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
}
// process block delete
return r.logic.ProcessBlockDelete(ctx, p.BlockID)
}

View File

@@ -0,0 +1,55 @@
package asynq
import (
"context"
"encoding/json"
"fmt"
"github.com/hibiken/asynq"
)
const TypeProcessBlockUpdate = "block:update"
type processBlockUpdatePayload struct {
BlockID int64
}
func (r *Runner) EnqueueProcessBlockUpdate(ctx context.Context, blockID int64) error {
_, span := r.tracer.Start(ctx, "EnqueueInboxAction")
defer span.End()
l := logger.WithField("func", "EnqueueInboxActivity")
payload, err := json.Marshal(processBlockUpdatePayload{
BlockID: blockID,
})
if err != nil {
return err
}
task := asynq.NewTask(TypeProcessBlockUpdate, payload)
info, err := r.client.Enqueue(task)
if err != nil {
l.Debugf("can't enqueue: %s", err.Error())
return err
}
l.Debugf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
return nil
}
func (r *Runner) handleProcessBlockUpdate(ctx context.Context, t *asynq.Task) error {
_, span := r.tracer.Start(ctx, "handleProcessBlockUpdate")
defer span.End()
//l := logger.WithField("func", "handleProcessBlockUpdate")
var p processBlockUpdatePayload
if err := json.Unmarshal(t.Payload(), &p); err != nil {
return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
}
// process block update
return r.logic.ProcessBlockUpdate(ctx, p.BlockID)
}

View File

@@ -0,0 +1,12 @@
package asynq
import "git.ptzo.gdn/feditools/relay/internal/logic"
type Config struct {
Logic logic.Logic
Concurrency int
Address string
Password string
DB int
}

View File

@@ -0,0 +1,9 @@
package asynq
import (
"git.ptzo.gdn/feditools/relay/internal/log"
)
type empty struct{}
var logger = log.WithPackageField(empty{})

View File

@@ -0,0 +1,36 @@
package asynq
import (
"context"
"github.com/hibiken/asynq"
)
const TypeMaintDeliveryErrorTimeout = "maint:delivery-timeout"
func (r *Runner) EnqueueMaintDeliveryErrorTimeout(ctx context.Context) error {
_, span := r.tracer.Start(ctx, "EnqueueMaintDeliveryErrorTimeout")
defer span.End()
l := logger.WithField("func", "EnqueueMaintDeliveryErrorTimeout")
task := asynq.NewTask(TypeMaintDeliveryErrorTimeout, nil)
info, err := r.client.Enqueue(task)
if err != nil {
l.Debugf("can't enqueue: %s", err.Error())
return err
}
l.Debugf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
return nil
}
func (r *Runner) handleMaintDeliveryErrorTimeout(ctx context.Context, _ *asynq.Task) error {
_, span := r.tracer.Start(ctx, "handleMaintDeliveryErrorTimeout")
defer span.End()
//l := logger.WithField("func", "handleMaintDeliveryErrorTimeout")
return r.logic.MaintDeliveryErrorTimeout(ctx, "")
}

View File

@@ -0,0 +1,80 @@
package asynq
import (
"context"
"git.ptzo.gdn/feditools/relay/internal/logic"
"git.ptzo.gdn/feditools/relay/internal/runner"
"github.com/hibiken/asynq"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
func New(c *Config) (*Runner, error) {
client := asynq.NewClient(asynq.RedisClientOpt{
Addr: c.Address,
Password: c.Password,
DB: c.DB,
})
return &Runner{
client: client,
logic: c.Logic,
tracer: otel.Tracer("internal/runner/asynq"),
concurrency: c.Concurrency,
address: c.Address,
password: c.Password,
db: c.DB,
}, nil
}
type Runner struct {
client *asynq.Client
logic logic.Logic
server *asynq.Server
tracer trace.Tracer
concurrency int
address string
password string
db int
}
var _ runner.Runner = (*Runner)(nil)
func (r *Runner) Start(_ context.Context) error {
r.server = asynq.NewServer(
asynq.RedisClientOpt{
Addr: r.address,
Password: r.password,
DB: r.db,
},
asynq.Config{
Concurrency: r.concurrency,
Queues: map[string]int{
runner.QueuePriority: 6,
runner.QueueDefault: 3,
runner.QueueDelivery: 1,
},
},
)
mux := asynq.NewServeMux()
mux.HandleFunc(TypeDeliverActivity, r.handleDeliverActivity)
mux.HandleFunc(TypeInboxActivity, r.handleInboxActivity)
mux.HandleFunc(TypeProcessBlockAdd, r.handleProcessBlockAdd)
mux.HandleFunc(TypeProcessBlockDelete, r.handleProcessBlockDelete)
mux.HandleFunc(TypeProcessBlockUpdate, r.handleProcessBlockUpdate)
mux.HandleFunc(TypeMaintDeliveryErrorTimeout, r.handleMaintDeliveryErrorTimeout)
mux.HandleFunc(TypeSendNotification, r.handleSendNotification)
mux.HandleFunc(TypeUpdateAccountInfo, r.handleUpdateAccountInfo)
mux.HandleFunc(TypeUpdateInstanceInfo, r.handleUpdateInstanceInfo)
return r.server.Run(mux)
}
func (r *Runner) Stop() error {
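// note: asynq's Server.Stop only signals the server to stop pulling new
// tasks; Server.Shutdown would also wait for in-flight workers (see the
// v0.18.0 entry in the vendored asynq CHANGELOG included in this diff)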
r.server.Stop()
return nil
}

View File

@@ -0,0 +1,63 @@
package asynq
import (
"context"
"encoding/json"
"fmt"
"git.ptzo.gdn/feditools/relay/internal/models"
"git.ptzo.gdn/feditools/relay/internal/runner"
"github.com/hibiken/asynq"
)
const TypeSendNotification = "notification:send"
type sendNotificationPayload struct {
Event models.EventType
Metadata map[string]interface{}
}
func (r *Runner) EnqueueSendNotification(ctx context.Context, event models.EventType, metadata map[string]interface{}) error {
_, span := r.tracer.Start(ctx, "EnqueueSendNotification")
defer span.End()
l := logger.WithField("func", "EnqueueSendNotification")
payload, err := json.Marshal(sendNotificationPayload{
Event: event,
Metadata: metadata,
})
if err != nil {
return err
}
task := asynq.NewTask(TypeSendNotification, payload, asynq.Queue(runner.QueuePriority))
info, err := r.client.Enqueue(task)
if err != nil {
l.Debugf("can't enqueue: %s", err.Error())
return err
}
l.Debugf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
return nil
}
func (r *Runner) handleSendNotification(ctx context.Context, t *asynq.Task) error {
_, span := r.tracer.Start(ctx, "handleSendNotification")
defer span.End()
//l := logger.WithField("func", "handleSendNotification")
var p sendNotificationPayload
if err := json.Unmarshal(t.Payload(), &p); err != nil {
return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
}
// send the notification
if err := r.logic.SendNotification(ctx, "", p.Event, p.Metadata); err != nil {
return fmt.Errorf("send notification failed: %v: %w", err, asynq.SkipRetry)
}
return nil
}

View File

@@ -0,0 +1,59 @@
package asynq
import (
"context"
"encoding/json"
"fmt"
"github.com/hibiken/asynq"
)
const TypeUpdateAccountInfo = "update:account"
type updateAccountInfoPayload struct {
AccountID int64
}
func (r *Runner) EnqueueUpdateAccountInfo(ctx context.Context, accountID int64) error {
_, span := r.tracer.Start(ctx, "EnqueueUpdateAccountInfo")
defer span.End()
l := logger.WithField("func", "EnqueueUpdateAccountInfo")
payload, err := json.Marshal(updateAccountInfoPayload{
AccountID: accountID,
})
if err != nil {
return err
}
task := asynq.NewTask(TypeUpdateAccountInfo, payload)
info, err := r.client.Enqueue(task)
if err != nil {
l.Debugf("can't enqueue: %s", err.Error())
return err
}
l.Debugf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
return nil
}
func (r *Runner) handleUpdateAccountInfo(ctx context.Context, t *asynq.Task) error {
_, span := r.tracer.Start(ctx, "handleUpdateAccountInfo")
defer span.End()
//l := logger.WithField("func", "handleUpdateAccountInfo")
var p updateAccountInfoPayload
if err := json.Unmarshal(t.Payload(), &p); err != nil {
return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
}
// update the account info
if err := r.logic.UpdateAccountInfo(ctx, "", p.AccountID); err != nil {
return fmt.Errorf("account update failed: %v: %w", err, asynq.SkipRetry)
}
return nil
}

View File

@@ -0,0 +1,59 @@
package asynq
import (
"context"
"encoding/json"
"fmt"
"github.com/hibiken/asynq"
)
const TypeUpdateInstanceInfo = "update:instance"
type updateInstanceInfoPayload struct {
InstanceID int64
}
func (r *Runner) EnqueueUpdateInstanceInfo(ctx context.Context, instanceID int64) error {
_, span := r.tracer.Start(ctx, "EnqueueUpdateInstanceInfo")
defer span.End()
l := logger.WithField("func", "EnqueueUpdateInstanceInfo")
payload, err := json.Marshal(updateInstanceInfoPayload{
InstanceID: instanceID,
})
if err != nil {
return err
}
task := asynq.NewTask(TypeUpdateInstanceInfo, payload)
info, err := r.client.Enqueue(task)
if err != nil {
l.Debugf("can't enqueue: %s", err.Error())
return err
}
l.Debugf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
return nil
}
func (r *Runner) handleUpdateInstanceInfo(ctx context.Context, t *asynq.Task) error {
_, span := r.tracer.Start(ctx, "handleUpdateInstanceInfo")
defer span.End()
//l := logger.WithField("func", "handleUpdateInstanceInfo")
var p updateInstanceInfoPayload
if err := json.Unmarshal(t.Payload(), &p); err != nil {
return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
}
// update the instance info
if err := r.logic.UpdateInstanceInfo(ctx, "", p.InstanceID); err != nil {
return fmt.Errorf("instance update failed: %v: %w", err, asynq.SkipRetry)
}
return nil
}

View File

@@ -3,5 +3,9 @@ package runner
import "go.opentelemetry.io/otel/attribute"
const (
RunnerJobIDKey = attribute.Key("runner.job.id")
JobIDKey = attribute.Key("runner.job.id")
QueuePriority = "priority" // high
QueueDefault = "default" // medium
QueueDelivery = "delivery" // low
)

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"git.ptzo.gdn/feditools/go-lib/fedihelper"
"git.ptzo.gdn/feditools/relay/internal/runner"
faktory "github.com/contribsys/faktory/client"
worker "github.com/contribsys/faktory_worker_go"
"github.com/sirupsen/logrus"
@@ -15,7 +16,7 @@ func (r *Runner) EnqueueDeliverActivity(_ context.Context, instanceID int64, act
retry := 8
job := faktory.NewJob(JobDeliverActivity, strconv.FormatInt(instanceID, 10), activity)
job.Queue = QueueDelivery
job.Queue = runner.QueueDelivery
job.Retry = &retry
return r.manager.Pool.With(func(conn *faktory.Client) error {
@@ -61,7 +62,7 @@ func (r *Runner) deliverActivity(ctx context.Context, args ...interface{}) error
func (r *Runner) EnqueueInboxActivity(_ context.Context, instanceID int64, actorIRI string, activity fedihelper.Activity) error {
job := faktory.NewJob(JobInboxActivity, strconv.FormatInt(instanceID, 10), actorIRI, activity)
job.Queue = QueueDefault
job.Queue = runner.QueueDefault
return r.manager.Pool.With(func(conn *faktory.Client) error {
return conn.Push(job)

View File

@@ -12,8 +12,4 @@ const (
JobSendNotification = "SendNotification"
JobUpdateAccountInfo = "UpdateAccountInfo"
JobUpdateInstanceInfo = "UpdateInstanceInfo"
QueueDefault = "default" // medium
QueueDelivery = "delivery" // low
QueuePriority = "priority" // high
)

View File

@@ -9,7 +9,7 @@ import (
func (r *Runner) Middleware(ctx context.Context, job *faktory.Job, next func(ctx context.Context) error) error {
opts := []trace.SpanStartOption{
trace.WithAttributes(runner.RunnerJobIDKey.String(job.Jid)),
trace.WithAttributes(runner.JobIDKey.String(job.Jid)),
trace.WithSpanKind(trace.SpanKindConsumer),
}
tctx, main := r.tracer.Start(ctx, job.Type, opts...)

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"git.ptzo.gdn/feditools/relay/internal/models"
"git.ptzo.gdn/feditools/relay/internal/runner"
faktory "github.com/contribsys/faktory/client"
worker "github.com/contribsys/faktory_worker_go"
"github.com/sirupsen/logrus"
@@ -11,7 +12,7 @@ import (
func (r *Runner) EnqueueSendNotification(_ context.Context, event models.EventType, metadata map[string]interface{}) error {
job := faktory.NewJob(JobSendNotification, event, metadata)
job.Queue = QueuePriority
job.Queue = runner.QueuePriority
return r.manager.Pool.With(func(conn *faktory.Client) error {
return conn.Push(job)

View File

@@ -4,6 +4,7 @@ import (
"context"
"git.ptzo.gdn/feditools/relay/internal/config"
"git.ptzo.gdn/feditools/relay/internal/logic"
"git.ptzo.gdn/feditools/relay/internal/runner"
faktory "github.com/contribsys/faktory/client"
worker "github.com/contribsys/faktory_worker_go"
"github.com/spf13/viper"
@@ -33,7 +34,7 @@ func New(l logic.Logic) (*Runner, error) {
mgr := worker.NewManager()
mgr.Pool = faktoryPool
mgr.Concurrency = viper.GetInt(config.Keys.RunnerConcurrency)
mgr.ProcessWeightedPriorityQueues(map[string]int{QueuePriority: 3, QueueDefault: 2, QueueDelivery: 1})
mgr.ProcessWeightedPriorityQueues(map[string]int{runner.QueuePriority: 3, runner.QueueDefault: 2, runner.QueueDelivery: 1})
// add handlers
mgr.Use(newRunner.Middleware)

View File

@@ -88,6 +88,24 @@ db-crypto-key: "test1234test5678test9123test4567"
# Default: ""
redis-password: "test"
############
## RUNNER ##
############
# String. Address and port of the redis instance used by the job runner
# If unset, the redis-address settings above are used instead
# Default: ""
runner-address: "localhost:6380"
# Int. Redis database to use
# Examples: [0, 1, 15]
# Default: 0
#runner-db: 0
# String. Password to use for the redis connection
# Examples: ["","test","password"]
# Default: ""
runner-password: "test2"
#######
# WEB #
#######

27
vendor/github.com/hibiken/asynq/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,27 @@
vendor
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Ignore examples for now
/examples
# Ignore tool binaries
/tools/asynq/asynq
/tools/metrics_exporter/metrics_exporter
# Ignore asynq config file
.asynq.*
# Ignore editor config files
.vscode
.idea

526
vendor/github.com/hibiken/asynq/CHANGELOG.md generated vendored Normal file
View File

@@ -0,0 +1,526 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on ["Keep a Changelog"](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.24.0] - 2023-01-02
### Added
- `PreEnqueueFunc`, `PostEnqueueFunc` is added in `Scheduler` and deprecated `EnqueueErrorHandler` (PR: https://github.com/hibiken/asynq/pull/476)
### Changed
- Removed error log when `Scheduler` failed to enqueue a task. Use `PostEnqueueFunc` to check for errors and take action if needed.
- Changed log level from ERROR to WARNING when `Scheduler` failed to record `SchedulerEnqueueEvent`.
## [0.23.0] - 2022-04-11
### Added
- `Group` option is introduced to enqueue task in a group.
- `GroupAggregator` and related types are introduced for task aggregation feature.
- `GroupGracePeriod`, `GroupMaxSize`, `GroupMaxDelay`, and `GroupAggregator` fields are added to `Config`.
- `Inspector` has new methods related to "aggregating tasks".
- `Group` field is added to `TaskInfo`.
- (CLI): `group ls` command is added
- (CLI): `task ls` supports listing aggregating tasks via `--state=aggregating --group=<GROUP>` flags
- Enable rediss url parsing support
### Fixed
- Fixed overflow issue with 32-bit systems (For details, see https://github.com/hibiken/asynq/pull/426)
## [0.22.1] - 2022-02-20
### Fixed
- Fixed Redis version compatibility: Keep support for redis v4.0+
## [0.22.0] - 2022-02-19
### Added
- `BaseContext` is introduced in `Config` to specify callback hook to provide a base `context` from which `Handler` `context` is derived
- `IsOrphaned` field is added to `TaskInfo` to describe a task left in active state with no worker processing it.
### Changed
- `Server` now recovers tasks with an expired lease. Recovered tasks are retried/archived with `ErrLeaseExpired` error.
## [0.21.0] - 2022-01-22
### Added
- `PeriodicTaskManager` is added. Prefer using this over `Scheduler` as it has better support for dynamic periodic tasks.
- The `asynq stats` command now supports a `--json` option, making its output a JSON object
- Introduced new configuration for `DelayedTaskCheckInterval`. See [godoc](https://godoc.org/github.com/hibiken/asynq) for more details.
## [0.20.0] - 2021-12-19
### Added
- Package `x/metrics` is added.
- Tool `tools/metrics_exporter` binary is added.
- `ProcessedTotal` and `FailedTotal` fields were added to `QueueInfo` struct.
## [0.19.1] - 2021-12-12
### Added
- `Latency` field is added to `QueueInfo`.
- `EnqueueContext` method is added to `Client`.
### Fixed
- Fixed an error when a user passes a duration of less than 1s to the `Unique` option
## [0.19.0] - 2021-11-06
### Changed
- `NewTask` takes `Option` as variadic argument
- Bumped minimum supported go version to 1.14 (i.e. go1.14 or higher is required).
### Added
- `Retention` option is added to allow user to specify task retention duration after completion.
- `TaskID` option is added to allow user to specify task ID.
- `ErrTaskIDConflict` sentinel error value is added.
- `ResultWriter` type is added and provided through `Task.ResultWriter` method.
- `TaskInfo` has new fields `CompletedAt`, `Result` and `Retention`.
### Removed
- `Client.SetDefaultOptions` is removed. Use `NewTask` instead to pass default options for tasks.
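For illustration, a fragment using the new variadic-`Option` constructor (task type and values are hypothetical; assumes the usual `asynq` and `time` imports):

```go
task := asynq.NewTask(
    "email:welcome",
    []byte(`{"user_id": 42}`),
    asynq.TaskID("welcome-42"),    // enqueueing a duplicate ID yields ErrTaskIDConflict
    asynq.Retention(24*time.Hour), // keep the completed task around for a day
)
```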
## [0.18.6] - 2021-10-03
### Changed
- Updated `github.com/go-redis/redis` package to v8
## [0.18.5] - 2021-09-01
### Added
- `IsFailure` config option is added to determine whether error returned from Handler counts as a failure.
## [0.18.4] - 2021-08-17
### Fixed
- Scheduler methods are now thread-safe. It's now safe to call `Register` and `Unregister` concurrently.
## [0.18.3] - 2021-08-09
### Changed
- `Client.Enqueue` no longer enqueues tasks with empty typename; Error message is returned.
## [0.18.2] - 2021-07-15
### Changed
- Changed `Queue` function to not convert the provided queue name to lowercase. Queue names are now case-sensitive.
- `QueueInfo.MemoryUsage` is now an approximate usage value.
### Fixed
- Fixed latency issue around memory usage (see https://github.com/hibiken/asynq/issues/309).
## [0.18.1] - 2021-07-04
### Changed
- Changed to execute task recovering logic when server starts up; previously it needed to wait for a minute for the task recovering logic to execute.
### Fixed
- Fixed task recovering logic to execute every minute
## [0.18.0] - 2021-06-29
### Changed
- NewTask function now takes array of bytes as payload.
- Task `Type` and `Payload` should be accessed by a method call.
- `Server` API has changed. Renamed `Quiet` to `Stop`. Renamed `Stop` to `Shutdown`. _Note:_ As a result of this renaming, the behavior of `Stop` has changed. Please update the existing code to call `Shutdown` where it used to call `Stop`.
- `Scheduler` API has changed. Renamed `Stop` to `Shutdown`.
- Requires redis v4.0+ for multiple field/value pair support
- `Client.Enqueue` now returns `TaskInfo`
- `Inspector.RunTaskByKey` is replaced with `Inspector.RunTask`
- `Inspector.DeleteTaskByKey` is replaced with `Inspector.DeleteTask`
- `Inspector.ArchiveTaskByKey` is replaced with `Inspector.ArchiveTask`
- `inspeq` package is removed. All types and functions from the package are moved to the `asynq` package.
- `WorkerInfo` field names have changed.
- `Inspector.CancelActiveTask` is renamed to `Inspector.CancelProcessing`
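In code, the rename maps as follows (a sketch; `srv` is an `*asynq.Server`):

```go
srv.Stop()     // pre-0.18.0 Quiet: stop pulling in new tasks
srv.Shutdown() // pre-0.18.0 Stop: full graceful shutdown
```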
## [0.17.2] - 2021-06-06
### Fixed
- Free unique lock when task is deleted (https://github.com/hibiken/asynq/issues/275).
## [0.17.1] - 2021-04-04
### Fixed
- Fix bug in internal `RDB.memoryUsage` method.
## [0.17.0] - 2021-03-24
### Added
- `DialTimeout`, `ReadTimeout`, and `WriteTimeout` options are added to `RedisConnOpt`.
## [0.16.1] - 2021-03-20
### Fixed
- Replace `KEYS` command with `SCAN` as recommended by [redis doc](https://redis.io/commands/KEYS).
## [0.16.0] - 2021-03-10
### Added
- `Unregister` method is added to `Scheduler` to remove a registered entry.
## [0.15.0] - 2021-01-31
**IMPORTANT**: All `Inspector` related code is moved to subpackage "github.com/hibiken/asynq/inspeq"
### Changed
- `Inspector` related code is moved to subpackage "github.com/hibiken/asynq/inspeq".
- `RedisConnOpt` interface has changed slightly. If you have been passing `RedisClientOpt`, `RedisFailoverClientOpt`, or `RedisClusterClientOpt` as a pointer,
update your code to pass as a value.
- `ErrorMsg` field in `RetryTask` and `ArchivedTask` was renamed to `LastError`.
### Added
- `MaxRetry`, `Retried`, `LastError` fields were added to all task types returned from `Inspector`.
- `MemoryUsage` field was added to `QueueStats`.
- `DeleteAllPendingTasks`, `ArchiveAllPendingTasks` were added to `Inspector`
- `DeleteTaskByKey` and `ArchiveTaskByKey` now supports deleting/archiving `PendingTask`.
- asynq CLI now supports deleting/archiving pending tasks.
## [0.14.1] - 2021-01-19
### Fixed
- `go.mod` file for CLI
## [0.14.0] - 2021-01-14
**IMPORTANT**: Please run `asynq migrate` command to migrate from the previous versions.
### Changed
- Renamed `DeadTask` to `ArchivedTask`.
- Renamed the operation `Kill` to `Archive` in `Inspector`.
- Print stack trace when Handler panics.
- Include a file name and a line number in the error message when recovering from a panic.
### Added
- `DefaultRetryDelayFunc` is now a public API, which can be used in the custom `RetryDelayFunc`.
- `SkipRetry` error is added to be used as a return value from `Handler`.
- `Servers` method is added to `Inspector`
- `CancelActiveTask` method is added to `Inspector`.
- `ListSchedulerEnqueueEvents` method is added to `Inspector`.
- `SchedulerEntries` method is added to `Inspector`.
- `DeleteQueue` method is added to `Inspector`.
## [0.13.1] - 2020-11-22
### Fixed
- Fixed processor to wait for specified time duration before forcefully shutdown workers.
## [0.13.0] - 2020-10-13
### Added
- `Scheduler` type is added to enable periodic tasks. See the godoc for its APIs and [wiki](https://github.com/hibiken/asynq/wiki/Periodic-Tasks) for the getting-started guide.
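A minimal periodic-task sketch, assuming the `Register`/`Run` API described in the godoc and using the current task constructor (the cron spec and task type are hypothetical):

```go
scheduler := asynq.NewScheduler(asynq.RedisClientOpt{Addr: "localhost:6379"}, nil)
entryID, err := scheduler.Register("@every 30s", asynq.NewTask("maint:cleanup", nil))
if err != nil {
    log.Fatal(err)
}
log.Printf("registered periodic task: %s", entryID)
log.Fatal(scheduler.Run()) // blocks, enqueuing on schedule
```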
### Changed
- interface `Option` has changed. See the godoc for the new interface.
This change would have no impact as long as you are using exported functions (e.g. `MaxRetry`, `Queue`, etc)
to create `Option`s.
### Added
- `Payload.String() string` method is added
- `Payload.MarshalJSON() ([]byte, error)` method is added
## [0.12.0] - 2020-09-12
**IMPORTANT**: If you are upgrading from a previous version, please install the latest version of the CLI `go get -u github.com/hibiken/asynq/tools/asynq` and run `asynq migrate` command. No process should be writing to Redis while you run the migration command.
## The semantics of queue have changed
Previously, we called tasks that are ready to be processed _"Enqueued tasks"_, and other tasks that are scheduled to be processed in the future _"Scheduled tasks"_, etc.
We changed the semantics of _"Enqueue"_ slightly; all tasks that the client pushes to Redis are _Enqueued_ to a queue. Within a queue, tasks will transition from one state to another.
Possible task states are:
- `Pending`: task is ready to be processed (previously called "Enqueued")
- `Active`: tasks is currently being processed (previously called "InProgress")
- `Scheduled`: task is scheduled to be processed in the future
- `Retry`: task failed to be processed and will be retried again in the future
- `Dead`: task has exhausted all of its retries and is stored for manual inspection purposes
**This semantics change is reflected in the new `Inspector` API and CLI commands.**
---
### Changed
#### `Client`
Use `ProcessIn` or `ProcessAt` option to schedule a task instead of `EnqueueIn` or `EnqueueAt`.
| Previously | v0.12.0 |
| --------------------------- | ------------------------------------------ |
| `client.EnqueueAt(t, task)` | `client.Enqueue(task, asynq.ProcessAt(t))` |
| `client.EnqueueIn(d, task)` | `client.Enqueue(task, asynq.ProcessIn(d))` |
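The same mapping as a fragment (assumes `client`, `task`, `t time.Time`, and `d time.Duration` are in scope):

```go
// was: client.EnqueueAt(t, task)
if _, err := client.Enqueue(task, asynq.ProcessAt(t)); err != nil {
    log.Fatal(err)
}
// was: client.EnqueueIn(d, task)
if _, err := client.Enqueue(task, asynq.ProcessIn(d)); err != nil {
    log.Fatal(err)
}
```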
#### `Inspector`
All Inspector methods are scoped to a queue, and the methods take `qname (string)` as the first argument.
`EnqueuedTask` is renamed to `PendingTask` and its corresponding methods.
`InProgressTask` is renamed to `ActiveTask` and its corresponding methods.
Command "Enqueue" is replaced by the verb "Run" (e.g. `EnqueueAllScheduledTasks` --> `RunAllScheduledTasks`)
#### `CLI`
CLI commands are restructured to use subcommands. Commands are organized into a few management commands:
To view details on any command, use `asynq help <command> <subcommand>`.
- `asynq stats`
- `asynq queue [ls inspect history rm pause unpause]`
- `asynq task [ls cancel delete kill run delete-all kill-all run-all]`
- `asynq server [ls]`
### Added
#### `RedisConnOpt`
- `RedisClusterClientOpt` is added to connect to Redis Cluster.
- `Username` field is added to all `RedisConnOpt` types in order to authenticate connection when Redis ACLs are used.
#### `Client`
- `ProcessIn(d time.Duration) Option` and `ProcessAt(t time.Time) Option` are added to replace `EnqueueIn` and `EnqueueAt` functionality.
#### `Inspector`
- `Queues() ([]string, error)` method is added to get all queue names.
- `ClusterKeySlot(qname string) (int64, error)` method is added to get queue's hash slot in Redis cluster.
- `ClusterNodes(qname string) ([]ClusterNode, error)` method is added to get a list of Redis cluster nodes for the given queue.
- `Close() error` method is added to close connection with redis.
### `Handler`
- `GetQueueName(ctx context.Context) (string, bool)` helper is added to extract queue name from a context.
## [0.11.0] - 2020-07-28
### Added
- `Inspector` type was added to monitor and mutate state of queues and tasks.
- `HealthCheckFunc` and `HealthCheckInterval` fields were added to `Config` to allow user to specify a callback
function to check for broker connection.
## [0.10.0] - 2020-07-06
### Changed
- All tasks now require a timeout or deadline. By default, timeout is set to 30 mins.
- Tasks that exceed their deadline are automatically retried.
- Encoding schema for task message has changed. Please install the latest CLI and run `migrate` command if
you have tasks enqueued with the previous version of asynq.
- API of `(*Client).Enqueue`, `(*Client).EnqueueIn`, and `(*Client).EnqueueAt` has changed to return a `*Result`.
- API of `ErrorHandler` has changed. It now takes context as the first argument and removed `retried`, `maxRetry` from the argument list.
Use `GetRetryCount` and/or `GetMaxRetry` to get the count values.
## [0.9.4] - 2020-06-13
### Fixed
- Fixes issue of same tasks processed by more than one worker (https://github.com/hibiken/asynq/issues/90).
## [0.9.3] - 2020-06-12
### Fixed
- Fixes the JSON number overflow issue (https://github.com/hibiken/asynq/issues/166).
## [0.9.2] - 2020-06-08
### Added
- The `pause` and `unpause` commands were added to the CLI. See README for the CLI for details.
## [0.9.1] - 2020-05-29
### Added
- `GetTaskID`, `GetRetryCount`, and `GetMaxRetry` functions were added to extract task metadata from context.
## [0.9.0] - 2020-05-16
### Changed
- `Logger` interface has changed. Please see the godoc for the new interface.
### Added
- `LogLevel` type is added. Server's log level can be specified through `LogLevel` field in `Config`.
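For example (a sketch; constant names assumed from the godoc):

```go
srv := asynq.NewServer(redisOpt, asynq.Config{
    Concurrency: 10,
    LogLevel:    asynq.DebugLevel,
})
```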
## [0.8.3] - 2020-05-08
### Added
- `Close` method is added to `Client`.
## [0.8.2] - 2020-05-03
### Fixed
- [Fixed cancelfunc leak](https://github.com/hibiken/asynq/pull/145)
## [0.8.1] - 2020-04-27
### Added
- `ParseRedisURI` helper function is added to create a `RedisConnOpt` from a URI string.
- `SetDefaultOptions` method is added to `Client`.
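A short sketch of the helper (the URI is illustrative):

```go
opt, err := asynq.ParseRedisURI("redis://:mypassword@localhost:6379/2")
if err != nil {
    log.Fatal(err)
}
client := asynq.NewClient(opt)
```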
## [0.8.0] - 2020-04-19
### Changed
- `Background` type is renamed to `Server`.
- To upgrade from the previous version, update `NewBackground` to `NewServer` and pass `Config` by value.
- CLI is renamed to `asynq`.
- To upgrade the CLI to the latest version run `go get -u github.com/hibiken/asynq/tools/asynq`
- The `ps` command in CLI is renamed to `servers`
- `Concurrency` defaults to the number of CPUs when unset or set to a negative value.
### Added
- `ShutdownTimeout` field is added to `Config` to specify timeout duration used during graceful shutdown.
- New `Server` type exposes `Start`, `Stop`, and `Quiet` as well as `Run`.
## [0.7.1] - 2020-04-05
### Fixed
- Fixed signal handling for windows.
## [0.7.0] - 2020-03-22
### Changed
- Support Go v1.13+, dropped support for go v1.12
### Added
- `Unique` option was added to allow client to enqueue a task only if it's unique within a certain time period.
## [0.6.2] - 2020-03-15
### Added
- `Use` method was added to `ServeMux` to apply middlewares to all handlers.
## [0.6.1] - 2020-03-12
### Added
- `Client` can optionally schedule task with `asynq.Deadline(time)` to specify deadline for task's context. Default is no deadline.
- `Logger` option was added to config, which allows user to specify the logger used by the background instance.
## [0.6.0] - 2020-03-01
### Added
- Added `ServeMux` type to make it easy for users to implement Handler interface.
- `ErrorHandler` type was added. Allows users to specify an error handling function (e.g. report errors to an error reporting service such as Honeybadger, Bugsnag, etc.)
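A sketch of the `ServeMux` pattern, shown with today's `Server` API (the same pattern this relay's `Runner.Start` above uses):

```go
mux := asynq.NewServeMux()
mux.HandleFunc("activity:inbox", handleInboxActivity) // handler: func(context.Context, *asynq.Task) error
log.Fatal(srv.Run(mux))
```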
## [0.5.0] - 2020-02-23
### Changed
- `Client` API has changed. Use `Enqueue`, `EnqueueAt` and `EnqueueIn` to enqueue and schedule tasks.
### Added
- `asynqmon workers` was added to list all running workers information
## [0.4.0] - 2020-02-13
### Changed
- `Handler` interface has changed. `ProcessTask` method takes two arguments `context.Context` and `*asynq.Task`
- `Queues` field in `Config` has changed from `map[string]uint` to `map[string]int`
### Added
- `Client` can optionally schedule task with `asynq.Timeout(duration)` to specify timeout duration for task. Default is no timeout.
- `asynqmon cancel [task id]` will send a cancellation signal to the goroutine processing the specified task.
## [0.3.0] - 2020-02-04
### Added
- `asynqmon ps` was added to list all background worker processes
## [0.2.2] - 2020-01-26
### Fixed
- Fixed restoring unfinished tasks back to correct queues.
### Changed
- `asynqmon ls` command is now paginated (default 30 tasks from first page)
- `asynqmon ls enqueued:[queue name]` requires queue name to be specified
## [0.2.1] - 2020-01-22
### Fixed
- More structured log messages
- Prevent spamming logs with a bunch of errors when Redis connection is lost
- Fixed and updated README doc
## [0.2.0] - 2020-01-19
### Added
- NewTask constructor
- `Queues` option in `Config` to specify multiple queues with priority level
- `Client` can schedule a task with `asynq.Queue(name)` to specify which queue to use
- `StrictPriority` option in `Config` to specify whether the priority should be followed strictly
- `RedisConnOpt` to abstract away redis client implementation
- [CLI] `asynqmon rmq` command to remove queue
### Changed
- `Client` and `Background` constructors take `RedisConnOpt` as their first argument.
- `asynqmon stats` now shows the total of all enqueued tasks under "Enqueued"
- `asynqmon stats` now shows each queue's task count
- `asynqmon history` now doesn't take any arguments and shows data from the last 10 days by default (use `--days` flag to change the number of days)
- Task type is now immutable (i.e., Payload is read-only)
## [0.1.0] - 2020-01-04
### Added
- Initial version of asynq package
- Initial version of asynqmon CLI

128
vendor/github.com/hibiken/asynq/CODE_OF_CONDUCT.md generated vendored Normal file
View File

@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
ken.hibino7@gmail.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

52
vendor/github.com/hibiken/asynq/CONTRIBUTING.md generated vendored Normal file

@ -0,0 +1,52 @@
# Contributing
Thanks for your interest in contributing to Asynq!
We are open to, and grateful for, any contributions made by the community.
## Reporting Bugs
Have a look at our [issue tracker](https://github.com/hibiken/asynq/issues). If you can't find an issue (open or closed)
describing your problem (or a very similar one) there, please open a new issue with
the following details:
- Which versions of Go and Redis are you using?
- What are you trying to accomplish?
- What is the full error you are seeing?
- How can we reproduce this?
- Please quote as much of your code as needed to reproduce (ideally, link to a
public repository or Gist)
## Getting Help
We run a [Gitter
channel](https://gitter.im/go-asynq/community) where you can ask questions and
get help. Feel free to ask there before opening a GitHub issue.
## Submitting Feature Requests
If you can't find an issue (open or closed) describing your idea on our [issue
tracker](https://github.com/hibiken/asynq/issues), open an issue. Adding answers to the following
questions in your description is a plus:
- What do you want to do, and how do you expect Asynq to support you with that?
- How might this be added to Asynq?
- What are possible alternatives?
- Are there any disadvantages?
Thank you! We'll try to respond as quickly as possible.
## Contributing Code
1. Fork this repo
2. Download your fork `git clone git@github.com:your-username/asynq.git && cd asynq`
3. Create your branch `git checkout -b your-branch-name`
4. Make and commit your changes
5. Push the branch `git push origin your-branch-name`
6. Create a new pull request
Please try to keep your pull request focused in scope and avoid including unrelated commits.
Please run the tests locally against a Redis cluster with the `--redis_cluster` flag to ensure that the code works with Redis Cluster. TODO: Run tests using Redis cluster on CI.
After you have submitted your pull request, we'll try to get back to you as soon as possible. We may suggest some changes or improvements.
Thank you for contributing!

21
vendor/github.com/hibiken/asynq/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019 Kentaro Hibino
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

7
vendor/github.com/hibiken/asynq/Makefile generated vendored Normal file

@ -0,0 +1,7 @@
ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
proto: internal/proto/asynq.proto
protoc -I=$(ROOT_DIR)/internal/proto \
--go_out=$(ROOT_DIR)/internal/proto \
--go_opt=module=github.com/hibiken/asynq/internal/proto \
$(ROOT_DIR)/internal/proto/asynq.proto

312
vendor/github.com/hibiken/asynq/README.md generated vendored Normal file

@ -0,0 +1,312 @@
<img src="https://user-images.githubusercontent.com/11155743/114697792-ffbfa580-9d26-11eb-8e5b-33bef69476dc.png" alt="Asynq logo" width="360px" />
# Simple, reliable & efficient distributed task queue in Go
[![GoDoc](https://godoc.org/github.com/hibiken/asynq?status.svg)](https://godoc.org/github.com/hibiken/asynq)
[![Go Report Card](https://goreportcard.com/badge/github.com/hibiken/asynq)](https://goreportcard.com/report/github.com/hibiken/asynq)
![Build Status](https://github.com/hibiken/asynq/workflows/build/badge.svg)
[![License: MIT](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT)
[![Gitter chat](https://badges.gitter.im/go-asynq/gitter.svg)](https://gitter.im/go-asynq/community)
Asynq is a Go library for queueing tasks and processing them asynchronously with workers. It's backed by [Redis](https://redis.io/) and is designed to be scalable yet easy to get started with.
High-level overview of how Asynq works:
- Client puts tasks on a queue
- Server pulls tasks off queues and starts a worker goroutine for each task
- Tasks are processed concurrently by multiple workers
Task queues are used as a mechanism to distribute work across multiple machines. A system can consist of multiple worker servers and brokers, enabling high availability and horizontal scaling.
**Example use case**
![Task Queue Diagram](https://user-images.githubusercontent.com/11155743/116358505-656f5f80-a806-11eb-9c16-94e49dab0f99.jpg)
## Features
- Guaranteed [at-least-once execution](https://www.cloudcomputingpatterns.org/at_least_once_delivery/) of a task
- Scheduling of tasks
- [Retries](https://github.com/hibiken/asynq/wiki/Task-Retry) of failed tasks
- Automatic recovery of tasks in the event of a worker crash
- [Weighted priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#weighted-priority)
- [Strict priority queues](https://github.com/hibiken/asynq/wiki/Queue-Priority#strict-priority)
- Low latency to add a task since writes are fast in Redis
- De-duplication of tasks using [unique option](https://github.com/hibiken/asynq/wiki/Unique-Tasks) (see the sketch after this list)
- Allow [timeout and deadline per task](https://github.com/hibiken/asynq/wiki/Task-Timeout-and-Cancelation)
- Allow [aggregating a group of tasks](https://github.com/hibiken/asynq/wiki/Task-aggregation) to batch multiple successive operations
- [Flexible handler interface with support for middlewares](https://github.com/hibiken/asynq/wiki/Handler-Deep-Dive)
- [Ability to pause queue](/tools/asynq/README.md#pause) to stop processing tasks from the queue
- [Periodic Tasks](https://github.com/hibiken/asynq/wiki/Periodic-Tasks)
- [Support Redis Cluster](https://github.com/hibiken/asynq/wiki/Redis-Cluster) for automatic sharding and high availability
- [Support Redis Sentinels](https://github.com/hibiken/asynq/wiki/Automatic-Failover) for high availability
- Integration with [Prometheus](https://prometheus.io/) to collect and visualize queue metrics
- [Web UI](#web-ui) to inspect and remote-control queues and tasks
- [CLI](#command-line-tool) to inspect and remote-control queues and tasks
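Several of these features are plain `Option` values passed at enqueue time. Here is a minimal sketch of the unique-option item above (it assumes the `client` and `task` variables from the Quickstart below; the durations are illustrative):

```go
// A second enqueue of an identical task within the TTL fails with ErrDuplicateTask.
info, err := client.Enqueue(task, asynq.Unique(24*time.Hour), asynq.Timeout(5*time.Minute))
switch {
case errors.Is(err, asynq.ErrDuplicateTask):
	log.Println("identical task already queued; skipping")
case err != nil:
	log.Fatalf("could not enqueue task: %v", err)
default:
	log.Printf("enqueued task: id=%s", info.ID)
}
```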
## Stability and Compatibility
**Status**: The library is currently undergoing **heavy development** with frequent, breaking API changes.
> ☝️ **Important Note**: The current major version is zero (`v0.x.x`) to accommodate rapid development and fast iteration while getting early feedback from users (_feedback on APIs is appreciated!_). The public API could change without a major version update before the `v1.0.0` release.
## Quickstart
Make sure you have Go installed ([download](https://golang.org/dl/)). Version `1.14` or higher is required.
Initialize your project by creating a folder and then running `go mod init github.com/your/repo` ([learn more](https://blog.golang.org/using-go-modules)) inside the folder. Then install Asynq library with the [`go get`](https://golang.org/cmd/go/#hdr-Add_dependencies_to_current_module_and_install_them) command:
```sh
go get -u github.com/hibiken/asynq
```
Make sure you're running a Redis server locally or from a [Docker](https://hub.docker.com/_/redis) container. Version `4.0` or higher is required.
Next, write a package that encapsulates task creation and task handling.
```go
package tasks
import (
"context"
"encoding/json"
"fmt"
"log"
"time"
"github.com/hibiken/asynq"
)
// A list of task types.
const (
TypeEmailDelivery = "email:deliver"
TypeImageResize = "image:resize"
)
type EmailDeliveryPayload struct {
UserID int
TemplateID string
}
type ImageResizePayload struct {
SourceURL string
}
//----------------------------------------------
// Write a function NewXXXTask to create a task.
// A task consists of a type and a payload.
//----------------------------------------------
func NewEmailDeliveryTask(userID int, tmplID string) (*asynq.Task, error) {
payload, err := json.Marshal(EmailDeliveryPayload{UserID: userID, TemplateID: tmplID})
if err != nil {
return nil, err
}
return asynq.NewTask(TypeEmailDelivery, payload), nil
}
func NewImageResizeTask(src string) (*asynq.Task, error) {
payload, err := json.Marshal(ImageResizePayload{SourceURL: src})
if err != nil {
return nil, err
}
// task options can be passed to NewTask, which can be overridden at enqueue time.
return asynq.NewTask(TypeImageResize, payload, asynq.MaxRetry(5), asynq.Timeout(20 * time.Minute)), nil
}
//---------------------------------------------------------------
// Write a function HandleXXXTask to handle the input task.
// Note that it satisfies the asynq.HandlerFunc interface.
//
// Handler doesn't need to be a function. You can define a type
// that satisfies asynq.Handler interface. See examples below.
//---------------------------------------------------------------
func HandleEmailDeliveryTask(ctx context.Context, t *asynq.Task) error {
var p EmailDeliveryPayload
if err := json.Unmarshal(t.Payload(), &p); err != nil {
return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
}
log.Printf("Sending Email to User: user_id=%d, template_id=%s", p.UserID, p.TemplateID)
// Email delivery code ...
return nil
}
// ImageProcessor implements asynq.Handler interface.
type ImageProcessor struct {
// ... fields for struct
}
func (processor *ImageProcessor) ProcessTask(ctx context.Context, t *asynq.Task) error {
var p ImageResizePayload
if err := json.Unmarshal(t.Payload(), &p); err != nil {
return fmt.Errorf("json.Unmarshal failed: %v: %w", err, asynq.SkipRetry)
}
log.Printf("Resizing image: src=%s", p.SourceURL)
// Image resizing code ...
return nil
}
func NewImageProcessor() *ImageProcessor {
return &ImageProcessor{}
}
```
In your application code, import the above package and use [`Client`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Client) to put tasks on queues.
```go
package main
import (
"log"
"time"
"github.com/hibiken/asynq"
"your/app/package/tasks"
)
const redisAddr = "127.0.0.1:6379"
func main() {
client := asynq.NewClient(asynq.RedisClientOpt{Addr: redisAddr})
defer client.Close()
// ------------------------------------------------------
// Example 1: Enqueue task to be processed immediately.
// Use (*Client).Enqueue method.
// ------------------------------------------------------
task, err := tasks.NewEmailDeliveryTask(42, "some:template:id")
if err != nil {
log.Fatalf("could not create task: %v", err)
}
info, err := client.Enqueue(task)
if err != nil {
log.Fatalf("could not enqueue task: %v", err)
}
log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
// ------------------------------------------------------------
// Example 2: Schedule task to be processed in the future.
// Use ProcessIn or ProcessAt option.
// ------------------------------------------------------------
info, err = client.Enqueue(task, asynq.ProcessIn(24*time.Hour))
if err != nil {
log.Fatalf("could not schedule task: %v", err)
}
log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
// ----------------------------------------------------------------------------
// Example 3: Set other options to tune task processing behavior.
// Options include MaxRetry, Queue, Timeout, Deadline, Unique etc.
// ----------------------------------------------------------------------------
task, err = tasks.NewImageResizeTask("https://example.com/myassets/image.jpg")
if err != nil {
log.Fatalf("could not create task: %v", err)
}
info, err = client.Enqueue(task, asynq.MaxRetry(10), asynq.Timeout(3 * time.Minute))
if err != nil {
log.Fatalf("could not enqueue task: %v", err)
}
log.Printf("enqueued task: id=%s queue=%s", info.ID, info.Queue)
}
```
Next, start a worker server to process these tasks in the background. To start the background workers, use [`Server`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Server) and provide your [`Handler`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#Handler) to process the tasks.
You can optionally use [`ServeMux`](https://pkg.go.dev/github.com/hibiken/asynq?tab=doc#ServeMux) to create a handler, just as you would with [`net/http`](https://golang.org/pkg/net/http/) Handler.
```go
package main
import (
"log"
"github.com/hibiken/asynq"
"your/app/package/tasks"
)
const redisAddr = "127.0.0.1:6379"
func main() {
srv := asynq.NewServer(
asynq.RedisClientOpt{Addr: redisAddr},
asynq.Config{
// Specify how many concurrent workers to use
Concurrency: 10,
// Optionally specify multiple queues with different priority.
Queues: map[string]int{
"critical": 6,
"default": 3,
"low": 1,
},
// See the godoc for other configuration options
},
)
// mux maps a type to a handler
mux := asynq.NewServeMux()
mux.HandleFunc(tasks.TypeEmailDelivery, tasks.HandleEmailDeliveryTask)
mux.Handle(tasks.TypeImageResize, tasks.NewImageProcessor())
// ...register other handlers...
if err := srv.Run(mux); err != nil {
log.Fatalf("could not run server: %v", err)
}
}
```
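`ServeMux` also supports middleware, as noted in the feature list above. Here is a minimal sketch of a logging middleware (the `loggingMiddleware` function is illustrative, not part of the library; only `asynq.Handler`, `asynq.HandlerFunc`, and `ServeMux.Use` come from the package):

```go
// loggingMiddleware wraps an asynq.Handler and logs each task's duration.
func loggingMiddleware(h asynq.Handler) asynq.Handler {
	return asynq.HandlerFunc(func(ctx context.Context, t *asynq.Task) error {
		start := time.Now()
		err := h.ProcessTask(ctx, t)
		log.Printf("processed task: type=%s elapsed=%v err=%v", t.Type(), time.Since(start), err)
		return err
	})
}
```

Register it on the mux before the handlers, e.g. `mux.Use(loggingMiddleware)`.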
For a more detailed walk-through of the library, see our [Getting Started](https://github.com/hibiken/asynq/wiki/Getting-Started) guide.
To learn more about `asynq` features and APIs, see the package [godoc](https://godoc.org/github.com/hibiken/asynq).
## Web UI
[Asynqmon](https://github.com/hibiken/asynqmon) is a web based tool for monitoring and administrating Asynq queues and tasks.
Here's a few screenshots of the Web UI:
**Queues view**
![Web UI Queues View](https://user-images.githubusercontent.com/11155743/114697016-07327f00-9d26-11eb-808c-0ac841dc888e.png)
**Tasks view**
![Web UI TasksView](https://user-images.githubusercontent.com/11155743/114697070-1f0a0300-9d26-11eb-855c-d3ec263865b7.png)
**Metrics view**
<img width="1532" alt="Screen Shot 2021-12-19 at 4 37 19 PM" src="https://user-images.githubusercontent.com/10953044/146777420-cae6c476-bac6-469c-acce-b2f6584e8707.png">
**Settings and adaptive dark mode**
![Web UI Settings and adaptive dark mode](https://user-images.githubusercontent.com/11155743/114697149-3517c380-9d26-11eb-9f7a-ae2dd00aad5b.png)
For details on how to use the tool, refer to the tool's [README](https://github.com/hibiken/asynqmon#readme).
## Command Line Tool
Asynq ships with a command line tool to inspect the state of queues and tasks.
To install the CLI tool, run the following command:
```sh
go install github.com/hibiken/asynq/tools/asynq
```
Here's an example of running the `asynq dash` command:
![Gif](/docs/assets/dash.gif)
For details on how to use the tool, refer to the tool's [README](/tools/asynq/README.md).
## Contributing
We are open to, and grateful for, any contributions (GitHub issues/PRs, feedback on [Gitter channel](https://gitter.im/go-asynq/community), etc) made by the community.
Please see the [Contribution Guide](/CONTRIBUTING.md) before contributing.
## License
Copyright (c) 2019-present [Ken Hibino](https://github.com/hibiken) and [Contributors](https://github.com/hibiken/asynq/graphs/contributors). `Asynq` is free and open-source software licensed under the [MIT License](https://github.com/hibiken/asynq/blob/master/LICENSE). Official logo was created by [Vic Shóstak](https://github.com/koddr) and distributed under [Creative Commons](https://creativecommons.org/publicdomain/zero/1.0/) license (CC0 1.0 Universal).

176
vendor/github.com/hibiken/asynq/aggregator.go generated vendored Normal file

@ -0,0 +1,176 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"context"
"sync"
"time"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
)
// An aggregator is responsible for checking groups and aggregating them into one task
// if any of the grouping conditions is met.
type aggregator struct {
logger *log.Logger
broker base.Broker
client *Client
// channel to communicate back to the long running "aggregator" goroutine.
done chan struct{}
// list of queue names to check and aggregate.
queues []string
// Group configurations
gracePeriod time.Duration
maxDelay time.Duration
maxSize int
// User provided group aggregator.
ga GroupAggregator
// interval used to check for aggregation
interval time.Duration
// sema is a counting semaphore to ensure the number of active aggregating functions
// does not exceed the limit.
sema chan struct{}
}
type aggregatorParams struct {
logger *log.Logger
broker base.Broker
queues []string
gracePeriod time.Duration
maxDelay time.Duration
maxSize int
groupAggregator GroupAggregator
}
const (
// Maximum number of aggregation checks in flight concurrently.
maxConcurrentAggregationChecks = 3
// Default interval used for aggregation checks. If the provided gracePeriod is less than
// the default, use the gracePeriod.
defaultAggregationCheckInterval = 7 * time.Second
)
func newAggregator(params aggregatorParams) *aggregator {
interval := defaultAggregationCheckInterval
if params.gracePeriod < interval {
interval = params.gracePeriod
}
return &aggregator{
logger: params.logger,
broker: params.broker,
client: &Client{broker: params.broker},
done: make(chan struct{}),
queues: params.queues,
gracePeriod: params.gracePeriod,
maxDelay: params.maxDelay,
maxSize: params.maxSize,
ga: params.groupAggregator,
sema: make(chan struct{}, maxConcurrentAggregationChecks),
interval: interval,
}
}
func (a *aggregator) shutdown() {
if a.ga == nil {
return
}
a.logger.Debug("Aggregator shutting down...")
// Signal the aggregator goroutine to stop.
a.done <- struct{}{}
}
func (a *aggregator) start(wg *sync.WaitGroup) {
if a.ga == nil {
return
}
wg.Add(1)
go func() {
defer wg.Done()
ticker := time.NewTicker(a.interval)
for {
select {
case <-a.done:
a.logger.Debug("Waiting for all aggregation checks to finish...")
// block until all aggregation checks released the token
for i := 0; i < cap(a.sema); i++ {
a.sema <- struct{}{}
}
a.logger.Debug("Aggregator done")
ticker.Stop()
return
case t := <-ticker.C:
a.exec(t)
}
}
}()
}
func (a *aggregator) exec(t time.Time) {
select {
case a.sema <- struct{}{}: // acquire token
go a.aggregate(t)
default:
// If the semaphore blocks, then we are currently running max number of
// aggregation checks. Skip this round and log warning.
a.logger.Warnf("Max number of aggregation checks in flight. Skipping")
}
}
func (a *aggregator) aggregate(t time.Time) {
defer func() { <-a.sema /* release token */ }()
for _, qname := range a.queues {
groups, err := a.broker.ListGroups(qname)
if err != nil {
a.logger.Errorf("Failed to list groups in queue: %q", qname)
continue
}
for _, gname := range groups {
aggregationSetID, err := a.broker.AggregationCheck(
qname, gname, t, a.gracePeriod, a.maxDelay, a.maxSize)
if err != nil {
a.logger.Errorf("Failed to run aggregation check: queue=%q group=%q", qname, gname)
continue
}
if aggregationSetID == "" {
a.logger.Debugf("No aggregation needed at this time: queue=%q group=%q", qname, gname)
continue
}
// Aggregate and enqueue.
msgs, deadline, err := a.broker.ReadAggregationSet(qname, gname, aggregationSetID)
if err != nil {
a.logger.Errorf("Failed to read aggregation set: queue=%q, group=%q, setID=%q",
qname, gname, aggregationSetID)
continue
}
tasks := make([]*Task, len(msgs))
for i, m := range msgs {
tasks[i] = NewTask(m.Type, m.Payload)
}
aggregatedTask := a.ga.Aggregate(gname, tasks)
ctx, cancel := context.WithDeadline(context.Background(), deadline)
if _, err := a.client.EnqueueContext(ctx, aggregatedTask, Queue(qname)); err != nil {
a.logger.Errorf("Failed to enqueue aggregated task (queue=%q, group=%q, setID=%q): %v",
qname, gname, aggregationSetID, err)
cancel()
continue
}
if err := a.broker.DeleteAggregationSet(ctx, qname, gname, aggregationSetID); err != nil {
a.logger.Warnf("Failed to delete aggregation set: queue=%q, group=%q, setID=%q",
qname, gname, aggregationSetID)
}
cancel()
}
}
}
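// A sketch of how this aggregator is driven from the public API (user-side
// code, shown for illustration only; the payload handling is hypothetical):
//
//	srv := asynq.NewServer(redisConnOpt, asynq.Config{
//		GroupAggregator: asynq.GroupAggregatorFunc(func(group string, tasks []*asynq.Task) *asynq.Task {
//			// Combine the buffered payloads into a single batch task.
//			var combined []byte
//			for _, t := range tasks {
//				combined = append(combined, t.Payload()...)
//			}
//			return asynq.NewTask("batched:"+group, combined)
//		}),
//		GroupGracePeriod: 2 * time.Minute,
//		GroupMaxDelay:    10 * time.Minute,
//		GroupMaxSize:     20,
//	})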

547
vendor/github.com/hibiken/asynq/asynq.go generated vendored Normal file

@ -0,0 +1,547 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/url"
"strconv"
"strings"
"time"
"github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base"
)
// Task represents a unit of work to be performed.
type Task struct {
// typename indicates the type of task to be performed.
typename string
// payload holds data needed to perform the task.
payload []byte
// opts holds options for the task.
opts []Option
// w is the ResultWriter for the task.
w *ResultWriter
}
func (t *Task) Type() string { return t.typename }
func (t *Task) Payload() []byte { return t.payload }
// ResultWriter returns a pointer to the ResultWriter associated with the task.
//
// Nil pointer is returned if called on a newly created task (i.e. task created by calling NewTask).
// Only the tasks passed to Handler.ProcessTask have a valid ResultWriter pointer.
func (t *Task) ResultWriter() *ResultWriter { return t.w }
// NewTask returns a new Task given a type name and payload data.
// Options can be passed to configure task processing behavior.
func NewTask(typename string, payload []byte, opts ...Option) *Task {
return &Task{
typename: typename,
payload: payload,
opts: opts,
}
}
// newTask creates a task with the given typename, payload and ResultWriter.
func newTask(typename string, payload []byte, w *ResultWriter) *Task {
return &Task{
typename: typename,
payload: payload,
w: w,
}
}
// A TaskInfo describes a task and its metadata.
type TaskInfo struct {
// ID is the identifier of the task.
ID string
// Queue is the name of the queue in which the task belongs.
Queue string
// Type is the type name of the task.
Type string
// Payload is the payload data of the task.
Payload []byte
// State indicates the task state.
State TaskState
// MaxRetry is the maximum number of times the task can be retried.
MaxRetry int
// Retried is the number of times the task has retried so far.
Retried int
// LastErr is the error message from the last failure.
LastErr string
// LastFailedAt is the time of the last failure, if any.
// If the task has no failures, LastFailedAt is zero time (i.e. time.Time{}).
LastFailedAt time.Time
// Timeout is the duration the task can be processed by Handler before being retried,
// zero if not specified
Timeout time.Duration
// Deadline is the deadline for the task, zero value if not specified.
Deadline time.Time
// Group is the name of the group in which the task belongs.
//
// Tasks in the same queue can be grouped together by Group name and will be aggregated into one task
// by a Server processing the queue.
//
// Empty string (default) indicates the task does not belong to any group, and no aggregation will be applied to the task.
Group string
// NextProcessAt is the time the task is scheduled to be processed,
// zero if not applicable.
NextProcessAt time.Time
// IsOrphaned describes whether the task is left in active state with no worker processing it.
// An orphaned task indicates that the worker has crashed or experienced network failures and was not able to
// extend its lease on the task.
//
// This task will be recovered by running a server against the queue the task is in.
// This field is only applicable to tasks with TaskStateActive.
IsOrphaned bool
// Retention is the duration of the retention period after the task is successfully processed.
Retention time.Duration
// CompletedAt is the time when the task is processed successfully.
// Zero value (i.e. time.Time{}) indicates no value.
CompletedAt time.Time
// Result holds the result data associated with the task.
// Use ResultWriter to write result data from the Handler.
Result []byte
}
// If t is non-zero, returns time converted from t as unix time in seconds.
// If t is zero, returns zero value of time.Time.
func fromUnixTimeOrZero(t int64) time.Time {
if t == 0 {
return time.Time{}
}
return time.Unix(t, 0)
}
func newTaskInfo(msg *base.TaskMessage, state base.TaskState, nextProcessAt time.Time, result []byte) *TaskInfo {
info := TaskInfo{
ID: msg.ID,
Queue: msg.Queue,
Type: msg.Type,
Payload: msg.Payload, // Do we need to make a copy?
MaxRetry: msg.Retry,
Retried: msg.Retried,
LastErr: msg.ErrorMsg,
Group: msg.GroupKey,
Timeout: time.Duration(msg.Timeout) * time.Second,
Deadline: fromUnixTimeOrZero(msg.Deadline),
Retention: time.Duration(msg.Retention) * time.Second,
NextProcessAt: nextProcessAt,
LastFailedAt: fromUnixTimeOrZero(msg.LastFailedAt),
CompletedAt: fromUnixTimeOrZero(msg.CompletedAt),
Result: result,
}
switch state {
case base.TaskStateActive:
info.State = TaskStateActive
case base.TaskStatePending:
info.State = TaskStatePending
case base.TaskStateScheduled:
info.State = TaskStateScheduled
case base.TaskStateRetry:
info.State = TaskStateRetry
case base.TaskStateArchived:
info.State = TaskStateArchived
case base.TaskStateCompleted:
info.State = TaskStateCompleted
case base.TaskStateAggregating:
info.State = TaskStateAggregating
default:
panic(fmt.Sprintf("internal error: unknown state: %d", state))
}
return &info
}
// TaskState denotes the state of a task.
type TaskState int
const (
// Indicates that the task is currently being processed by Handler.
TaskStateActive TaskState = iota + 1
// Indicates that the task is ready to be processed by Handler.
TaskStatePending
// Indicates that the task is scheduled to be processed some time in the future.
TaskStateScheduled
// Indicates that the task has previously failed and scheduled to be processed some time in the future.
TaskStateRetry
// Indicates that the task is archived and stored for inspection purposes.
TaskStateArchived
// Indicates that the task is processed successfully and retained until the retention TTL expires.
TaskStateCompleted
// Indicates that the task is waiting in a group to be aggregated into one task.
TaskStateAggregating
)
func (s TaskState) String() string {
switch s {
case TaskStateActive:
return "active"
case TaskStatePending:
return "pending"
case TaskStateScheduled:
return "scheduled"
case TaskStateRetry:
return "retry"
case TaskStateArchived:
return "archived"
case TaskStateCompleted:
return "completed"
case TaskStateAggregating:
return "aggregating"
}
panic("asynq: unknown task state")
}
// RedisConnOpt is a discriminated union of types that represent Redis connection configuration option.
//
// RedisConnOpt represents a sum of the following types:
//
// - RedisClientOpt
// - RedisFailoverClientOpt
// - RedisClusterClientOpt
type RedisConnOpt interface {
// MakeRedisClient returns a new redis client instance.
// Return value is intentionally opaque to hide the implementation detail of redis client.
MakeRedisClient() interface{}
}
// RedisClientOpt is used to create a redis client that connects
// to a redis server directly.
type RedisClientOpt struct {
// Network type to use, either tcp or unix.
// Default is tcp.
Network string
// Redis server address in "host:port" format.
Addr string
// Username to authenticate the current connection when Redis ACLs are used.
// See: https://redis.io/commands/auth.
Username string
// Password to authenticate the current connection.
// See: https://redis.io/commands/auth.
Password string
// Redis DB to select after connecting to a server.
// See: https://redis.io/commands/select.
DB int
// Dial timeout for establishing new connections.
// Default is 5 seconds.
DialTimeout time.Duration
// Timeout for socket reads.
// If timeout is reached, read commands will fail with a timeout error
// instead of blocking.
//
// Use value -1 for no timeout and 0 for default.
// Default is 3 seconds.
ReadTimeout time.Duration
// Timeout for socket writes.
// If timeout is reached, write commands will fail with a timeout error
// instead of blocking.
//
// Use value -1 for no timeout and 0 for default.
// Default is ReadTimeout.
WriteTimeout time.Duration
// Maximum number of socket connections.
// Default is 10 connections per every CPU as reported by runtime.NumCPU.
PoolSize int
// TLS Config used to connect to a server.
// TLS will be negotiated only if this field is set.
TLSConfig *tls.Config
}
func (opt RedisClientOpt) MakeRedisClient() interface{} {
return redis.NewClient(&redis.Options{
Network: opt.Network,
Addr: opt.Addr,
Username: opt.Username,
Password: opt.Password,
DB: opt.DB,
DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolSize: opt.PoolSize,
TLSConfig: opt.TLSConfig,
})
}
// RedisFailoverClientOpt is used to create a redis client that talks
// to redis sentinels for service discovery and has an automatic failover
// capability.
type RedisFailoverClientOpt struct {
// Name of the Redis master monitored by the sentinels.
MasterName string
// Addresses of sentinels in "host:port" format.
// Use at least three sentinels to avoid problems described in
// https://redis.io/topics/sentinel.
SentinelAddrs []string
// Redis sentinel password.
SentinelPassword string
// Username to authenticate the current connection when Redis ACLs are used.
// See: https://redis.io/commands/auth.
Username string
// Password to authenticate the current connection.
// See: https://redis.io/commands/auth.
Password string
// Redis DB to select after connecting to a server.
// See: https://redis.io/commands/select.
DB int
// Dial timeout for establishing new connections.
// Default is 5 seconds.
DialTimeout time.Duration
// Timeout for socket reads.
// If timeout is reached, read commands will fail with a timeout error
// instead of blocking.
//
// Use value -1 for no timeout and 0 for default.
// Default is 3 seconds.
ReadTimeout time.Duration
// Timeout for socket writes.
// If timeout is reached, write commands will fail with a timeout error
// instead of blocking.
//
// Use value -1 for no timeout and 0 for default.
// Default is ReadTimeout
WriteTimeout time.Duration
// Maximum number of socket connections.
// Default is 10 connections per every CPU as reported by runtime.NumCPU.
PoolSize int
// TLS Config used to connect to a server.
// TLS will be negotiated only if this field is set.
TLSConfig *tls.Config
}
func (opt RedisFailoverClientOpt) MakeRedisClient() interface{} {
return redis.NewFailoverClient(&redis.FailoverOptions{
MasterName: opt.MasterName,
SentinelAddrs: opt.SentinelAddrs,
SentinelPassword: opt.SentinelPassword,
Username: opt.Username,
Password: opt.Password,
DB: opt.DB,
DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolSize: opt.PoolSize,
TLSConfig: opt.TLSConfig,
})
}
// RedisClusterClientOpt is used to create a redis client that connects to
// a redis cluster.
type RedisClusterClientOpt struct {
// A seed list of host:port addresses of cluster nodes.
Addrs []string
// The maximum number of retries before giving up.
// Command is retried on network errors and MOVED/ASK redirects.
// Default is 8 retries.
MaxRedirects int
// Username to authenticate the current connection when Redis ACLs are used.
// See: https://redis.io/commands/auth.
Username string
// Password to authenticate the current connection.
// See: https://redis.io/commands/auth.
Password string
// Dial timeout for establishing new connections.
// Default is 5 seconds.
DialTimeout time.Duration
// Timeout for socket reads.
// If timeout is reached, read commands will fail with a timeout error
// instead of blocking.
//
// Use value -1 for no timeout and 0 for default.
// Default is 3 seconds.
ReadTimeout time.Duration
// Timeout for socket writes.
// If timeout is reached, write commands will fail with a timeout error
// instead of blocking.
//
// Use value -1 for no timeout and 0 for default.
// Default is ReadTimeout.
WriteTimeout time.Duration
// TLS Config used to connect to a server.
// TLS will be negotiated only if this field is set.
TLSConfig *tls.Config
}
func (opt RedisClusterClientOpt) MakeRedisClient() interface{} {
return redis.NewClusterClient(&redis.ClusterOptions{
Addrs: opt.Addrs,
MaxRedirects: opt.MaxRedirects,
Username: opt.Username,
Password: opt.Password,
DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
TLSConfig: opt.TLSConfig,
})
}
// ParseRedisURI parses redis uri string and returns RedisConnOpt if uri is valid.
// It returns a non-nil error if uri cannot be parsed.
//
// Four URI schemes are supported: redis:, rediss:, redis-socket:, and redis-sentinel:.
// Supported formats are:
// redis://[:password@]host[:port][/dbnumber]
// rediss://[:password@]host[:port][/dbnumber]
// redis-socket://[:password@]path[?db=dbnumber]
// redis-sentinel://[:password@]host1[:port][,host2:[:port]][,hostN:[:port]][?master=masterName]
func ParseRedisURI(uri string) (RedisConnOpt, error) {
u, err := url.Parse(uri)
if err != nil {
return nil, fmt.Errorf("asynq: could not parse redis uri: %v", err)
}
switch u.Scheme {
case "redis", "rediss":
return parseRedisURI(u)
case "redis-socket":
return parseRedisSocketURI(u)
case "redis-sentinel":
return parseRedisSentinelURI(u)
default:
return nil, fmt.Errorf("asynq: unsupported uri scheme: %q", u.Scheme)
}
}
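// Example (a sketch; the address and password are placeholders):
//
//	opt, err := ParseRedisURI("redis-sentinel://:mypassword@localhost:26379,localhost:26380?master=mymaster")
//	// On success, opt is a RedisFailoverClientOpt with MasterName "mymaster"
//	// and the two sentinel addresses.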
func parseRedisURI(u *url.URL) (RedisConnOpt, error) {
var db int
var err error
var redisConnOpt RedisClientOpt
if len(u.Path) > 0 {
xs := strings.Split(strings.Trim(u.Path, "/"), "/")
db, err = strconv.Atoi(xs[0])
if err != nil {
return nil, fmt.Errorf("asynq: could not parse redis uri: database number should be the first segment of the path")
}
}
var password string
if v, ok := u.User.Password(); ok {
password = v
}
if u.Scheme == "rediss" {
h, _, err := net.SplitHostPort(u.Host)
if err != nil {
h = u.Host
}
redisConnOpt.TLSConfig = &tls.Config{ServerName: h}
}
redisConnOpt.Addr = u.Host
redisConnOpt.Password = password
redisConnOpt.DB = db
return redisConnOpt, nil
}
func parseRedisSocketURI(u *url.URL) (RedisConnOpt, error) {
const errPrefix = "asynq: could not parse redis socket uri"
if len(u.Path) == 0 {
return nil, fmt.Errorf("%s: path does not exist", errPrefix)
}
q := u.Query()
var db int
var err error
if n := q.Get("db"); n != "" {
db, err = strconv.Atoi(n)
if err != nil {
return nil, fmt.Errorf("%s: query param `db` should be a number", errPrefix)
}
}
var password string
if v, ok := u.User.Password(); ok {
password = v
}
return RedisClientOpt{Network: "unix", Addr: u.Path, DB: db, Password: password}, nil
}
func parseRedisSentinelURI(u *url.URL) (RedisConnOpt, error) {
addrs := strings.Split(u.Host, ",")
master := u.Query().Get("master")
var password string
if v, ok := u.User.Password(); ok {
password = v
}
return RedisFailoverClientOpt{MasterName: master, SentinelAddrs: addrs, SentinelPassword: password}, nil
}
// ResultWriter is a client interface to write result data for a task.
// It writes the data to the redis instance the server is connected to.
type ResultWriter struct {
id string // task ID this writer is responsible for
qname string // queue name the task belongs to
broker base.Broker
ctx context.Context // context associated with the task
}
// Write writes the given data as a result of the task the ResultWriter is associated with.
func (w *ResultWriter) Write(data []byte) (n int, err error) {
select {
case <-w.ctx.Done():
return 0, fmt.Errorf("failed to result task result: %v", w.ctx.Err())
default:
}
return w.broker.WriteResult(w.qname, w.id, data)
}
// TaskID returns the ID of the task the ResultWriter is associated with.
func (w *ResultWriter) TaskID() string {
return w.id
}

428
vendor/github.com/hibiken/asynq/client.go generated vendored Normal file

@ -0,0 +1,428 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"context"
"fmt"
"strings"
"time"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/rdb"
)
// A Client is responsible for scheduling tasks.
//
// A Client is used to register tasks that should be processed
// immediately or some time in the future.
//
// Clients are safe for concurrent use by multiple goroutines.
type Client struct {
broker base.Broker
}
// NewClient returns a new Client instance given a redis connection option.
func NewClient(r RedisConnOpt) *Client {
c, ok := r.MakeRedisClient().(redis.UniversalClient)
if !ok {
panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
}
return &Client{broker: rdb.NewRDB(c)}
}
type OptionType int
const (
MaxRetryOpt OptionType = iota
QueueOpt
TimeoutOpt
DeadlineOpt
UniqueOpt
ProcessAtOpt
ProcessInOpt
TaskIDOpt
RetentionOpt
GroupOpt
)
// Option specifies the task processing behavior.
type Option interface {
// String returns a string representation of the option.
String() string
// Type describes the type of the option.
Type() OptionType
// Value returns a value used to create this option.
Value() interface{}
}
// Internal option representations.
type (
retryOption int
queueOption string
taskIDOption string
timeoutOption time.Duration
deadlineOption time.Time
uniqueOption time.Duration
processAtOption time.Time
processInOption time.Duration
retentionOption time.Duration
groupOption string
)
// MaxRetry returns an option to specify the max number of times
// the task will be retried.
//
// A negative retry count is treated as zero retries.
func MaxRetry(n int) Option {
if n < 0 {
n = 0
}
return retryOption(n)
}
func (n retryOption) String() string { return fmt.Sprintf("MaxRetry(%d)", int(n)) }
func (n retryOption) Type() OptionType { return MaxRetryOpt }
func (n retryOption) Value() interface{} { return int(n) }
// Queue returns an option to specify the queue to enqueue the task into.
func Queue(name string) Option {
return queueOption(name)
}
func (name queueOption) String() string { return fmt.Sprintf("Queue(%q)", string(name)) }
func (name queueOption) Type() OptionType { return QueueOpt }
func (name queueOption) Value() interface{} { return string(name) }
// TaskID returns an option to specify the task ID.
func TaskID(id string) Option {
return taskIDOption(id)
}
func (id taskIDOption) String() string { return fmt.Sprintf("TaskID(%q)", string(id)) }
func (id taskIDOption) Type() OptionType { return TaskIDOpt }
func (id taskIDOption) Value() interface{} { return string(id) }
// Timeout returns an option to specify how long a task may run.
// If the timeout elapses before the Handler returns, then the task
// will be retried.
//
// Zero duration means no limit.
//
// If there's a conflicting Deadline option, whichever comes earliest
// will be used.
func Timeout(d time.Duration) Option {
return timeoutOption(d)
}
func (d timeoutOption) String() string { return fmt.Sprintf("Timeout(%v)", time.Duration(d)) }
func (d timeoutOption) Type() OptionType { return TimeoutOpt }
func (d timeoutOption) Value() interface{} { return time.Duration(d) }
// Deadline returns an option to specify the deadline for the given task.
// If it reaches the deadline before the Handler returns, then the task
// will be retried.
//
// If there's a conflicting Timeout option, whichever comes earliest
// will be used.
func Deadline(t time.Time) Option {
return deadlineOption(t)
}
func (t deadlineOption) String() string {
return fmt.Sprintf("Deadline(%v)", time.Time(t).Format(time.UnixDate))
}
func (t deadlineOption) Type() OptionType { return DeadlineOpt }
func (t deadlineOption) Value() interface{} { return time.Time(t) }
// Unique returns an option to enqueue a task only if the given task is unique.
// A task enqueued with this option is guaranteed to be unique within the given TTL.
// Once the task gets processed successfully or once the TTL has expired,
// another task with the same uniqueness may be enqueued.
// ErrDuplicateTask error is returned when enqueueing a duplicate task.
// TTL duration must be greater than or equal to 1 second.
//
// Uniqueness of a task is based on the following properties:
// - Task Type
// - Task Payload
// - Queue Name
func Unique(ttl time.Duration) Option {
return uniqueOption(ttl)
}
func (ttl uniqueOption) String() string { return fmt.Sprintf("Unique(%v)", time.Duration(ttl)) }
func (ttl uniqueOption) Type() OptionType { return UniqueOpt }
func (ttl uniqueOption) Value() interface{} { return time.Duration(ttl) }
// ProcessAt returns an option to specify when to process the given task.
//
// If there's a conflicting ProcessIn option, the last option passed to Enqueue overrides the others.
func ProcessAt(t time.Time) Option {
return processAtOption(t)
}
func (t processAtOption) String() string {
return fmt.Sprintf("ProcessAt(%v)", time.Time(t).Format(time.UnixDate))
}
func (t processAtOption) Type() OptionType { return ProcessAtOpt }
func (t processAtOption) Value() interface{} { return time.Time(t) }
// ProcessIn returns an option to specify when to process the given task relative to the current time.
//
// If there's a conflicting ProcessAt option, the last option passed to Enqueue overrides the others.
func ProcessIn(d time.Duration) Option {
return processInOption(d)
}
func (d processInOption) String() string { return fmt.Sprintf("ProcessIn(%v)", time.Duration(d)) }
func (d processInOption) Type() OptionType { return ProcessInOpt }
func (d processInOption) Value() interface{} { return time.Duration(d) }
// Retention returns an option to specify the duration of retention period for the task.
// If this option is provided, the task will be stored as a completed task after successful processing.
// A completed task will be deleted after the specified duration elapses.
func Retention(d time.Duration) Option {
return retentionOption(d)
}
func (ttl retentionOption) String() string { return fmt.Sprintf("Retention(%v)", time.Duration(ttl)) }
func (ttl retentionOption) Type() OptionType { return RetentionOpt }
func (ttl retentionOption) Value() interface{} { return time.Duration(ttl) }
// Group returns an option to specify the group used for the task.
// Tasks in a given queue with the same group will be aggregated into one task before passed to Handler.
func Group(name string) Option {
return groupOption(name)
}
func (name groupOption) String() string { return fmt.Sprintf("Group(%q)", string(name)) }
func (name groupOption) Type() OptionType { return GroupOpt }
func (name groupOption) Value() interface{} { return string(name) }
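// A sketch of the client side of aggregation (the queue and group names are
// illustrative): tasks sharing a queue and a group are buffered and handed to
// the server's GroupAggregator as one batch.
//
//	info, err := client.Enqueue(task, asynq.Queue("notifications"), asynq.Group("user:123"))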
// ErrDuplicateTask indicates that the given task could not be enqueued since it's a duplicate of another task.
//
// ErrDuplicateTask error only applies to tasks enqueued with a Unique option.
var ErrDuplicateTask = errors.New("task already exists")
// ErrTaskIDConflict indicates that the given task could not be enqueued since its task ID already exists.
//
// ErrTaskIDConflict error only applies to tasks enqueued with a TaskID option.
var ErrTaskIDConflict = errors.New("task ID conflicts with another task")
type option struct {
retry int
queue string
taskID string
timeout time.Duration
deadline time.Time
uniqueTTL time.Duration
processAt time.Time
retention time.Duration
group string
}
// composeOptions merges user provided options into the default options
// and returns the composed option.
// It also validates the user provided options and returns an error if any of
// the user provided options fail the validations.
func composeOptions(opts ...Option) (option, error) {
res := option{
retry: defaultMaxRetry,
queue: base.DefaultQueueName,
taskID: uuid.NewString(),
timeout: 0, // do not set to defaultTimeout here
deadline: time.Time{},
processAt: time.Now(),
}
for _, opt := range opts {
switch opt := opt.(type) {
case retryOption:
res.retry = int(opt)
case queueOption:
qname := string(opt)
if err := base.ValidateQueueName(qname); err != nil {
return option{}, err
}
res.queue = qname
case taskIDOption:
id := string(opt)
if isBlank(id) {
return option{}, errors.New("task ID cannot be empty")
}
res.taskID = id
case timeoutOption:
res.timeout = time.Duration(opt)
case deadlineOption:
res.deadline = time.Time(opt)
case uniqueOption:
ttl := time.Duration(opt)
if ttl < 1*time.Second {
return option{}, errors.New("Unique TTL cannot be less than 1s")
}
res.uniqueTTL = ttl
case processAtOption:
res.processAt = time.Time(opt)
case processInOption:
res.processAt = time.Now().Add(time.Duration(opt))
case retentionOption:
res.retention = time.Duration(opt)
case groupOption:
key := string(opt)
if isBlank(key) {
return option{}, errors.New("group key cannot be empty")
}
res.group = key
default:
// ignore unexpected option
}
}
return res, nil
}
// isBlank returns true if the given s is empty or consists only of whitespace.
func isBlank(s string) bool {
return strings.TrimSpace(s) == ""
}
const (
// Default max retry count used if nothing is specified.
defaultMaxRetry = 25
// Default timeout used if both timeout and deadline are not specified.
defaultTimeout = 30 * time.Minute
)
// Value zero indicates no timeout and no deadline.
var (
noTimeout time.Duration = 0
noDeadline time.Time = time.Unix(0, 0)
)
// Close closes the connection with redis.
func (c *Client) Close() error {
return c.broker.Close()
}
// Enqueue enqueues the given task to a queue.
//
// Enqueue returns TaskInfo and nil error if the task is enqueued successfully, otherwise returns a non-nil error.
//
// The argument opts specifies the behavior of task processing.
// If there are conflicting Option values the last one overrides others.
// Any options provided to NewTask can be overridden by options passed to Enqueue.
// By default, max retry is set to 25 and timeout is set to 30 minutes.
//
// If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
//
// Enqueue uses context.Background internally; to specify the context, use EnqueueContext.
func (c *Client) Enqueue(task *Task, opts ...Option) (*TaskInfo, error) {
return c.EnqueueContext(context.Background(), task, opts...)
}
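// A sketch of option precedence (payload is a placeholder): options passed to
// Enqueue override those attached via NewTask, and with conflicting values the
// last one wins.
//
//	task := asynq.NewTask("image:resize", payload, asynq.MaxRetry(5))
//	info, err := client.Enqueue(task, asynq.MaxRetry(10)) // effective max retry is 10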
// EnqueueContext enqueues the given task to a queue.
//
// EnqueueContext returns TaskInfo and nil error if the task is enqueued successfully, otherwise returns a non-nil error.
//
// The argument opts specifies the behavior of task processing.
// If there are conflicting Option values the last one overrides others.
// Any options provided to NewTask can be overridden by options passed to Enqueue.
// By default, max retry is set to 25 and timeout is set to 30 minutes.
//
// If no ProcessAt or ProcessIn options are provided, the task will be pending immediately.
//
// The first argument context applies to the enqueue operation. To specify task timeout and deadline, use Timeout and Deadline option instead.
func (c *Client) EnqueueContext(ctx context.Context, task *Task, opts ...Option) (*TaskInfo, error) {
if task == nil {
return nil, fmt.Errorf("task cannot be nil")
}
if strings.TrimSpace(task.Type()) == "" {
return nil, fmt.Errorf("task typename cannot be empty")
}
// merge task options with the options provided at enqueue time.
opts = append(task.opts, opts...)
opt, err := composeOptions(opts...)
if err != nil {
return nil, err
}
deadline := noDeadline
if !opt.deadline.IsZero() {
deadline = opt.deadline
}
timeout := noTimeout
if opt.timeout != 0 {
timeout = opt.timeout
}
if deadline.Equal(noDeadline) && timeout == noTimeout {
// If neither deadline nor timeout are set, use default timeout.
timeout = defaultTimeout
}
var uniqueKey string
if opt.uniqueTTL > 0 {
uniqueKey = base.UniqueKey(opt.queue, task.Type(), task.Payload())
}
msg := &base.TaskMessage{
ID: opt.taskID,
Type: task.Type(),
Payload: task.Payload(),
Queue: opt.queue,
Retry: opt.retry,
Deadline: deadline.Unix(),
Timeout: int64(timeout.Seconds()),
UniqueKey: uniqueKey,
GroupKey: opt.group,
Retention: int64(opt.retention.Seconds()),
}
now := time.Now()
var state base.TaskState
if opt.processAt.After(now) {
err = c.schedule(ctx, msg, opt.processAt, opt.uniqueTTL)
state = base.TaskStateScheduled
} else if opt.group != "" {
// Use zero value for processAt since we don't know when the task will be aggregated and processed.
opt.processAt = time.Time{}
err = c.addToGroup(ctx, msg, opt.group, opt.uniqueTTL)
state = base.TaskStateAggregating
} else {
opt.processAt = now
err = c.enqueue(ctx, msg, opt.uniqueTTL)
state = base.TaskStatePending
}
switch {
case errors.Is(err, errors.ErrDuplicateTask):
return nil, fmt.Errorf("%w", ErrDuplicateTask)
case errors.Is(err, errors.ErrTaskIdConflict):
return nil, fmt.Errorf("%w", ErrTaskIDConflict)
case err != nil:
return nil, err
}
return newTaskInfo(msg, state, opt.processAt, nil), nil
}
func (c *Client) enqueue(ctx context.Context, msg *base.TaskMessage, uniqueTTL time.Duration) error {
if uniqueTTL > 0 {
return c.broker.EnqueueUnique(ctx, msg, uniqueTTL)
}
return c.broker.Enqueue(ctx, msg)
}
func (c *Client) schedule(ctx context.Context, msg *base.TaskMessage, t time.Time, uniqueTTL time.Duration) error {
if uniqueTTL > 0 {
ttl := t.Add(uniqueTTL).Sub(time.Now())
return c.broker.ScheduleUnique(ctx, msg, t, ttl)
}
return c.broker.Schedule(ctx, msg, t)
}
func (c *Client) addToGroup(ctx context.Context, msg *base.TaskMessage, group string, uniqueTTL time.Duration) error {
if uniqueTTL > 0 {
return c.broker.AddToGroupUnique(ctx, msg, group, uniqueTTL)
}
return c.broker.AddToGroup(ctx, msg, group)
}

42
vendor/github.com/hibiken/asynq/context.go generated vendored Normal file

@ -0,0 +1,42 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"context"
asynqcontext "github.com/hibiken/asynq/internal/context"
)
// GetTaskID extracts a task ID from a context, if any.
//
// ID of a task is guaranteed to be unique.
// ID of a task doesn't change if the task is being retried.
func GetTaskID(ctx context.Context) (id string, ok bool) {
return asynqcontext.GetTaskID(ctx)
}
// GetRetryCount extracts retry count from a context, if any.
//
// Return value n indicates the number of times the associated task has been
// retried so far.
func GetRetryCount(ctx context.Context) (n int, ok bool) {
return asynqcontext.GetRetryCount(ctx)
}
// GetMaxRetry extracts maximum retry from a context, if any.
//
// Return value n indicates the maximum number of times the associated task
// can be retried if ProcessTask returns a non-nil error.
func GetMaxRetry(ctx context.Context) (n int, ok bool) {
return asynqcontext.GetMaxRetry(ctx)
}
// GetQueueName extracts queue name from a context, if any.
//
// Return value queue indicates which queue the task was pulled from.
func GetQueueName(ctx context.Context) (queue string, ok bool) {
return asynqcontext.GetQueueName(ctx)
}
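// A sketch of how a handler might use these helpers (the process function and
// the alerting step are hypothetical):
//
//	func handle(ctx context.Context, t *asynq.Task) error {
//		retried, _ := asynq.GetRetryCount(ctx)
//		maxRetry, _ := asynq.GetMaxRetry(ctx)
//		if retried == maxRetry {
//			// Final attempt: alert a human before the task is archived.
//		}
//		return process(ctx, t)
//	}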

72
vendor/github.com/hibiken/asynq/doc.go generated vendored Normal file

@ -0,0 +1,72 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
/*
Package asynq provides a framework for a Redis-based distributed task queue.
Asynq uses Redis as a message broker. To connect to redis,
specify the connection using one of RedisConnOpt types.
redisConnOpt = asynq.RedisClientOpt{
Addr: "127.0.0.1:6379",
Password: "xxxxx",
DB: 2,
}
The Client is used to enqueue a task.
client := asynq.NewClient(redisConnOpt)
// Task is created with two parameters: its type and payload.
// Payload data is simply an array of bytes. It can be encoded in JSON, Protocol Buffer, Gob, etc.
b, err := json.Marshal(ExamplePayload{UserID: 42})
if err != nil {
log.Fatal(err)
}
task := asynq.NewTask("example", b)
// Enqueue the task to be processed immediately.
info, err := client.Enqueue(task)
// Schedule the task to be processed after one minute.
info, err = client.Enqueue(task, asynq.ProcessIn(1*time.Minute))
The Server is used to run the task processing workers with a given
handler.
srv := asynq.NewServer(redisConnOpt, asynq.Config{
Concurrency: 10,
})
if err := srv.Run(handler); err != nil {
log.Fatal(err)
}
Handler is an interface type with a method which
takes a task and returns an error. Handler should return nil if
the processing is successful, otherwise return a non-nil error.
If handler panics or returns a non-nil error, the task will be retried in the future.
Example of a type that implements the Handler interface.
type TaskHandler struct {
// ...
}
func (h *TaskHandler) ProcessTask(ctx context.Context, task *asynq.Task) error {
switch task.Type() {
case "example":
var data ExamplePayload
if err := json.Unmarshal(task.Payload(), &data); err != nil {
return err
}
// perform task with the data
default:
return fmt.Errorf("unexpected task type %q", task.Type)
}
return nil
}
*/
package asynq

77
vendor/github.com/hibiken/asynq/forwarder.go generated vendored Normal file

@ -0,0 +1,77 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"time"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
)
// A forwarder is responsible for moving scheduled and retry tasks to pending state
// so that the tasks get processed by the workers.
type forwarder struct {
logger *log.Logger
broker base.Broker
// channel to communicate back to the long running "forwarder" goroutine.
done chan struct{}
// list of queue names to check and enqueue.
queues []string
// poll interval on average
avgInterval time.Duration
}
type forwarderParams struct {
logger *log.Logger
broker base.Broker
queues []string
interval time.Duration
}
func newForwarder(params forwarderParams) *forwarder {
return &forwarder{
logger: params.logger,
broker: params.broker,
done: make(chan struct{}),
queues: params.queues,
avgInterval: params.interval,
}
}
func (f *forwarder) shutdown() {
f.logger.Debug("Forwarder shutting down...")
// Signal the forwarder goroutine to stop polling.
f.done <- struct{}{}
}
// start starts the "forwarder" goroutine.
func (f *forwarder) start(wg *sync.WaitGroup) {
wg.Add(1)
go func() {
defer wg.Done()
timer := time.NewTimer(f.avgInterval)
for {
select {
case <-f.done:
f.logger.Debug("Forwarder done")
return
case <-timer.C:
f.exec()
timer.Reset(f.avgInterval)
}
}
}()
}
func (f *forwarder) exec() {
if err := f.broker.ForwardIfReady(f.queues...); err != nil {
f.logger.Errorf("Failed to forward scheduled tasks: %v", err)
}
}
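The forwarder illustrates the shutdown pattern used throughout this package: a long-lived goroutine multiplexes a timer against a done channel. A stripped-down, standalone sketch of the same pattern (all names here are illustrative):

package main

import (
	"fmt"
	"sync"
	"time"
)

// poller mirrors the forwarder's structure: a done channel plus a timer loop.
type poller struct {
	done     chan struct{}
	interval time.Duration
}

func (p *poller) start(wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		timer := time.NewTimer(p.interval)
		for {
			select {
			case <-p.done: // shutdown requested
				timer.Stop()
				return
			case <-timer.C:
				fmt.Println("tick: do one unit of work")
				timer.Reset(p.interval)
			}
		}
	}()
}

// shutdown blocks until the goroutine has received the signal,
// exactly like forwarder.shutdown above.
func (p *poller) shutdown() { p.done <- struct{}{} }

func main() {
	var wg sync.WaitGroup
	p := &poller{done: make(chan struct{}), interval: 100 * time.Millisecond}
	p.start(&wg)
	time.Sleep(350 * time.Millisecond)
	p.shutdown()
	wg.Wait()
}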

80
vendor/github.com/hibiken/asynq/healthcheck.go generated vendored Normal file
View File

@ -0,0 +1,80 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"time"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
)
// healthchecker is responsible for pinging the broker periodically
// and calling the user-provided HealthCheckFunc with the ping result.
type healthchecker struct {
logger *log.Logger
broker base.Broker
// channel to communicate back to the long running "healthchecker" goroutine.
done chan struct{}
// interval between healthchecks.
interval time.Duration
// function to call periodically.
healthcheckFunc func(error)
}
type healthcheckerParams struct {
logger *log.Logger
broker base.Broker
interval time.Duration
healthcheckFunc func(error)
}
func newHealthChecker(params healthcheckerParams) *healthchecker {
return &healthchecker{
logger: params.logger,
broker: params.broker,
done: make(chan struct{}),
interval: params.interval,
healthcheckFunc: params.healthcheckFunc,
}
}
func (hc *healthchecker) shutdown() {
if hc.healthcheckFunc == nil {
return
}
hc.logger.Debug("Healthchecker shutting down...")
// Signal the healthchecker goroutine to stop.
hc.done <- struct{}{}
}
func (hc *healthchecker) start(wg *sync.WaitGroup) {
if hc.healthcheckFunc == nil {
return
}
wg.Add(1)
go func() {
defer wg.Done()
timer := time.NewTimer(hc.interval)
for {
select {
case <-hc.done:
hc.logger.Debug("Healthchecker done")
timer.Stop()
return
case <-timer.C:
err := hc.broker.Ping()
hc.healthcheckFunc(err)
timer.Reset(hc.interval)
}
}
}()
}
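On the public API, this component is driven by the HealthCheckFunc and HealthCheckInterval fields of asynq.Config. A wiring sketch (the Redis address and log message are illustrative):

package main

import (
	"context"
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	srv := asynq.NewServer(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"}, asynq.Config{
		Concurrency:         10,
		HealthCheckInterval: 15 * time.Second, // how often the broker is pinged
		HealthCheckFunc: func(err error) { // receives each ping result
			if err != nil {
				log.Printf("redis ping failed: %v", err)
			}
		},
	})
	handler := asynq.HandlerFunc(func(ctx context.Context, t *asynq.Task) error {
		return nil // no-op handler for the sketch
	})
	if err := srv.Run(handler); err != nil {
		log.Fatal(err)
	}
}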

200
vendor/github.com/hibiken/asynq/heartbeat.go generated vendored Normal file
View File

@ -0,0 +1,200 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"os"
"sync"
"time"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/timeutil"
)
// heartbeater is responsible for writing process info to redis periodically to
// indicate that the background worker process is up.
type heartbeater struct {
logger *log.Logger
broker base.Broker
clock timeutil.Clock
// channel to communicate back to the long running "heartbeater" goroutine.
done chan struct{}
// interval between heartbeats.
interval time.Duration
// following fields are initialized at construction time and are immutable.
host string
pid int
serverID string
concurrency int
queues map[string]int
strictPriority bool
// following fields are mutable and should be accessed only by the
// heartbeater goroutine. In other words, confine these variables
// to this goroutine only.
started time.Time
workers map[string]*workerInfo
// state is shared with other goroutine but is concurrency safe.
state *serverState
// channels to receive updates on active workers.
starting <-chan *workerInfo
finished <-chan *base.TaskMessage
}
type heartbeaterParams struct {
logger *log.Logger
broker base.Broker
interval time.Duration
concurrency int
queues map[string]int
strictPriority bool
state *serverState
starting <-chan *workerInfo
finished <-chan *base.TaskMessage
}
func newHeartbeater(params heartbeaterParams) *heartbeater {
host, err := os.Hostname()
if err != nil {
host = "unknown-host"
}
return &heartbeater{
logger: params.logger,
broker: params.broker,
clock: timeutil.NewRealClock(),
done: make(chan struct{}),
interval: params.interval,
host: host,
pid: os.Getpid(),
serverID: uuid.New().String(),
concurrency: params.concurrency,
queues: params.queues,
strictPriority: params.strictPriority,
state: params.state,
workers: make(map[string]*workerInfo),
starting: params.starting,
finished: params.finished,
}
}
func (h *heartbeater) shutdown() {
h.logger.Debug("Heartbeater shutting down...")
// Signal the heartbeater goroutine to stop.
h.done <- struct{}{}
}
// A workerInfo holds information about an active worker.
type workerInfo struct {
// the task message the worker is processing.
msg *base.TaskMessage
// the time the worker has started processing the message.
started time.Time
// deadline the worker has to finish processing the task by.
deadline time.Time
// lease the worker holds for the task.
lease *base.Lease
}
func (h *heartbeater) start(wg *sync.WaitGroup) {
wg.Add(1)
go func() {
defer wg.Done()
h.started = h.clock.Now()
h.beat()
timer := time.NewTimer(h.interval)
for {
select {
case <-h.done:
h.broker.ClearServerState(h.host, h.pid, h.serverID)
h.logger.Debug("Heartbeater done")
timer.Stop()
return
case <-timer.C:
h.beat()
timer.Reset(h.interval)
case w := <-h.starting:
h.workers[w.msg.ID] = w
case msg := <-h.finished:
delete(h.workers, msg.ID)
}
}
}()
}
// beat extends leases for workers and writes server/worker info to redis.
func (h *heartbeater) beat() {
h.state.mu.Lock()
srvStatus := h.state.value.String()
h.state.mu.Unlock()
info := base.ServerInfo{
Host: h.host,
PID: h.pid,
ServerID: h.serverID,
Concurrency: h.concurrency,
Queues: h.queues,
StrictPriority: h.strictPriority,
Status: srvStatus,
Started: h.started,
ActiveWorkerCount: len(h.workers),
}
var ws []*base.WorkerInfo
idsByQueue := make(map[string][]string)
for id, w := range h.workers {
ws = append(ws, &base.WorkerInfo{
Host: h.host,
PID: h.pid,
ServerID: h.serverID,
ID: id,
Type: w.msg.Type,
Queue: w.msg.Queue,
Payload: w.msg.Payload,
Started: w.started,
Deadline: w.deadline,
})
// Check lease before adding to the set to make sure not to extend the lease if the lease is already expired.
if w.lease.IsValid() {
idsByQueue[w.msg.Queue] = append(idsByQueue[w.msg.Queue], id)
} else {
w.lease.NotifyExpiration() // notify processor if the lease is expired
}
}
// Note: Set TTL to be long enough so that it won't expire before we write again
// and short enough to expire quickly once the process is shut down or killed.
if err := h.broker.WriteServerState(&info, ws, h.interval*2); err != nil {
h.logger.Errorf("Failed to write server state data: %v", err)
}
for qname, ids := range idsByQueue {
expirationTime, err := h.broker.ExtendLease(qname, ids...)
if err != nil {
h.logger.Errorf("Failed to extend lease for tasks %v: %v", ids, err)
continue
}
for _, id := range ids {
if l := h.workers[id].lease; !l.Reset(expirationTime) {
h.logger.Warnf("Lease reset failed for %s; lease deadline: %v", id, l.Deadline())
}
}
}
}

1011
vendor/github.com/hibiken/asynq/inspector.go generated vendored Normal file

File diff suppressed because it is too large Load Diff
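The inspector diff is suppressed above, so the following is a hedged sketch only (it assumes the public Inspector API matches this vendored version): the server state the heartbeater keeps refreshed can be read back like this.

package main

import (
	"fmt"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "127.0.0.1:6379"})
	defer inspector.Close()

	// Servers returns the ServerInfo entries written periodically by each heartbeater.
	servers, err := inspector.Servers()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d asynq server(s) alive\n", len(servers))
	for _, s := range servers {
		fmt.Printf("  host=%s\n", s.Host)
	}
}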

755
vendor/github.com/hibiken/asynq/internal/base/base.go generated vendored Normal file
View File

@ -0,0 +1,755 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
// Package base defines foundational types and constants used in the asynq package.
package base
import (
"context"
"crypto/md5"
"encoding/hex"
"fmt"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v8"
"github.com/golang/protobuf/ptypes"
"github.com/hibiken/asynq/internal/errors"
pb "github.com/hibiken/asynq/internal/proto"
"github.com/hibiken/asynq/internal/timeutil"
"google.golang.org/protobuf/proto"
)
// Version of asynq library and CLI.
const Version = "0.24.0"
// DefaultQueueName is the queue name used if none is specified by the user.
const DefaultQueueName = "default"
// DefaultQueue is the redis key for the default queue.
var DefaultQueue = PendingKey(DefaultQueueName)
// Global Redis keys.
const (
AllServers = "asynq:servers" // ZSET
AllWorkers = "asynq:workers" // ZSET
AllSchedulers = "asynq:schedulers" // ZSET
AllQueues = "asynq:queues" // SET
CancelChannel = "asynq:cancel" // PubSub channel
)
// TaskState denotes the state of a task.
type TaskState int
const (
TaskStateActive TaskState = iota + 1
TaskStatePending
TaskStateScheduled
TaskStateRetry
TaskStateArchived
TaskStateCompleted
TaskStateAggregating // describes a state where the task is waiting in a group to be aggregated
)
func (s TaskState) String() string {
switch s {
case TaskStateActive:
return "active"
case TaskStatePending:
return "pending"
case TaskStateScheduled:
return "scheduled"
case TaskStateRetry:
return "retry"
case TaskStateArchived:
return "archived"
case TaskStateCompleted:
return "completed"
case TaskStateAggregating:
return "aggregating"
}
panic(fmt.Sprintf("internal error: unknown task state %d", s))
}
func TaskStateFromString(s string) (TaskState, error) {
switch s {
case "active":
return TaskStateActive, nil
case "pending":
return TaskStatePending, nil
case "scheduled":
return TaskStateScheduled, nil
case "retry":
return TaskStateRetry, nil
case "archived":
return TaskStateArchived, nil
case "completed":
return TaskStateCompleted, nil
case "aggregating":
return TaskStateAggregating, nil
}
return 0, errors.E(errors.FailedPrecondition, fmt.Sprintf("%q is not supported task state", s))
}
// ValidateQueueName validates a given qname to be used as a queue name.
// Returns nil if valid, otherwise returns non-nil error.
func ValidateQueueName(qname string) error {
if len(strings.TrimSpace(qname)) == 0 {
return fmt.Errorf("queue name must contain one or more characters")
}
return nil
}
// QueueKeyPrefix returns a prefix for all keys in the given queue.
func QueueKeyPrefix(qname string) string {
return fmt.Sprintf("asynq:{%s}:", qname)
}
// TaskKeyPrefix returns a prefix for task key.
func TaskKeyPrefix(qname string) string {
return fmt.Sprintf("%st:", QueueKeyPrefix(qname))
}
// TaskKey returns a redis key for the given task message.
func TaskKey(qname, id string) string {
return fmt.Sprintf("%s%s", TaskKeyPrefix(qname), id)
}
// PendingKey returns a redis key for the given queue name.
func PendingKey(qname string) string {
return fmt.Sprintf("%spending", QueueKeyPrefix(qname))
}
// ActiveKey returns a redis key for the active tasks.
func ActiveKey(qname string) string {
return fmt.Sprintf("%sactive", QueueKeyPrefix(qname))
}
// ScheduledKey returns a redis key for the scheduled tasks.
func ScheduledKey(qname string) string {
return fmt.Sprintf("%sscheduled", QueueKeyPrefix(qname))
}
// RetryKey returns a redis key for the retry tasks.
func RetryKey(qname string) string {
return fmt.Sprintf("%sretry", QueueKeyPrefix(qname))
}
// ArchivedKey returns a redis key for the archived tasks.
func ArchivedKey(qname string) string {
return fmt.Sprintf("%sarchived", QueueKeyPrefix(qname))
}
// LeaseKey returns a redis key for the lease.
func LeaseKey(qname string) string {
return fmt.Sprintf("%slease", QueueKeyPrefix(qname))
}
// CompletedKey returns a redis key for the completed tasks.
func CompletedKey(qname string) string {
return fmt.Sprintf("%scompleted", QueueKeyPrefix(qname))
}
// PausedKey returns a redis key to indicate that the given queue is paused.
func PausedKey(qname string) string {
return fmt.Sprintf("%spaused", QueueKeyPrefix(qname))
}
// ProcessedTotalKey returns a redis key for total processed count for the given queue.
func ProcessedTotalKey(qname string) string {
return fmt.Sprintf("%sprocessed", QueueKeyPrefix(qname))
}
// FailedTotalKey returns a redis key for total failure count for the given queue.
func FailedTotalKey(qname string) string {
return fmt.Sprintf("%sfailed", QueueKeyPrefix(qname))
}
// ProcessedKey returns a redis key for processed count for the given day for the queue.
func ProcessedKey(qname string, t time.Time) string {
return fmt.Sprintf("%sprocessed:%s", QueueKeyPrefix(qname), t.UTC().Format("2006-01-02"))
}
// FailedKey returns a redis key for failure count for the given day for the queue.
func FailedKey(qname string, t time.Time) string {
return fmt.Sprintf("%sfailed:%s", QueueKeyPrefix(qname), t.UTC().Format("2006-01-02"))
}
// ServerInfoKey returns a redis key for process info.
func ServerInfoKey(hostname string, pid int, serverID string) string {
return fmt.Sprintf("asynq:servers:{%s:%d:%s}", hostname, pid, serverID)
}
// WorkersKey returns a redis key for the workers given hostname, pid, and server ID.
func WorkersKey(hostname string, pid int, serverID string) string {
return fmt.Sprintf("asynq:workers:{%s:%d:%s}", hostname, pid, serverID)
}
// SchedulerEntriesKey returns a redis key for the scheduler entries given scheduler ID.
func SchedulerEntriesKey(schedulerID string) string {
return fmt.Sprintf("asynq:schedulers:{%s}", schedulerID)
}
// SchedulerHistoryKey returns a redis key for the scheduler's history for the given entry.
func SchedulerHistoryKey(entryID string) string {
return fmt.Sprintf("asynq:scheduler_history:%s", entryID)
}
// UniqueKey returns a redis key with the given type, payload, and queue name.
func UniqueKey(qname, tasktype string, payload []byte) string {
if payload == nil {
return fmt.Sprintf("%sunique:%s:", QueueKeyPrefix(qname), tasktype)
}
checksum := md5.Sum(payload)
return fmt.Sprintf("%sunique:%s:%s", QueueKeyPrefix(qname), tasktype, hex.EncodeToString(checksum[:]))
}
// GroupKeyPrefix returns a prefix for group key.
func GroupKeyPrefix(qname string) string {
return fmt.Sprintf("%sg:", QueueKeyPrefix(qname))
}
// GroupKey returns a redis key used to group tasks belonging to the same group.
func GroupKey(qname, gkey string) string {
return fmt.Sprintf("%s%s", GroupKeyPrefix(qname), gkey)
}
// AggregationSetKey returns a redis key used for an aggregation set.
func AggregationSetKey(qname, gname, setID string) string {
return fmt.Sprintf("%s:%s", GroupKey(qname, gname), setID)
}
// AllGroups returns a redis key used to store all group keys used in a given queue.
func AllGroups(qname string) string {
return fmt.Sprintf("%sgroups", QueueKeyPrefix(qname))
}
// AllAggregationSets returns a redis key used to store all aggregation sets (set of tasks staged to be aggregated)
// in a given queue.
func AllAggregationSets(qname string) string {
return fmt.Sprintf("%saggregation_sets", QueueKeyPrefix(qname))
}
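Every key above is derived from the queue name wrapped in braces; the braces act as Redis Cluster hash tags, so all keys belonging to one queue land in the same slot. A small in-package sketch of the resulting layout (the queue name and task id are illustrative):

func exampleKeyLayout() {
	qname, id := "default", "some-task-id" // illustrative values
	fmt.Println(PendingKey(qname))   // asynq:{default}:pending
	fmt.Println(ActiveKey(qname))    // asynq:{default}:active
	fmt.Println(ScheduledKey(qname)) // asynq:{default}:scheduled
	fmt.Println(TaskKey(qname, id))  // asynq:{default}:t:some-task-id
}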
// TaskMessage is the internal representation of a task with additional metadata fields.
// Serialized data of this type gets written to redis.
type TaskMessage struct {
// Type indicates the kind of the task to be performed.
Type string
// Payload holds data needed to process the task.
Payload []byte
// ID is a unique identifier for each task.
ID string
// Queue is a name this message should be enqueued to.
Queue string
// Retry is the max number of retry for this task.
Retry int
// Retried is the number of times we've retried this task so far.
Retried int
// ErrorMsg holds the error message from the last failure.
ErrorMsg string
// Time of last failure in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
//
// Use zero to indicate no last failure
LastFailedAt int64
// Timeout specifies timeout in seconds.
// If task processing doesn't complete within the timeout, the task will be retried
// if retry count is remaining. Otherwise it will be moved to the archive.
//
// Use zero to indicate no timeout.
Timeout int64
// Deadline specifies the deadline for the task in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// If task processing doesn't complete before the deadline, the task will be retried
// if retry count is remaining. Otherwise it will be moved to the archive.
//
// Use zero to indicate no deadline.
Deadline int64
// UniqueKey holds the redis key used for uniqueness lock for this task.
//
// Empty string indicates that no uniqueness lock was used.
UniqueKey string
// GroupKey holds the group key used for task aggregation.
//
// Empty string indicates no aggregation is used for this task.
GroupKey string
// Retention specifies the number of seconds the task should be retained after completion.
Retention int64
// CompletedAt is the time the task was processed successfully in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
//
// Use zero to indicate no value.
CompletedAt int64
}
// EncodeMessage marshals the given task message and returns the encoded bytes.
func EncodeMessage(msg *TaskMessage) ([]byte, error) {
if msg == nil {
return nil, fmt.Errorf("cannot encode nil message")
}
return proto.Marshal(&pb.TaskMessage{
Type: msg.Type,
Payload: msg.Payload,
Id: msg.ID,
Queue: msg.Queue,
Retry: int32(msg.Retry),
Retried: int32(msg.Retried),
ErrorMsg: msg.ErrorMsg,
LastFailedAt: msg.LastFailedAt,
Timeout: msg.Timeout,
Deadline: msg.Deadline,
UniqueKey: msg.UniqueKey,
GroupKey: msg.GroupKey,
Retention: msg.Retention,
CompletedAt: msg.CompletedAt,
})
}
// DecodeMessage unmarshals the given bytes and returns a decoded task message.
func DecodeMessage(data []byte) (*TaskMessage, error) {
var pbmsg pb.TaskMessage
if err := proto.Unmarshal(data, &pbmsg); err != nil {
return nil, err
}
return &TaskMessage{
Type: pbmsg.GetType(),
Payload: pbmsg.GetPayload(),
ID: pbmsg.GetId(),
Queue: pbmsg.GetQueue(),
Retry: int(pbmsg.GetRetry()),
Retried: int(pbmsg.GetRetried()),
ErrorMsg: pbmsg.GetErrorMsg(),
LastFailedAt: pbmsg.GetLastFailedAt(),
Timeout: pbmsg.GetTimeout(),
Deadline: pbmsg.GetDeadline(),
UniqueKey: pbmsg.GetUniqueKey(),
GroupKey: pbmsg.GetGroupKey(),
Retention: pbmsg.GetRetention(),
CompletedAt: pbmsg.GetCompletedAt(),
}, nil
}
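A round-trip sketch for the pair above, written as if inside this internal package (base is not importable from outside the module); the field values are illustrative:

func exampleRoundTrip() error {
	msg := &TaskMessage{
		Type:    "example",
		Payload: []byte(`{"user_id":42}`),
		ID:      "some-task-id",
		Queue:   DefaultQueueName,
		Retry:   25,
		Timeout: 1800, // seconds; zero means no timeout
	}
	data, err := EncodeMessage(msg) // protobuf-encode for storage in redis
	if err != nil {
		return err
	}
	decoded, err := DecodeMessage(data)
	if err != nil {
		return err
	}
	if decoded.ID != msg.ID {
		return fmt.Errorf("round trip mismatch: %q != %q", decoded.ID, msg.ID)
	}
	return nil
}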
// TaskInfo describes a task message and its metadata.
type TaskInfo struct {
Message *TaskMessage
State TaskState
NextProcessAt time.Time
Result []byte
}
// Z represents sorted set member.
type Z struct {
Message *TaskMessage
Score int64
}
// ServerInfo holds information about a running server.
type ServerInfo struct {
Host string
PID int
ServerID string
Concurrency int
Queues map[string]int
StrictPriority bool
Status string
Started time.Time
ActiveWorkerCount int
}
// EncodeServerInfo marshals the given ServerInfo and returns the encoded bytes.
func EncodeServerInfo(info *ServerInfo) ([]byte, error) {
if info == nil {
return nil, fmt.Errorf("cannot encode nil server info")
}
queues := make(map[string]int32)
for q, p := range info.Queues {
queues[q] = int32(p)
}
started, err := ptypes.TimestampProto(info.Started)
if err != nil {
return nil, err
}
return proto.Marshal(&pb.ServerInfo{
Host: info.Host,
Pid: int32(info.PID),
ServerId: info.ServerID,
Concurrency: int32(info.Concurrency),
Queues: queues,
StrictPriority: info.StrictPriority,
Status: info.Status,
StartTime: started,
ActiveWorkerCount: int32(info.ActiveWorkerCount),
})
}
// DecodeServerInfo decodes the given bytes into ServerInfo.
func DecodeServerInfo(b []byte) (*ServerInfo, error) {
var pbmsg pb.ServerInfo
if err := proto.Unmarshal(b, &pbmsg); err != nil {
return nil, err
}
queues := make(map[string]int)
for q, p := range pbmsg.GetQueues() {
queues[q] = int(p)
}
startTime, err := ptypes.Timestamp(pbmsg.GetStartTime())
if err != nil {
return nil, err
}
return &ServerInfo{
Host: pbmsg.GetHost(),
PID: int(pbmsg.GetPid()),
ServerID: pbmsg.GetServerId(),
Concurrency: int(pbmsg.GetConcurrency()),
Queues: queues,
StrictPriority: pbmsg.GetStrictPriority(),
Status: pbmsg.GetStatus(),
Started: startTime,
ActiveWorkerCount: int(pbmsg.GetActiveWorkerCount()),
}, nil
}
// WorkerInfo holds information about a running worker.
type WorkerInfo struct {
Host string
PID int
ServerID string
ID string
Type string
Payload []byte
Queue string
Started time.Time
Deadline time.Time
}
// EncodeWorkerInfo marshals the given WorkerInfo and returns the encoded bytes.
func EncodeWorkerInfo(info *WorkerInfo) ([]byte, error) {
if info == nil {
return nil, fmt.Errorf("cannot encode nil worker info")
}
startTime, err := ptypes.TimestampProto(info.Started)
if err != nil {
return nil, err
}
deadline, err := ptypes.TimestampProto(info.Deadline)
if err != nil {
return nil, err
}
return proto.Marshal(&pb.WorkerInfo{
Host: info.Host,
Pid: int32(info.PID),
ServerId: info.ServerID,
TaskId: info.ID,
TaskType: info.Type,
TaskPayload: info.Payload,
Queue: info.Queue,
StartTime: startTime,
Deadline: deadline,
})
}
// DecodeWorkerInfo decodes the given bytes into WorkerInfo.
func DecodeWorkerInfo(b []byte) (*WorkerInfo, error) {
var pbmsg pb.WorkerInfo
if err := proto.Unmarshal(b, &pbmsg); err != nil {
return nil, err
}
startTime, err := ptypes.Timestamp(pbmsg.GetStartTime())
if err != nil {
return nil, err
}
deadline, err := ptypes.Timestamp(pbmsg.GetDeadline())
if err != nil {
return nil, err
}
return &WorkerInfo{
Host: pbmsg.GetHost(),
PID: int(pbmsg.GetPid()),
ServerID: pbmsg.GetServerId(),
ID: pbmsg.GetTaskId(),
Type: pbmsg.GetTaskType(),
Payload: pbmsg.GetTaskPayload(),
Queue: pbmsg.GetQueue(),
Started: startTime,
Deadline: deadline,
}, nil
}
// SchedulerEntry holds information about a periodic task registered with a scheduler.
type SchedulerEntry struct {
// Identifier of this entry.
ID string
// Spec describes the schedule of this entry.
Spec string
// Type is the task type of the periodic task.
Type string
// Payload is the payload of the periodic task.
Payload []byte
// Opts is the options for the periodic task.
Opts []string
// Next shows the next time the task will be enqueued.
Next time.Time
// Prev shows the last time the task was enqueued.
// Zero time if task was never enqueued.
Prev time.Time
}
// EncodeSchedulerEntry marshals the given entry and returns the encoded bytes.
func EncodeSchedulerEntry(entry *SchedulerEntry) ([]byte, error) {
if entry == nil {
return nil, fmt.Errorf("cannot encode nil scheduler entry")
}
next, err := ptypes.TimestampProto(entry.Next)
if err != nil {
return nil, err
}
prev, err := ptypes.TimestampProto(entry.Prev)
if err != nil {
return nil, err
}
return proto.Marshal(&pb.SchedulerEntry{
Id: entry.ID,
Spec: entry.Spec,
TaskType: entry.Type,
TaskPayload: entry.Payload,
EnqueueOptions: entry.Opts,
NextEnqueueTime: next,
PrevEnqueueTime: prev,
})
}
// DecodeSchedulerEntry unmarshals the given bytes and returns a decoded SchedulerEntry.
func DecodeSchedulerEntry(b []byte) (*SchedulerEntry, error) {
var pbmsg pb.SchedulerEntry
if err := proto.Unmarshal(b, &pbmsg); err != nil {
return nil, err
}
next, err := ptypes.Timestamp(pbmsg.GetNextEnqueueTime())
if err != nil {
return nil, err
}
prev, err := ptypes.Timestamp(pbmsg.GetPrevEnqueueTime())
if err != nil {
return nil, err
}
return &SchedulerEntry{
ID: pbmsg.GetId(),
Spec: pbmsg.GetSpec(),
Type: pbmsg.GetTaskType(),
Payload: pbmsg.GetTaskPayload(),
Opts: pbmsg.GetEnqueueOptions(),
Next: next,
Prev: prev,
}, nil
}
// SchedulerEnqueueEvent holds information about an enqueue event by a scheduler.
type SchedulerEnqueueEvent struct {
// ID of the task that was enqueued.
TaskID string
// Time the task was enqueued.
EnqueuedAt time.Time
}
// EncodeSchedulerEnqueueEvent marshals the given event
// and returns the encoded bytes.
func EncodeSchedulerEnqueueEvent(event *SchedulerEnqueueEvent) ([]byte, error) {
if event == nil {
return nil, fmt.Errorf("cannot encode nil enqueue event")
}
enqueuedAt, err := ptypes.TimestampProto(event.EnqueuedAt)
if err != nil {
return nil, err
}
return proto.Marshal(&pb.SchedulerEnqueueEvent{
TaskId: event.TaskID,
EnqueueTime: enqueuedAt,
})
}
// DecodeSchedulerEnqueueEvent unmarshals the given bytes
// and returns a decoded SchedulerEnqueueEvent.
func DecodeSchedulerEnqueueEvent(b []byte) (*SchedulerEnqueueEvent, error) {
var pbmsg pb.SchedulerEnqueueEvent
if err := proto.Unmarshal(b, &pbmsg); err != nil {
return nil, err
}
enqueuedAt, err := ptypes.Timestamp(pbmsg.GetEnqueueTime())
if err != nil {
return nil, err
}
return &SchedulerEnqueueEvent{
TaskID: pbmsg.GetTaskId(),
EnqueuedAt: enqueuedAt,
}, nil
}
// Cancelations is a collection that holds cancel functions for all active tasks.
//
// Cancelations are safe for concurrent use by multiple goroutines.
type Cancelations struct {
mu sync.Mutex
cancelFuncs map[string]context.CancelFunc
}
// NewCancelations returns a Cancelations instance.
func NewCancelations() *Cancelations {
return &Cancelations{
cancelFuncs: make(map[string]context.CancelFunc),
}
}
// Add adds a new cancel func to the collection.
func (c *Cancelations) Add(id string, fn context.CancelFunc) {
c.mu.Lock()
defer c.mu.Unlock()
c.cancelFuncs[id] = fn
}
// Delete deletes a cancel func from the collection given an id.
func (c *Cancelations) Delete(id string) {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.cancelFuncs, id)
}
// Get returns a cancel func given an id.
func (c *Cancelations) Get(id string) (fn context.CancelFunc, ok bool) {
c.mu.Lock()
defer c.mu.Unlock()
fn, ok = c.cancelFuncs[id]
return fn, ok
}
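Usage sketch: the processor registers one cancel func per active task so that a cancelation message can abort it. Written as in-package code; the task ID is illustrative:

func exampleCancelations() {
	c := NewCancelations()
	ctx, cancel := context.WithCancel(context.Background())
	c.Add("some-task-id", cancel)
	defer c.Delete("some-task-id")

	// Elsewhere, e.g. on receiving a cancelation pubsub message:
	if fn, ok := c.Get("some-task-id"); ok {
		fn() // cancels ctx for the worker processing that task
	}
	<-ctx.Done()
}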
// Lease is a time-bound lease for a worker to process a task.
// It provides a communication channel between the lessor and lessee about lease expiration.
type Lease struct {
once sync.Once
ch chan struct{}
Clock timeutil.Clock
mu sync.Mutex
expireAt time.Time // guarded by mu
}
func NewLease(expirationTime time.Time) *Lease {
return &Lease{
ch: make(chan struct{}),
expireAt: expirationTime,
Clock: timeutil.NewRealClock(),
}
}
// Reset changes the lease to expire at the given time.
// It returns true if the lease is still valid and the reset operation was successful, false if the lease has already expired.
func (l *Lease) Reset(expirationTime time.Time) bool {
if !l.IsValid() {
return false
}
l.mu.Lock()
defer l.mu.Unlock()
l.expireAt = expirationTime
return true
}
// NotifyExpiration sends a notification to the lessee about an expired lease.
// It returns true if the notification was sent, false if the lease is still valid and no notification was sent.
func (l *Lease) NotifyExpiration() bool {
if l.IsValid() {
return false
}
l.once.Do(l.closeCh)
return true
}
func (l *Lease) closeCh() {
close(l.ch)
}
// Done returns a communication channel from which the lessee can read to get notified when lessor notifies about lease expiration.
func (l *Lease) Done() <-chan struct{} {
return l.ch
}
// Deadline returns the expiration time of the lease.
func (l *Lease) Deadline() time.Time {
l.mu.Lock()
defer l.mu.Unlock()
return l.expireAt
}
// IsValid returns true if the lease's expiration time is in the future or equal to the current time,
// and false otherwise.
func (l *Lease) IsValid() bool {
now := l.Clock.Now()
l.mu.Lock()
defer l.mu.Unlock()
return l.expireAt.After(now) || l.expireAt.Equal(now)
}
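A sketch of how the sides of a Lease interact, written as in-package code (the durations are illustrative): the lessor extends via Reset while the lease is valid and signals via NotifyExpiration once it has expired; the lessee watches Done().

func exampleLease() {
	l := NewLease(time.Now().Add(30 * time.Second))

	// Lessee (worker) side: stop work once the lessor signals expiration.
	go func() {
		<-l.Done()
		// abandon the task; the lease expired without an extension
	}()

	// Lessor (heartbeater) side: extend the lease while it is still valid.
	if !l.Reset(time.Now().Add(60 * time.Second)) {
		// the lease had already expired; it cannot be extended
	}

	// Lessor side, once the deadline has passed without a further Reset:
	if !l.IsValid() {
		l.NotifyExpiration() // closes Done() exactly once
	}
}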
// Broker is a message broker that supports operations to manage task queues.
//
// See rdb.RDB as a reference implementation.
type Broker interface {
Ping() error
Close() error
Enqueue(ctx context.Context, msg *TaskMessage) error
EnqueueUnique(ctx context.Context, msg *TaskMessage, ttl time.Duration) error
Dequeue(qnames ...string) (*TaskMessage, time.Time, error)
Done(ctx context.Context, msg *TaskMessage) error
MarkAsComplete(ctx context.Context, msg *TaskMessage) error
Requeue(ctx context.Context, msg *TaskMessage) error
Schedule(ctx context.Context, msg *TaskMessage, processAt time.Time) error
ScheduleUnique(ctx context.Context, msg *TaskMessage, processAt time.Time, ttl time.Duration) error
Retry(ctx context.Context, msg *TaskMessage, processAt time.Time, errMsg string, isFailure bool) error
Archive(ctx context.Context, msg *TaskMessage, errMsg string) error
ForwardIfReady(qnames ...string) error
// Group aggregation related methods
AddToGroup(ctx context.Context, msg *TaskMessage, gname string) error
AddToGroupUnique(ctx context.Context, msg *TaskMessage, groupKey string, ttl time.Duration) error
ListGroups(qname string) ([]string, error)
AggregationCheck(qname, gname string, t time.Time, gracePeriod, maxDelay time.Duration, maxSize int) (aggregationSetID string, err error)
ReadAggregationSet(qname, gname, aggregationSetID string) ([]*TaskMessage, time.Time, error)
DeleteAggregationSet(ctx context.Context, qname, gname, aggregationSetID string) error
ReclaimStaleAggregationSets(qname string) error
// Task retention related method
DeleteExpiredCompletedTasks(qname string) error
// Lease related methods
ListLeaseExpired(cutoff time.Time, qnames ...string) ([]*TaskMessage, error)
ExtendLease(qname string, ids ...string) (time.Time, error)
// State snapshot related methods
WriteServerState(info *ServerInfo, workers []*WorkerInfo, ttl time.Duration) error
ClearServerState(host string, pid int, serverID string) error
// Cancelation related methods
CancelationPubSub() (*redis.PubSub, error) // TODO: Need to decouple from redis to support other brokers
PublishCancelation(id string) error
WriteResult(qname, id string, data []byte) (n int, err error)
}

View File

@ -0,0 +1,87 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package context
import (
"context"
"time"
"github.com/hibiken/asynq/internal/base"
)
// A taskMetadata holds task scoped data to put in context.
type taskMetadata struct {
id string
maxRetry int
retryCount int
qname string
}
// ctxKey type is unexported to prevent collisions with context keys defined in
// other packages.
type ctxKey int
// metadataCtxKey is the context key for the task metadata.
// Its value of zero is arbitrary.
const metadataCtxKey ctxKey = 0
// New returns a context and cancel function for a given task message.
func New(base context.Context, msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) {
metadata := taskMetadata{
id: msg.ID,
maxRetry: msg.Retry,
retryCount: msg.Retried,
qname: msg.Queue,
}
ctx := context.WithValue(base, metadataCtxKey, metadata)
return context.WithDeadline(ctx, deadline)
}
// GetTaskID extracts a task ID from a context, if any.
//
// ID of a task is guaranteed to be unique.
// ID of a task doesn't change if the task is being retried.
func GetTaskID(ctx context.Context) (id string, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return "", false
}
return metadata.id, true
}
// GetRetryCount extracts retry count from a context, if any.
//
// Return value n indicates the number of times the associated task has been
// retried so far.
func GetRetryCount(ctx context.Context) (n int, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return 0, false
}
return metadata.retryCount, true
}
// GetMaxRetry extracts maximum retry from a context, if any.
//
// Return value n indicates the maximum number of times the associated task
// can be retried if ProcessTask returns a non-nil error.
func GetMaxRetry(ctx context.Context) (n int, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return 0, false
}
return metadata.maxRetry, true
}
// GetQueueName extracts queue name from a context, if any.
//
// Return value qname indicates which queue the task was pulled from.
func GetQueueName(ctx context.Context) (qname string, ok bool) {
metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata)
if !ok {
return "", false
}
return metadata.qname, true
}
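These getters back the exported asynq.GetTaskID / GetRetryCount / GetMaxRetry / GetQueueName wrappers shown earlier in this diff, so a handler can log its own metadata. A sketch (the package and function names are illustrative):

package worker

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

func processTask(ctx context.Context, t *asynq.Task) error {
	id, _ := asynq.GetTaskID(ctx)
	retried, _ := asynq.GetRetryCount(ctx)
	maxRetry, _ := asynq.GetMaxRetry(ctx)
	queue, _ := asynq.GetQueueName(ctx)
	// attempt 1 is the initial run; maxRetry counts retries beyond it
	log.Printf("task=%s queue=%s attempt=%d/%d", id, queue, retried+1, maxRetry+1)
	return nil
}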

View File

@ -0,0 +1,288 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
// Package errors defines the error type and functions used by
// asynq and its internal packages.
package errors
// Note: This package is inspired by a blog post about error handling in project Upspin
// https://commandcenter.blogspot.com/2017/12/error-handling-in-upspin.html.
import (
"errors"
"fmt"
"log"
"runtime"
"strings"
)
// Error is the type that implements the error interface.
// It contains a number of fields, each of different type.
// An Error value may leave some values unset.
type Error struct {
Code Code
Op Op
Err error
}
func (e *Error) DebugString() string {
var b strings.Builder
if e.Op != "" {
b.WriteString(string(e.Op))
}
if e.Code != Unspecified {
if b.Len() > 0 {
b.WriteString(": ")
}
b.WriteString(e.Code.String())
}
if e.Err != nil {
if b.Len() > 0 {
b.WriteString(": ")
}
b.WriteString(e.Err.Error())
}
return b.String()
}
func (e *Error) Error() string {
var b strings.Builder
if e.Code != Unspecified {
b.WriteString(e.Code.String())
}
if e.Err != nil {
if b.Len() > 0 {
b.WriteString(": ")
}
b.WriteString(e.Err.Error())
}
return b.String()
}
func (e *Error) Unwrap() error {
return e.Err
}
// Code defines the canonical error code.
type Code uint8
// List of canonical error codes.
const (
Unspecified Code = iota
NotFound
FailedPrecondition
Internal
AlreadyExists
Unknown
// Note: If you add a new value here, make sure to update String method.
)
func (c Code) String() string {
switch c {
case Unspecified:
return "ERROR_CODE_UNSPECIFIED"
case NotFound:
return "NOT_FOUND"
case FailedPrecondition:
return "FAILED_PRECONDITION"
case Internal:
return "INTERNAL_ERROR"
case AlreadyExists:
return "ALREADY_EXISTS"
case Unknown:
return "UNKNOWN"
}
panic(fmt.Sprintf("unknown error code %d", c))
}
// Op describes an operation, usually as the package and method,
// such as "rdb.Enqueue".
type Op string
// E builds an error value from its arguments.
// There must be at least one argument or E panics.
// The type of each argument determines its meaning.
// If more than one argument of a given type is presented,
// only the last one is recorded.
//
// The types are:
// errors.Op
// The operation being performed, usually the method
// being invoked (Get, Put, etc.).
// errors.Code
// The canonical error code, such as NOT_FOUND.
// string
// Treated as an error message and assigned to the
// Err field after a call to errors.New.
// error
// The underlying error that triggered this one.
//
// If the error is printed, only those items that have been
// set to non-zero values will appear in the result.
func E(args ...interface{}) error {
if len(args) == 0 {
panic("call to errors.E with no arguments")
}
e := &Error{}
for _, arg := range args {
switch arg := arg.(type) {
case Op:
e.Op = arg
case Code:
e.Code = arg
case error:
e.Err = arg
case string:
e.Err = errors.New(arg)
default:
_, file, line, _ := runtime.Caller(1)
log.Printf("errors.E: bad call from %s:%d: %v", file, line, args)
return fmt.Errorf("unknown type %T, value %v in error call", arg, arg)
}
}
return e
}
// CanonicalCode returns the canonical code of the given error if one is present.
// Otherwise it returns Unspecified.
func CanonicalCode(err error) Code {
if err == nil {
return Unspecified
}
e, ok := err.(*Error)
if !ok {
return Unspecified
}
if e.Code == Unspecified {
return CanonicalCode(e.Err)
}
return e.Code
}
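An in-package sketch of building an error with E and routing on its canonical code (the op and message are illustrative):

func exampleE() {
	err := E(Op("rdb.Enqueue"), NotFound, "task not found")
	// err.Error()       renders "NOT_FOUND: task not found"
	// err.DebugString() prepends the op: "rdb.Enqueue: NOT_FOUND: task not found"
	if CanonicalCode(err) == NotFound {
		// handle the not-found case
	}
}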
/******************************************
Domain Specific Error Types & Values
*******************************************/
var (
// ErrNoProcessableTask indicates that there are no tasks ready to be processed.
ErrNoProcessableTask = errors.New("no tasks are ready for processing")
// ErrDuplicateTask indicates that another task with the same unique key holds the uniqueness lock.
ErrDuplicateTask = errors.New("task already exists")
// ErrTaskIdConflict indicates that another task with the same task ID already exists.
ErrTaskIdConflict = errors.New("task id conflicts with another task")
)
// TaskNotFoundError indicates that a task with the given ID does not exist
// in the given queue.
type TaskNotFoundError struct {
Queue string // queue name
ID string // task id
}
func (e *TaskNotFoundError) Error() string {
return fmt.Sprintf("cannot find task with id=%s in queue %q", e.ID, e.Queue)
}
// IsTaskNotFound reports whether any error in err's chain is of type TaskNotFoundError.
func IsTaskNotFound(err error) bool {
var target *TaskNotFoundError
return As(err, &target)
}
// QueueNotFoundError indicates that a queue with the given name does not exist.
type QueueNotFoundError struct {
Queue string // queue name
}
func (e *QueueNotFoundError) Error() string {
return fmt.Sprintf("queue %q does not exist", e.Queue)
}
// IsQueueNotFound reports whether any error in err's chain is of type QueueNotFoundError.
func IsQueueNotFound(err error) bool {
var target *QueueNotFoundError
return As(err, &target)
}
// QueueNotEmptyError indicates that the given queue is not empty.
type QueueNotEmptyError struct {
Queue string // queue name
}
func (e *QueueNotEmptyError) Error() string {
return fmt.Sprintf("queue %q is not empty", e.Queue)
}
// IsQueueNotEmpty reports whether any error in err's chain is of type QueueNotEmptyError.
func IsQueueNotEmpty(err error) bool {
var target *QueueNotEmptyError
return As(err, &target)
}
// TaskAlreadyArchivedError indicates that the task in question is already archived.
type TaskAlreadyArchivedError struct {
Queue string // queue name
ID string // task id
}
func (e *TaskAlreadyArchivedError) Error() string {
return fmt.Sprintf("task is already archived: id=%s, queue=%s", e.ID, e.Queue)
}
// IsTaskAlreadyArchived reports whether any error in err's chain is of type TaskAlreadyArchivedError.
func IsTaskAlreadyArchived(err error) bool {
var target *TaskAlreadyArchivedError
return As(err, &target)
}
// RedisCommandError indicates that the given redis command returned an error.
type RedisCommandError struct {
Command string // redis command (e.g. LRANGE, ZADD, etc)
Err error // underlying error
}
func (e *RedisCommandError) Error() string {
return fmt.Sprintf("redis command error: %s failed: %v", strings.ToUpper(e.Command), e.Err)
}
func (e *RedisCommandError) Unwrap() error { return e.Err }
// IsRedisCommandError reports whether any error in err's chain is of type RedisCommandError.
func IsRedisCommandError(err error) bool {
var target *RedisCommandError
return As(err, &target)
}
/*************************************************
Standard Library errors package functions
*************************************************/
// New returns an error that formats as the given text.
// Each call to New returns a distinct error value even if the text is identical.
//
// This function is the errors.New function from the standard library (https://golang.org/pkg/errors/#New).
// It is exported from this package for import convenience.
func New(text string) error { return errors.New(text) }
// Is reports whether any error in err's chain matches target.
//
// This function is the errors.Is function from the standard library (https://golang.org/pkg/errors/#Is).
// It is exported from this package for import convenience.
func Is(err, target error) bool { return errors.Is(err, target) }
// As finds the first error in err's chain that matches target, and if so, sets target to that error value and returns true.
// Otherwise, it returns false.
//
// This function is the errors.As function from the standard library (https://golang.org/pkg/errors/#As).
// It is exported from this package for import convenience.
func As(err error, target interface{}) bool { return errors.As(err, target) }
// Unwrap returns the result of calling the Unwrap method on err, if err's type contains an Unwrap method returning error.
// Otherwise, Unwrap returns nil.
//
// This function is the errors.Unwrap function from the standard library (https://golang.org/pkg/errors/#Unwrap).
// It is exported from this package for import convenience.
func Unwrap(err error) error { return errors.Unwrap(err) }

215
vendor/github.com/hibiken/asynq/internal/log/log.go generated vendored Normal file
View File

@ -0,0 +1,215 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
// Package log exports logging related types and functions.
package log
import (
"fmt"
"io"
stdlog "log"
"os"
"sync"
)
// Base supports logging at various log levels.
type Base interface {
// Debug logs a message at Debug level.
Debug(args ...interface{})
// Info logs a message at Info level.
Info(args ...interface{})
// Warn logs a message at Warning level.
Warn(args ...interface{})
// Error logs a message at Error level.
Error(args ...interface{})
// Fatal logs a message at Fatal level
// and the process will exit with status 1.
Fatal(args ...interface{})
}
// baseLogger is a wrapper object around log.Logger from the standard library.
// It supports logging at various log levels.
type baseLogger struct {
*stdlog.Logger
}
// Debug logs a message at Debug level.
func (l *baseLogger) Debug(args ...interface{}) {
l.prefixPrint("DEBUG: ", args...)
}
// Info logs a message at Info level.
func (l *baseLogger) Info(args ...interface{}) {
l.prefixPrint("INFO: ", args...)
}
// Warn logs a message at Warning level.
func (l *baseLogger) Warn(args ...interface{}) {
l.prefixPrint("WARN: ", args...)
}
// Error logs a message at Error level.
func (l *baseLogger) Error(args ...interface{}) {
l.prefixPrint("ERROR: ", args...)
}
// Fatal logs a message at Fatal level
// and the process will exit with status 1.
func (l *baseLogger) Fatal(args ...interface{}) {
l.prefixPrint("FATAL: ", args...)
os.Exit(1)
}
func (l *baseLogger) prefixPrint(prefix string, args ...interface{}) {
args = append([]interface{}{prefix}, args...)
l.Print(args...)
}
// newBase creates and returns a new instance of baseLogger.
func newBase(out io.Writer) *baseLogger {
prefix := fmt.Sprintf("asynq: pid=%d ", os.Getpid())
return &baseLogger{
stdlog.New(out, prefix, stdlog.Ldate|stdlog.Ltime|stdlog.Lmicroseconds|stdlog.LUTC),
}
}
// NewLogger creates and returns a new instance of Logger.
// Log level is set to DebugLevel by default.
func NewLogger(base Base) *Logger {
if base == nil {
base = newBase(os.Stderr)
}
return &Logger{base: base, level: DebugLevel}
}
// Logger logs message to io.Writer at various log levels.
type Logger struct {
base Base
mu sync.Mutex
// Minimum log level for this logger.
// Messages with a level lower than this won't be output.
level Level
}
// Level represents a log level.
type Level int32
const (
// DebugLevel is the lowest level of logging.
// Debug logs are intended for debugging and development purposes.
DebugLevel Level = iota
// InfoLevel is used for general informational log messages.
InfoLevel
// WarnLevel is used for undesired but relatively expected events,
// which may indicate a problem.
WarnLevel
// ErrorLevel is used for undesired and unexpected events that
// the program can recover from.
ErrorLevel
// FatalLevel is used for undesired and unexpected events that
// the program cannot recover from.
FatalLevel
)
// String is part of the fmt.Stringer interface.
//
// Used for testing and debugging purposes.
func (l Level) String() string {
switch l {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
default:
return "unknown"
}
}
// canLogAt reports whether logger can log at level v.
func (l *Logger) canLogAt(v Level) bool {
l.mu.Lock()
defer l.mu.Unlock()
return v >= l.level
}
func (l *Logger) Debug(args ...interface{}) {
if !l.canLogAt(DebugLevel) {
return
}
l.base.Debug(args...)
}
func (l *Logger) Info(args ...interface{}) {
if !l.canLogAt(InfoLevel) {
return
}
l.base.Info(args...)
}
func (l *Logger) Warn(args ...interface{}) {
if !l.canLogAt(WarnLevel) {
return
}
l.base.Warn(args...)
}
func (l *Logger) Error(args ...interface{}) {
if !l.canLogAt(ErrorLevel) {
return
}
l.base.Error(args...)
}
func (l *Logger) Fatal(args ...interface{}) {
if !l.canLogAt(FatalLevel) {
return
}
l.base.Fatal(args...)
}
func (l *Logger) Debugf(format string, args ...interface{}) {
l.Debug(fmt.Sprintf(format, args...))
}
func (l *Logger) Infof(format string, args ...interface{}) {
l.Info(fmt.Sprintf(format, args...))
}
func (l *Logger) Warnf(format string, args ...interface{}) {
l.Warn(fmt.Sprintf(format, args...))
}
func (l *Logger) Errorf(format string, args ...interface{}) {
l.Error(fmt.Sprintf(format, args...))
}
func (l *Logger) Fatalf(format string, args ...interface{}) {
l.Fatal(fmt.Sprintf(format, args...))
}
// SetLevel sets the logger level.
// It panics if v is less than DebugLevel or greater than FatalLevel.
func (l *Logger) SetLevel(v Level) {
l.mu.Lock()
defer l.mu.Unlock()
if v < DebugLevel || v > FatalLevel {
panic("log: invalid log level")
}
l.level = v
}
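An in-package usage sketch (the asynq server wraps a user-supplied Base like this, defaulting to stderr):

func exampleLogger() {
	logger := NewLogger(nil)   // nil Base defaults to stderr at DebugLevel
	logger.SetLevel(InfoLevel) // suppress Debug output
	logger.Debug("not printed")
	logger.Infof("processing %d tasks", 3)
}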

View File

@ -0,0 +1,851 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.17.3
// source: asynq.proto
package proto
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// TaskMessage is the internal representation of a task with additional
// metadata fields.
// Next ID: 15
type TaskMessage struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Type indicates the kind of the task to be performed.
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
// Payload holds data needed to process the task.
Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"`
// Unique identifier for the task.
Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"`
// Name of the queue to which this task belongs.
Queue string `protobuf:"bytes,4,opt,name=queue,proto3" json:"queue,omitempty"`
// Max number of retries for this task.
Retry int32 `protobuf:"varint,5,opt,name=retry,proto3" json:"retry,omitempty"`
// Number of times this task has been retried so far.
Retried int32 `protobuf:"varint,6,opt,name=retried,proto3" json:"retried,omitempty"`
// Error message from the last failure.
ErrorMsg string `protobuf:"bytes,7,opt,name=error_msg,json=errorMsg,proto3" json:"error_msg,omitempty"`
// Time of last failure in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// Use zero to indicate no last failure.
LastFailedAt int64 `protobuf:"varint,11,opt,name=last_failed_at,json=lastFailedAt,proto3" json:"last_failed_at,omitempty"`
// Timeout specifies timeout in seconds.
// Use zero to indicate no timeout.
Timeout int64 `protobuf:"varint,8,opt,name=timeout,proto3" json:"timeout,omitempty"`
// Deadline specifies the deadline for the task in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// Use zero to indicate no deadline.
Deadline int64 `protobuf:"varint,9,opt,name=deadline,proto3" json:"deadline,omitempty"`
// UniqueKey holds the redis key used for uniqueness lock for this task.
// Empty string indicates that no uniqueness lock was used.
UniqueKey string `protobuf:"bytes,10,opt,name=unique_key,json=uniqueKey,proto3" json:"unique_key,omitempty"`
// GroupKey is a name of the group used for task aggregation.
// This field is optional and an empty value means no aggregation for the task.
GroupKey string `protobuf:"bytes,14,opt,name=group_key,json=groupKey,proto3" json:"group_key,omitempty"`
// Retention period specified in a number of seconds.
// The task will be stored in redis as a completed task until the TTL
// expires.
Retention int64 `protobuf:"varint,12,opt,name=retention,proto3" json:"retention,omitempty"`
// Time when the task completed successfully in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// This field is populated if result_ttl > 0 upon completion.
CompletedAt int64 `protobuf:"varint,13,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"`
}
func (x *TaskMessage) Reset() {
*x = TaskMessage{}
if protoimpl.UnsafeEnabled {
mi := &file_asynq_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TaskMessage) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TaskMessage) ProtoMessage() {}
func (x *TaskMessage) ProtoReflect() protoreflect.Message {
mi := &file_asynq_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TaskMessage.ProtoReflect.Descriptor instead.
func (*TaskMessage) Descriptor() ([]byte, []int) {
return file_asynq_proto_rawDescGZIP(), []int{0}
}
func (x *TaskMessage) GetType() string {
if x != nil {
return x.Type
}
return ""
}
func (x *TaskMessage) GetPayload() []byte {
if x != nil {
return x.Payload
}
return nil
}
func (x *TaskMessage) GetId() string {
if x != nil {
return x.Id
}
return ""
}
func (x *TaskMessage) GetQueue() string {
if x != nil {
return x.Queue
}
return ""
}
func (x *TaskMessage) GetRetry() int32 {
if x != nil {
return x.Retry
}
return 0
}
func (x *TaskMessage) GetRetried() int32 {
if x != nil {
return x.Retried
}
return 0
}
func (x *TaskMessage) GetErrorMsg() string {
if x != nil {
return x.ErrorMsg
}
return ""
}
func (x *TaskMessage) GetLastFailedAt() int64 {
if x != nil {
return x.LastFailedAt
}
return 0
}
func (x *TaskMessage) GetTimeout() int64 {
if x != nil {
return x.Timeout
}
return 0
}
func (x *TaskMessage) GetDeadline() int64 {
if x != nil {
return x.Deadline
}
return 0
}
func (x *TaskMessage) GetUniqueKey() string {
if x != nil {
return x.UniqueKey
}
return ""
}
func (x *TaskMessage) GetGroupKey() string {
if x != nil {
return x.GroupKey
}
return ""
}
func (x *TaskMessage) GetRetention() int64 {
if x != nil {
return x.Retention
}
return 0
}
func (x *TaskMessage) GetCompletedAt() int64 {
if x != nil {
return x.CompletedAt
}
return 0
}
// ServerInfo holds information about a running server.
type ServerInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Host machine the server is running on.
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
// PID of the server process.
Pid int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
// Unique identifier for this server.
ServerId string `protobuf:"bytes,3,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"`
// Maximum concurrency this server will use.
Concurrency int32 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
// List of queue names with their priorities.
// The server will consume tasks from the queues and prioritize
// queues with higher priority numbers.
Queues map[string]int32 `protobuf:"bytes,5,rep,name=queues,proto3" json:"queues,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
// If set, the server will always consume tasks from a queue with higher
// priority.
StrictPriority bool `protobuf:"varint,6,opt,name=strict_priority,json=strictPriority,proto3" json:"strict_priority,omitempty"`
// Status indicates the status of the server.
Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"`
// Time this server was started.
StartTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Number of workers currently processing tasks.
ActiveWorkerCount int32 `protobuf:"varint,9,opt,name=active_worker_count,json=activeWorkerCount,proto3" json:"active_worker_count,omitempty"`
}
func (x *ServerInfo) Reset() {
*x = ServerInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_asynq_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ServerInfo) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ServerInfo) ProtoMessage() {}
func (x *ServerInfo) ProtoReflect() protoreflect.Message {
mi := &file_asynq_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ServerInfo.ProtoReflect.Descriptor instead.
func (*ServerInfo) Descriptor() ([]byte, []int) {
return file_asynq_proto_rawDescGZIP(), []int{1}
}
func (x *ServerInfo) GetHost() string {
if x != nil {
return x.Host
}
return ""
}
func (x *ServerInfo) GetPid() int32 {
if x != nil {
return x.Pid
}
return 0
}
func (x *ServerInfo) GetServerId() string {
if x != nil {
return x.ServerId
}
return ""
}
func (x *ServerInfo) GetConcurrency() int32 {
if x != nil {
return x.Concurrency
}
return 0
}
func (x *ServerInfo) GetQueues() map[string]int32 {
if x != nil {
return x.Queues
}
return nil
}
func (x *ServerInfo) GetStrictPriority() bool {
if x != nil {
return x.StrictPriority
}
return false
}
func (x *ServerInfo) GetStatus() string {
if x != nil {
return x.Status
}
return ""
}
func (x *ServerInfo) GetStartTime() *timestamppb.Timestamp {
if x != nil {
return x.StartTime
}
return nil
}
func (x *ServerInfo) GetActiveWorkerCount() int32 {
if x != nil {
return x.ActiveWorkerCount
}
return 0
}
// WorkerInfo holds information about a running worker.
type WorkerInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Host machine this worker is running on.
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
// PID of the process in which this worker is running.
Pid int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
// ID of the server in which this worker is running.
ServerId string `protobuf:"bytes,3,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"`
// ID of the task this worker is processing.
TaskId string `protobuf:"bytes,4,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
// Type of the task this worker is processing.
TaskType string `protobuf:"bytes,5,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
// Payload of the task this worker is processing.
TaskPayload []byte `protobuf:"bytes,6,opt,name=task_payload,json=taskPayload,proto3" json:"task_payload,omitempty"`
// Name of the queue to which the task being processed belongs.
Queue string `protobuf:"bytes,7,opt,name=queue,proto3" json:"queue,omitempty"`
// Time this worker started processing the task.
StartTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Deadline by which the worker needs to complete processing
// the task. If the worker exceeds the deadline, the task will fail.
Deadline *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=deadline,proto3" json:"deadline,omitempty"`
}
func (x *WorkerInfo) Reset() {
*x = WorkerInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_asynq_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *WorkerInfo) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*WorkerInfo) ProtoMessage() {}
func (x *WorkerInfo) ProtoReflect() protoreflect.Message {
mi := &file_asynq_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use WorkerInfo.ProtoReflect.Descriptor instead.
func (*WorkerInfo) Descriptor() ([]byte, []int) {
return file_asynq_proto_rawDescGZIP(), []int{2}
}
func (x *WorkerInfo) GetHost() string {
if x != nil {
return x.Host
}
return ""
}
func (x *WorkerInfo) GetPid() int32 {
if x != nil {
return x.Pid
}
return 0
}
func (x *WorkerInfo) GetServerId() string {
if x != nil {
return x.ServerId
}
return ""
}
func (x *WorkerInfo) GetTaskId() string {
if x != nil {
return x.TaskId
}
return ""
}
func (x *WorkerInfo) GetTaskType() string {
if x != nil {
return x.TaskType
}
return ""
}
func (x *WorkerInfo) GetTaskPayload() []byte {
if x != nil {
return x.TaskPayload
}
return nil
}
func (x *WorkerInfo) GetQueue() string {
if x != nil {
return x.Queue
}
return ""
}
func (x *WorkerInfo) GetStartTime() *timestamppb.Timestamp {
if x != nil {
return x.StartTime
}
return nil
}
func (x *WorkerInfo) GetDeadline() *timestamppb.Timestamp {
if x != nil {
return x.Deadline
}
return nil
}
// SchedulerEntry holds information about a periodic task registered
// with a scheduler.
type SchedulerEntry struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Identifier of the scheduler entry.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// Periodic schedule spec of the entry.
Spec string `protobuf:"bytes,2,opt,name=spec,proto3" json:"spec,omitempty"`
// Task type of the periodic task.
TaskType string `protobuf:"bytes,3,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
// Task payload of the periodic task.
TaskPayload []byte `protobuf:"bytes,4,opt,name=task_payload,json=taskPayload,proto3" json:"task_payload,omitempty"`
// Options used to enqueue the periodic task.
EnqueueOptions []string `protobuf:"bytes,5,rep,name=enqueue_options,json=enqueueOptions,proto3" json:"enqueue_options,omitempty"`
// Next time the task will be enqueued.
NextEnqueueTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=next_enqueue_time,json=nextEnqueueTime,proto3" json:"next_enqueue_time,omitempty"`
// Last time the task was enqueued.
// Zero time if task was never enqueued.
PrevEnqueueTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=prev_enqueue_time,json=prevEnqueueTime,proto3" json:"prev_enqueue_time,omitempty"`
}
func (x *SchedulerEntry) Reset() {
*x = SchedulerEntry{}
if protoimpl.UnsafeEnabled {
mi := &file_asynq_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SchedulerEntry) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SchedulerEntry) ProtoMessage() {}
func (x *SchedulerEntry) ProtoReflect() protoreflect.Message {
mi := &file_asynq_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SchedulerEntry.ProtoReflect.Descriptor instead.
func (*SchedulerEntry) Descriptor() ([]byte, []int) {
return file_asynq_proto_rawDescGZIP(), []int{3}
}
func (x *SchedulerEntry) GetId() string {
if x != nil {
return x.Id
}
return ""
}
func (x *SchedulerEntry) GetSpec() string {
if x != nil {
return x.Spec
}
return ""
}
func (x *SchedulerEntry) GetTaskType() string {
if x != nil {
return x.TaskType
}
return ""
}
func (x *SchedulerEntry) GetTaskPayload() []byte {
if x != nil {
return x.TaskPayload
}
return nil
}
func (x *SchedulerEntry) GetEnqueueOptions() []string {
if x != nil {
return x.EnqueueOptions
}
return nil
}
func (x *SchedulerEntry) GetNextEnqueueTime() *timestamppb.Timestamp {
if x != nil {
return x.NextEnqueueTime
}
return nil
}
func (x *SchedulerEntry) GetPrevEnqueueTime() *timestamppb.Timestamp {
if x != nil {
return x.PrevEnqueueTime
}
return nil
}
// SchedulerEnqueueEvent holds information about an enqueue event
// by a scheduler.
type SchedulerEnqueueEvent struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// ID of the task that was enqueued.
TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
// Time the task was enqueued.
EnqueueTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=enqueue_time,json=enqueueTime,proto3" json:"enqueue_time,omitempty"`
}
func (x *SchedulerEnqueueEvent) Reset() {
*x = SchedulerEnqueueEvent{}
if protoimpl.UnsafeEnabled {
mi := &file_asynq_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SchedulerEnqueueEvent) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SchedulerEnqueueEvent) ProtoMessage() {}
func (x *SchedulerEnqueueEvent) ProtoReflect() protoreflect.Message {
mi := &file_asynq_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SchedulerEnqueueEvent.ProtoReflect.Descriptor instead.
func (*SchedulerEnqueueEvent) Descriptor() ([]byte, []int) {
return file_asynq_proto_rawDescGZIP(), []int{4}
}
func (x *SchedulerEnqueueEvent) GetTaskId() string {
if x != nil {
return x.TaskId
}
return ""
}
func (x *SchedulerEnqueueEvent) GetEnqueueTime() *timestamppb.Timestamp {
if x != nil {
return x.EnqueueTime
}
return nil
}
var File_asynq_proto protoreflect.FileDescriptor
var file_asynq_proto_rawDesc = []byte{
0x0a, 0x0b, 0x61, 0x73, 0x79, 0x6e, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x61,
0x73, 0x79, 0x6e, 0x71, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x03, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79,
0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c,
0x6f, 0x61, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x74,
0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x72, 0x65, 0x74, 0x72, 0x79, 0x12,
0x18, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05,
0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72,
0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x72,
0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66,
0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c,
0x6c, 0x61, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07,
0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x74,
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69,
0x6e, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69,
0x6e, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79,
0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65,
0x79, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0e,
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x12, 0x1c,
0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28,
0x03, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c,
0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0d, 0x20, 0x01,
0x28, 0x03, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22,
0x8f, 0x03, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12,
0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f,
0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
0x03, 0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69,
0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49,
0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79,
0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65,
0x6e, 0x63, 0x79, 0x12, 0x35, 0x0a, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x18, 0x05, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x73, 0x79, 0x6e, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76,
0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x45, 0x6e, 0x74,
0x72, 0x79, 0x52, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x74,
0x72, 0x69, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20,
0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x50, 0x72, 0x69, 0x6f, 0x72,
0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20,
0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x73,
0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61,
0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65,
0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, 0x20,
0x01, 0x28, 0x05, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x65,
0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73,
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
0x01, 0x22, 0xb1, 0x02, 0x0a, 0x0a, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f,
0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
0x68, 0x6f, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65,
0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x04,
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09,
0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x73,
0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x14, 0x0a, 0x05,
0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65,
0x75, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65,
0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x36, 0x0a,
0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64, 0x65, 0x61,
0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75,
0x6c, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x1b, 0x0a, 0x09,
0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x73,
0x6b, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x0b, 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x27, 0x0a, 0x0f,
0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x4f, 0x70,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x11, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x65, 0x6e,
0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x6e, 0x65,
0x78, 0x74, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x46, 0x0a,
0x11, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x69,
0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x70, 0x72, 0x65, 0x76, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75,
0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x6f, 0x0a, 0x15, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c,
0x65, 0x72, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x17,
0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x6e, 0x71, 0x75, 0x65,
0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x65, 0x6e, 0x71, 0x75, 0x65,
0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x69, 0x62, 0x69, 0x6b, 0x65, 0x6e, 0x2f, 0x61, 0x73, 0x79,
0x6e, 0x71, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_asynq_proto_rawDescOnce sync.Once
file_asynq_proto_rawDescData = file_asynq_proto_rawDesc
)
func file_asynq_proto_rawDescGZIP() []byte {
file_asynq_proto_rawDescOnce.Do(func() {
file_asynq_proto_rawDescData = protoimpl.X.CompressGZIP(file_asynq_proto_rawDescData)
})
return file_asynq_proto_rawDescData
}
var file_asynq_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_asynq_proto_goTypes = []interface{}{
(*TaskMessage)(nil), // 0: asynq.TaskMessage
(*ServerInfo)(nil), // 1: asynq.ServerInfo
(*WorkerInfo)(nil), // 2: asynq.WorkerInfo
(*SchedulerEntry)(nil), // 3: asynq.SchedulerEntry
(*SchedulerEnqueueEvent)(nil), // 4: asynq.SchedulerEnqueueEvent
nil, // 5: asynq.ServerInfo.QueuesEntry
(*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp
}
var file_asynq_proto_depIdxs = []int32{
5, // 0: asynq.ServerInfo.queues:type_name -> asynq.ServerInfo.QueuesEntry
6, // 1: asynq.ServerInfo.start_time:type_name -> google.protobuf.Timestamp
6, // 2: asynq.WorkerInfo.start_time:type_name -> google.protobuf.Timestamp
6, // 3: asynq.WorkerInfo.deadline:type_name -> google.protobuf.Timestamp
6, // 4: asynq.SchedulerEntry.next_enqueue_time:type_name -> google.protobuf.Timestamp
6, // 5: asynq.SchedulerEntry.prev_enqueue_time:type_name -> google.protobuf.Timestamp
6, // 6: asynq.SchedulerEnqueueEvent.enqueue_time:type_name -> google.protobuf.Timestamp
7, // [7:7] is the sub-list for method output_type
7, // [7:7] is the sub-list for method input_type
7, // [7:7] is the sub-list for extension type_name
7, // [7:7] is the sub-list for extension extendee
0, // [0:7] is the sub-list for field type_name
}
func init() { file_asynq_proto_init() }
func file_asynq_proto_init() {
if File_asynq_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_asynq_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TaskMessage); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_asynq_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ServerInfo); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_asynq_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*WorkerInfo); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_asynq_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SchedulerEntry); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_asynq_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SchedulerEnqueueEvent); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_asynq_proto_rawDesc,
NumEnums: 0,
NumMessages: 6,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_asynq_proto_goTypes,
DependencyIndexes: file_asynq_proto_depIdxs,
MessageInfos: file_asynq_proto_msgTypes,
}.Build()
File_asynq_proto = out.File
file_asynq_proto_rawDesc = nil
file_asynq_proto_goTypes = nil
file_asynq_proto_depIdxs = nil
}


@ -0,0 +1,168 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
syntax = "proto3";
package asynq;
import "google/protobuf/timestamp.proto";
option go_package = "github.com/hibiken/asynq/internal/proto";
// TaskMessage is the internal representation of a task with additional
// metadata fields.
// Next ID: 15
message TaskMessage {
// Type indicates the kind of the task to be performed.
string type = 1;
// Payload holds data needed to process the task.
bytes payload = 2;
// Unique identifier for the task.
string id = 3;
// Name of the queue to which this task belongs.
string queue = 4;
// Max number of retries for this task.
int32 retry = 5;
// Number of times this task has been retried so far.
int32 retried = 6;
// Error message from the last failure.
string error_msg = 7;
// Time of last failure in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// Use zero to indicate no last failure.
int64 last_failed_at = 11;
// Timeout specifies timeout in seconds.
// Use zero to indicate no timeout.
int64 timeout = 8;
// Deadline specifies the deadline for the task in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// Use zero to indicate no deadline.
int64 deadline = 9;
// UniqueKey holds the redis key used for uniqueness lock for this task.
// Empty string indicates that no uniqueness lock was used.
string unique_key = 10;
// GroupKey is a name of the group used for task aggregation.
// This field is optional and empty value means no aggregation for the task.
string group_key = 14;
// Retention period specified in a number of seconds.
// The task will be stored in redis as a completed task until the TTL
// expires.
int64 retention = 12;
// Time when the task completed in success in Unix time,
// the number of seconds elapsed since January 1, 1970 UTC.
// This field is populated if result_ttl > 0 upon completion.
int64 completed_at = 13;
};
// ServerInfo holds information about a running server.
message ServerInfo {
// Host machine the server is running on.
string host = 1;
// PID of the server process.
int32 pid = 2;
// Unique identifier for this server.
string server_id = 3;
// Maximum number of concurrent workers this server will use.
int32 concurrency = 4;
// List of queue names with their priorities.
// The server will consume tasks from the queues and prioritize
// queues with higher priority numbers.
map<string, int32> queues = 5;
// If set, the server will always consume tasks from a queue with higher
// priority.
bool strict_priority = 6;
// Status indicates the status of the server.
string status = 7;
// Time this server was started.
google.protobuf.Timestamp start_time = 8;
// Number of workers currently processing tasks.
int32 active_worker_count = 9;
};
// WorkerInfo holds information about a running worker.
message WorkerInfo {
// Host machine this worker is running on.
string host = 1;
// PID of the process in which this worker is running.
int32 pid = 2;
// ID of the server in which this worker is running.
string server_id = 3;
// ID of the task this worker is processing.
string task_id = 4;
// Type of the task this worker is processing.
string task_type = 5;
// Payload of the task this worker is processing.
bytes task_payload = 6;
// Name of the queue to which the task the worker is processing belongs.
string queue = 7;
// Time this worker started processing the task.
google.protobuf.Timestamp start_time = 8;
// Deadline by which the worker needs to complete processing
// the task. If the worker exceeds the deadline, the task will fail.
google.protobuf.Timestamp deadline = 9;
};
// SchedulerEntry holds information about a periodic task registered
// with a scheduler.
message SchedulerEntry {
// Identifier of the scheduler entry.
string id = 1;
// Periodic schedule spec of the entry.
string spec = 2;
// Task type of the periodic task.
string task_type = 3;
// Task payload of the periodic task.
bytes task_payload = 4;
// Options used to enqueue the periodic task.
repeated string enqueue_options = 5;
// Next time the task will be enqueued.
google.protobuf.Timestamp next_enqueue_time = 6;
// Last time the task was enqueued.
// Zero time if task was never enqueued.
google.protobuf.Timestamp prev_enqueue_time = 7;
};
// SchedulerEnqueueEvent holds information about an enqueue event
// by a scheduler.
message SchedulerEnqueueEvent {
// ID of the task that was enqueued.
string task_id = 1;
// Time the task was enqueued.
google.protobuf.Timestamp enqueue_time = 2;
};

vendor/github.com/hibiken/asynq/internal/rdb/inspect.go generated vendored Normal file

File diff suppressed because it is too large

vendor/github.com/hibiken/asynq/internal/rdb/rdb.go generated vendored Normal file

File diff suppressed because it is too large


@ -0,0 +1,59 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
// Package timeutil exports functions and types related to time and date.
package timeutil
import (
"sync"
"time"
)
// A Clock is an object that can tell you the current time.
//
// This interface allows decoupling code that uses time from the code that creates
// a point in time. You can use this to your advantage by injecting Clocks into interfaces
// rather than having implementations call time.Now() directly.
//
// Use NewRealClock() in production.
// Use NewSimulatedClock() in test.
type Clock interface {
Now() time.Time
}
func NewRealClock() Clock { return &realTimeClock{} }
type realTimeClock struct{}
func (_ *realTimeClock) Now() time.Time { return time.Now() }
// A SimulatedClock is a concrete Clock implementation that doesn't "tick" on its own.
// Time is advanced by explicit call to the AdvanceTime() or SetTime() functions.
// This object is concurrency safe.
type SimulatedClock struct {
mu sync.Mutex
t time.Time // guarded by mu
}
func NewSimulatedClock(t time.Time) *SimulatedClock {
return &SimulatedClock{t: t}
}
func (c *SimulatedClock) Now() time.Time {
c.mu.Lock()
defer c.mu.Unlock()
return c.t
}
func (c *SimulatedClock) SetTime(t time.Time) {
c.mu.Lock()
defer c.mu.Unlock()
c.t = t
}
func (c *SimulatedClock) AdvanceTime(d time.Duration) {
c.mu.Lock()
defer c.mu.Unlock()
c.t = c.t.Add(d)
}
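The SimulatedClock above exists so tests can control time deterministically. A minimal usage sketch follows; note that timeutil lives under internal/, so this style of test only applies within the asynq module itself, and the times chosen here are arbitrary.

package timeutil_test

import (
	"testing"
	"time"

	"github.com/hibiken/asynq/internal/timeutil"
)

// TestSimulatedClock advances time explicitly instead of sleeping.
func TestSimulatedClock(t *testing.T) {
	start := time.Date(2023, time.March, 10, 7, 0, 0, 0, time.UTC)
	clock := timeutil.NewSimulatedClock(start)

	if !clock.Now().Equal(start) {
		t.Fatalf("Now() = %v, want %v", clock.Now(), start)
	}

	// Advance the clock by one hour; no real time passes.
	clock.AdvanceTime(time.Hour)
	if got, want := clock.Now(), start.Add(time.Hour); !got.Equal(want) {
		t.Fatalf("Now() = %v, want %v", got, want)
	}
}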

vendor/github.com/hibiken/asynq/janitor.go generated vendored Normal file

@ -0,0 +1,81 @@
// Copyright 2021 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"time"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
)
// A janitor is responsible for deleting expired completed tasks from the specified
// queues. It periodically checks for any expired tasks in the completed set, and
// deletes them.
type janitor struct {
logger *log.Logger
broker base.Broker
// channel to communicate back to the long running "janitor" goroutine.
done chan struct{}
// list of queue names to check.
queues []string
// average interval between checks.
avgInterval time.Duration
}
type janitorParams struct {
logger *log.Logger
broker base.Broker
queues []string
interval time.Duration
}
func newJanitor(params janitorParams) *janitor {
return &janitor{
logger: params.logger,
broker: params.broker,
done: make(chan struct{}),
queues: params.queues,
avgInterval: params.interval,
}
}
func (j *janitor) shutdown() {
j.logger.Debug("Janitor shutting down...")
// Signal the janitor goroutine to stop.
j.done <- struct{}{}
}
// start starts the "janitor" goroutine.
func (j *janitor) start(wg *sync.WaitGroup) {
wg.Add(1)
timer := time.NewTimer(j.avgInterval) // randomize this interval with margin of 1s
go func() {
defer wg.Done()
for {
select {
case <-j.done:
j.logger.Debug("Janitor done")
return
case <-timer.C:
j.exec()
timer.Reset(j.avgInterval)
}
}
}()
}
func (j *janitor) exec() {
for _, qname := range j.queues {
if err := j.broker.DeleteExpiredCompletedTasks(qname); err != nil {
j.logger.Errorf("Failed to delete expired completed tasks from queue %q: %v",
qname, err)
}
}
}
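The janitor only has work to do when completed tasks carry a retention period. A minimal sketch of enqueuing such a task through asynq's public Retention option; the Redis address and task type name are placeholder assumptions.

package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

func main() {
	client := asynq.NewClient(asynq.RedisClientOpt{Addr: "localhost:6379"})
	defer client.Close()

	// Keep the completed task record for 24 hours; once the retention
	// period expires, the janitor deletes it from the completed set.
	task := asynq.NewTask("example:cleanup", nil)
	info, err := client.Enqueue(task, asynq.Retention(24*time.Hour))
	if err != nil {
		log.Fatalf("enqueue: %v", err)
	}
	log.Printf("enqueued task id=%s queue=%s", info.ID, info.Queue)
}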


@ -0,0 +1,243 @@
// Copyright 2022 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"crypto/sha256"
"fmt"
"io"
"sort"
"sync"
"time"
)
// PeriodicTaskManager manages scheduling of periodic tasks.
// It syncs scheduler's entries by calling the config provider periodically.
type PeriodicTaskManager struct {
s *Scheduler
p PeriodicTaskConfigProvider
syncInterval time.Duration
done chan (struct{})
wg sync.WaitGroup
m map[string]string // map[hash]entryID
}
type PeriodicTaskManagerOpts struct {
// Required: must be non nil
PeriodicTaskConfigProvider PeriodicTaskConfigProvider
// Required: must be non nil
RedisConnOpt RedisConnOpt
// Optional: scheduler options
*SchedulerOpts
// Optional: default is 3m
SyncInterval time.Duration
}
const defaultSyncInterval = 3 * time.Minute
// NewPeriodicTaskManager returns a new PeriodicTaskManager instance.
// The given opts should specify the RedisConnOpt and PeriodicTaskConfigProvider at minimum.
func NewPeriodicTaskManager(opts PeriodicTaskManagerOpts) (*PeriodicTaskManager, error) {
if opts.PeriodicTaskConfigProvider == nil {
return nil, fmt.Errorf("PeriodicTaskConfigProvider cannot be nil")
}
if opts.RedisConnOpt == nil {
return nil, fmt.Errorf("RedisConnOpt cannot be nil")
}
scheduler := NewScheduler(opts.RedisConnOpt, opts.SchedulerOpts)
syncInterval := opts.SyncInterval
if syncInterval == 0 {
syncInterval = defaultSyncInterval
}
return &PeriodicTaskManager{
s: scheduler,
p: opts.PeriodicTaskConfigProvider,
syncInterval: syncInterval,
done: make(chan struct{}),
m: make(map[string]string),
}, nil
}
// PeriodicTaskConfigProvider provides configs for periodic tasks.
// GetConfigs will be called by a PeriodicTaskManager periodically to
// sync the scheduler's entries with the configs returned by the provider.
type PeriodicTaskConfigProvider interface {
GetConfigs() ([]*PeriodicTaskConfig, error)
}
// PeriodicTaskConfig specifies the details of a periodic task.
type PeriodicTaskConfig struct {
Cronspec string // required: must be non empty string
Task *Task // required: must be non nil
Opts []Option // optional: can be nil
}
func (c *PeriodicTaskConfig) hash() string {
h := sha256.New()
io.WriteString(h, c.Cronspec)
io.WriteString(h, c.Task.Type())
h.Write(c.Task.Payload())
opts := stringifyOptions(c.Opts)
sort.Strings(opts)
for _, opt := range opts {
io.WriteString(h, opt)
}
return fmt.Sprintf("%x", h.Sum(nil))
}
func validatePeriodicTaskConfig(c *PeriodicTaskConfig) error {
if c == nil {
return fmt.Errorf("PeriodicTaskConfig cannot be nil")
}
if c.Task == nil {
return fmt.Errorf("PeriodicTaskConfig.Task cannot be nil")
}
if c.Cronspec == "" {
return fmt.Errorf("PeriodicTaskConfig.Cronspec cannot be empty")
}
return nil
}
// Start starts a scheduler and background goroutine to sync the scheduler with the configs
// returned by the provider.
//
// Start returns any error encountered at start up time.
func (mgr *PeriodicTaskManager) Start() error {
if mgr.s == nil || mgr.p == nil {
panic("asynq: cannot start uninitialized PeriodicTaskManager; use NewPeriodicTaskManager to initialize")
}
if err := mgr.initialSync(); err != nil {
return fmt.Errorf("asynq: %v", err)
}
if err := mgr.s.Start(); err != nil {
return fmt.Errorf("asynq: %v", err)
}
mgr.wg.Add(1)
go func() {
defer mgr.wg.Done()
ticker := time.NewTicker(mgr.syncInterval)
for {
select {
case <-mgr.done:
mgr.s.logger.Debugf("Stopping syncer goroutine")
ticker.Stop()
return
case <-ticker.C:
mgr.sync()
}
}
}()
return nil
}
// Shutdown gracefully shuts down the manager.
// It notifies a background syncer goroutine to stop and stops scheduler.
func (mgr *PeriodicTaskManager) Shutdown() {
close(mgr.done)
mgr.wg.Wait()
mgr.s.Shutdown()
}
// Run starts the manager and blocks until an os signal to exit the program is received.
// Once it receives a signal, it gracefully shuts down the manager.
func (mgr *PeriodicTaskManager) Run() error {
if err := mgr.Start(); err != nil {
return err
}
mgr.s.waitForSignals()
mgr.Shutdown()
mgr.s.logger.Debugf("PeriodicTaskManager exiting")
return nil
}
func (mgr *PeriodicTaskManager) initialSync() error {
configs, err := mgr.p.GetConfigs()
if err != nil {
return fmt.Errorf("initial call to GetConfigs failed: %v", err)
}
for _, c := range configs {
if err := validatePeriodicTaskConfig(c); err != nil {
return fmt.Errorf("initial call to GetConfigs contained an invalid config: %v", err)
}
}
mgr.add(configs)
return nil
}
func (mgr *PeriodicTaskManager) add(configs []*PeriodicTaskConfig) {
for _, c := range configs {
entryID, err := mgr.s.Register(c.Cronspec, c.Task, c.Opts...)
if err != nil {
mgr.s.logger.Errorf("Failed to register periodic task: cronspec=%q task=%q",
c.Cronspec, c.Task.Type())
continue
}
mgr.m[c.hash()] = entryID
mgr.s.logger.Infof("Successfully registered periodic task: cronspec=%q task=%q, entryID=%s",
c.Cronspec, c.Task.Type(), entryID)
}
}
func (mgr *PeriodicTaskManager) remove(removed map[string]string) {
for hash, entryID := range removed {
if err := mgr.s.Unregister(entryID); err != nil {
mgr.s.logger.Errorf("Failed to unregister periodic task: %v", err)
continue
}
delete(mgr.m, hash)
mgr.s.logger.Infof("Successfully unregistered periodic task: entryID=%s", entryID)
}
}
func (mgr *PeriodicTaskManager) sync() {
configs, err := mgr.p.GetConfigs()
if err != nil {
mgr.s.logger.Errorf("Failed to get periodic task configs: %v", err)
return
}
for _, c := range configs {
if err := validatePeriodicTaskConfig(c); err != nil {
mgr.s.logger.Errorf("Failed to sync: GetConfigs returned an invalid config: %v", err)
return
}
}
// Diff and only register/unregister the newly added/removed entries.
removed := mgr.diffRemoved(configs)
added := mgr.diffAdded(configs)
mgr.remove(removed)
mgr.add(added)
}
// diffRemoved diffs the incoming configs with the registered config and returns
// a map containing hash and entryID of each config that was removed.
func (mgr *PeriodicTaskManager) diffRemoved(configs []*PeriodicTaskConfig) map[string]string {
newConfigs := make(map[string]string)
for _, c := range configs {
newConfigs[c.hash()] = "" // empty value since we don't have entryID yet
}
removed := make(map[string]string)
for k, v := range mgr.m {
// test whether existing config is present in the incoming configs
if _, found := newConfigs[k]; !found {
removed[k] = v
}
}
return removed
}
// diffAdded diffs the incoming configs with the registered configs and returns
// a list of configs that were added.
func (mgr *PeriodicTaskManager) diffAdded(configs []*PeriodicTaskConfig) []*PeriodicTaskConfig {
var added []*PeriodicTaskConfig
for _, c := range configs {
if _, found := mgr.m[c.hash()]; !found {
added = append(added, c)
}
}
return added
}
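A minimal sketch of wiring a static PeriodicTaskConfigProvider into NewPeriodicTaskManager, following the contract above; the Redis address, cronspec, and task type are placeholder assumptions, and a real provider would typically read its configs from a database or config file.

package main

import (
	"log"
	"time"

	"github.com/hibiken/asynq"
)

// staticProvider returns a fixed set of periodic task configs.
type staticProvider struct{}

func (p *staticProvider) GetConfigs() ([]*asynq.PeriodicTaskConfig, error) {
	return []*asynq.PeriodicTaskConfig{
		{
			Cronspec: "*/5 * * * *", // every five minutes
			Task:     asynq.NewTask("example:cleanup", nil),
		},
	}, nil
}

func main() {
	mgr, err := asynq.NewPeriodicTaskManager(asynq.PeriodicTaskManagerOpts{
		RedisConnOpt:               asynq.RedisClientOpt{Addr: "localhost:6379"},
		PeriodicTaskConfigProvider: &staticProvider{},
		SyncInterval:               time.Minute, // re-read configs every minute
	})
	if err != nil {
		log.Fatal(err)
	}
	// Run blocks until an exit signal is received, syncing scheduler
	// entries with the provider every SyncInterval.
	if err := mgr.Run(); err != nil {
		log.Fatal(err)
	}
}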

vendor/github.com/hibiken/asynq/processor.go generated vendored Normal file

@ -0,0 +1,523 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"context"
"fmt"
"math"
"math/rand"
"runtime"
"runtime/debug"
"sort"
"strings"
"sync"
"time"
"github.com/hibiken/asynq/internal/base"
asynqcontext "github.com/hibiken/asynq/internal/context"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/timeutil"
"golang.org/x/time/rate"
)
type processor struct {
logger *log.Logger
broker base.Broker
clock timeutil.Clock
handler Handler
baseCtxFn func() context.Context
queueConfig map[string]int
// orderedQueues is set only in strict-priority mode.
orderedQueues []string
retryDelayFunc RetryDelayFunc
isFailureFunc func(error) bool
errHandler ErrorHandler
shutdownTimeout time.Duration
// channel via which to send sync requests to syncer.
syncRequestCh chan<- *syncRequest
// rate limiter to prevent spamming logs with a bunch of errors.
errLogLimiter *rate.Limiter
// sema is a counting semaphore to ensure the number of active workers
// does not exceed the limit.
sema chan struct{}
// channel to communicate back to the long running "processor" goroutine.
// once is used to send value to the channel only once.
done chan struct{}
once sync.Once
// quit channel is closed when the shutdown of the "processor" goroutine starts.
quit chan struct{}
// abort channel communicates to the in-flight worker goroutines to stop.
abort chan struct{}
// cancelations is a set of cancel functions for all active tasks.
cancelations *base.Cancelations
starting chan<- *workerInfo
finished chan<- *base.TaskMessage
}
type processorParams struct {
logger *log.Logger
broker base.Broker
baseCtxFn func() context.Context
retryDelayFunc RetryDelayFunc
isFailureFunc func(error) bool
syncCh chan<- *syncRequest
cancelations *base.Cancelations
concurrency int
queues map[string]int
strictPriority bool
errHandler ErrorHandler
shutdownTimeout time.Duration
starting chan<- *workerInfo
finished chan<- *base.TaskMessage
}
// newProcessor constructs a new processor.
func newProcessor(params processorParams) *processor {
queues := normalizeQueues(params.queues)
orderedQueues := []string(nil)
if params.strictPriority {
orderedQueues = sortByPriority(queues)
}
return &processor{
logger: params.logger,
broker: params.broker,
baseCtxFn: params.baseCtxFn,
clock: timeutil.NewRealClock(),
queueConfig: queues,
orderedQueues: orderedQueues,
retryDelayFunc: params.retryDelayFunc,
isFailureFunc: params.isFailureFunc,
syncRequestCh: params.syncCh,
cancelations: params.cancelations,
errLogLimiter: rate.NewLimiter(rate.Every(3*time.Second), 1),
sema: make(chan struct{}, params.concurrency),
done: make(chan struct{}),
quit: make(chan struct{}),
abort: make(chan struct{}),
errHandler: params.errHandler,
handler: HandlerFunc(func(ctx context.Context, t *Task) error { return fmt.Errorf("handler not set") }),
shutdownTimeout: params.shutdownTimeout,
starting: params.starting,
finished: params.finished,
}
}
// Note: stops only the "processor" goroutine, does not stop workers.
// It's safe to call this method multiple times.
func (p *processor) stop() {
p.once.Do(func() {
p.logger.Debug("Processor shutting down...")
// Unblock if processor is waiting for sema token.
close(p.quit)
// Signal the processor goroutine to stop processing tasks
// from the queue.
p.done <- struct{}{}
})
}
// NOTE: once shutdown, processor cannot be re-started.
func (p *processor) shutdown() {
p.stop()
time.AfterFunc(p.shutdownTimeout, func() { close(p.abort) })
p.logger.Info("Waiting for all workers to finish...")
// block until all workers have released the token
for i := 0; i < cap(p.sema); i++ {
p.sema <- struct{}{}
}
p.logger.Info("All workers have finished")
}
func (p *processor) start(wg *sync.WaitGroup) {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-p.done:
p.logger.Debug("Processor done")
return
default:
p.exec()
}
}
}()
}
// exec pulls a task out of the queue and starts a worker goroutine to
// process the task.
func (p *processor) exec() {
select {
case <-p.quit:
return
case p.sema <- struct{}{}: // acquire token
qnames := p.queues()
msg, leaseExpirationTime, err := p.broker.Dequeue(qnames...)
switch {
case errors.Is(err, errors.ErrNoProcessableTask):
p.logger.Debug("All queues are empty")
// Queues are empty, this is a normal behavior.
// Sleep to avoid slamming redis and let scheduler move tasks into queues.
// Note: We are not using a blocking pop operation; we poll the queues instead.
// This adds significant load to redis.
time.Sleep(time.Second)
<-p.sema // release token
return
case err != nil:
if p.errLogLimiter.Allow() {
p.logger.Errorf("Dequeue error: %v", err)
}
<-p.sema // release token
return
}
lease := base.NewLease(leaseExpirationTime)
deadline := p.computeDeadline(msg)
p.starting <- &workerInfo{msg, time.Now(), deadline, lease}
go func() {
defer func() {
p.finished <- msg
<-p.sema // release token
}()
ctx, cancel := asynqcontext.New(p.baseCtxFn(), msg, deadline)
p.cancelations.Add(msg.ID, cancel)
defer func() {
cancel()
p.cancelations.Delete(msg.ID)
}()
// check context before starting a worker goroutine.
select {
case <-ctx.Done():
// already canceled (e.g. deadline exceeded).
p.handleFailedMessage(ctx, lease, msg, ctx.Err())
return
default:
}
resCh := make(chan error, 1)
go func() {
task := newTask(
msg.Type,
msg.Payload,
&ResultWriter{
id: msg.ID,
qname: msg.Queue,
broker: p.broker,
ctx: ctx,
},
)
resCh <- p.perform(ctx, task)
}()
select {
case <-p.abort:
// time is up, push the message back to queue and quit this worker goroutine.
p.logger.Warnf("Quitting worker. task id=%s", msg.ID)
p.requeue(lease, msg)
return
case <-lease.Done():
cancel()
p.handleFailedMessage(ctx, lease, msg, ErrLeaseExpired)
return
case <-ctx.Done():
p.handleFailedMessage(ctx, lease, msg, ctx.Err())
return
case resErr := <-resCh:
if resErr != nil {
p.handleFailedMessage(ctx, lease, msg, resErr)
return
}
p.handleSucceededMessage(lease, msg)
}
}()
}
}
func (p *processor) requeue(l *base.Lease, msg *base.TaskMessage) {
if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
err := p.broker.Requeue(ctx, msg)
if err != nil {
p.logger.Errorf("Could not push task id=%s back to queue: %v", msg.ID, err)
} else {
p.logger.Infof("Pushed task id=%s back to queue", msg.ID)
}
}
func (p *processor) handleSucceededMessage(l *base.Lease, msg *base.TaskMessage) {
if msg.Retention > 0 {
p.markAsComplete(l, msg)
} else {
p.markAsDone(l, msg)
}
}
func (p *processor) markAsComplete(l *base.Lease, msg *base.TaskMessage) {
if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
err := p.broker.MarkAsComplete(ctx, msg)
if err != nil {
errMsg := fmt.Sprintf("Could not move task id=%s type=%q from %q to %q: %+v",
msg.ID, msg.Type, base.ActiveKey(msg.Queue), base.CompletedKey(msg.Queue), err)
p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{
fn: func() error {
return p.broker.MarkAsComplete(ctx, msg)
},
errMsg: errMsg,
deadline: l.Deadline(),
}
}
}
func (p *processor) markAsDone(l *base.Lease, msg *base.TaskMessage) {
if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
err := p.broker.Done(ctx, msg)
if err != nil {
errMsg := fmt.Sprintf("Could not remove task id=%s type=%q from %q err: %+v", msg.ID, msg.Type, base.ActiveKey(msg.Queue), err)
p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{
fn: func() error {
return p.broker.Done(ctx, msg)
},
errMsg: errMsg,
deadline: l.Deadline(),
}
}
}
// SkipRetry is used as a return value from Handler.ProcessTask to indicate that
// the task should not be retried and should be archived instead.
var SkipRetry = errors.New("skip retry for the task")
func (p *processor) handleFailedMessage(ctx context.Context, l *base.Lease, msg *base.TaskMessage, err error) {
if p.errHandler != nil {
p.errHandler.HandleError(ctx, NewTask(msg.Type, msg.Payload), err)
}
if !p.isFailureFunc(err) {
// retry the task without marking it as failed
p.retry(l, msg, err, false /*isFailure*/)
return
}
if msg.Retried >= msg.Retry || errors.Is(err, SkipRetry) {
p.logger.Warnf("Retry exhausted for task id=%s", msg.ID)
p.archive(l, msg, err)
} else {
p.retry(l, msg, err, true /*isFailure*/)
}
}
func (p *processor) retry(l *base.Lease, msg *base.TaskMessage, e error, isFailure bool) {
if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
d := p.retryDelayFunc(msg.Retried, e, NewTask(msg.Type, msg.Payload))
retryAt := time.Now().Add(d)
err := p.broker.Retry(ctx, msg, retryAt, e.Error(), isFailure)
if err != nil {
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.RetryKey(msg.Queue))
p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{
fn: func() error {
return p.broker.Retry(ctx, msg, retryAt, e.Error(), isFailure)
},
errMsg: errMsg,
deadline: l.Deadline(),
}
}
}
func (p *processor) archive(l *base.Lease, msg *base.TaskMessage, e error) {
if !l.IsValid() {
// If lease is not valid, do not write to redis; Let recoverer take care of it.
return
}
ctx, _ := context.WithDeadline(context.Background(), l.Deadline())
err := p.broker.Archive(ctx, msg, e.Error())
if err != nil {
errMsg := fmt.Sprintf("Could not move task id=%s from %q to %q", msg.ID, base.ActiveKey(msg.Queue), base.ArchivedKey(msg.Queue))
p.logger.Warnf("%s; Will retry syncing", errMsg)
p.syncRequestCh <- &syncRequest{
fn: func() error {
return p.broker.Archive(ctx, msg, e.Error())
},
errMsg: errMsg,
deadline: l.Deadline(),
}
}
}
// queues returns a list of queues to query.
// Order of the queue names is based on the priority of each queue.
// Queue names are sorted by their priority level if strict-priority is true.
// If strict-priority is false, then the order of queue names is roughly based on
// the priority level but randomized in order to avoid starving low priority queues.
func (p *processor) queues() []string {
// skip the overhead of generating a list of queue names
// if we are processing one queue.
if len(p.queueConfig) == 1 {
for qname := range p.queueConfig {
return []string{qname}
}
}
if p.orderedQueues != nil {
return p.orderedQueues
}
var names []string
for qname, priority := range p.queueConfig {
for i := 0; i < priority; i++ {
names = append(names, qname)
}
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
r.Shuffle(len(names), func(i, j int) { names[i], names[j] = names[j], names[i] })
return uniq(names, len(p.queueConfig))
}
// perform calls the handler with the given task.
// If the call returns without panic, it simply returns the value,
// otherwise, it recovers from panic and returns an error.
func (p *processor) perform(ctx context.Context, task *Task) (err error) {
defer func() {
if x := recover(); x != nil {
p.logger.Errorf("recovering from panic. See the stack trace below for details:\n%s", string(debug.Stack()))
_, file, line, ok := runtime.Caller(1) // skip the first frame (panic itself)
if ok && strings.Contains(file, "runtime/") {
// The panic came from the runtime, most likely due to incorrect
// map/slice usage. The parent frame should have the real trigger.
_, file, line, ok = runtime.Caller(2)
}
// Include the file and line number info in the error, if runtime.Caller returned ok.
if ok {
err = fmt.Errorf("panic [%s:%d]: %v", file, line, x)
} else {
err = fmt.Errorf("panic: %v", x)
}
}
}()
return p.handler.ProcessTask(ctx, task)
}
// uniq dedupes elements and returns a slice of unique names of length at most l.
// Order of the output slice is based on the input list.
func uniq(names []string, l int) []string {
var res []string
seen := make(map[string]struct{})
for _, s := range names {
if _, ok := seen[s]; !ok {
seen[s] = struct{}{}
res = append(res, s)
}
if len(res) == l {
break
}
}
return res
}
// sortByPriority returns a list of queue names sorted by
// their priority level in descending order.
func sortByPriority(qcfg map[string]int) []string {
var queues []*queue
for qname, n := range qcfg {
queues = append(queues, &queue{qname, n})
}
sort.Sort(sort.Reverse(byPriority(queues)))
var res []string
for _, q := range queues {
res = append(res, q.name)
}
return res
}
type queue struct {
name string
priority int
}
type byPriority []*queue
func (x byPriority) Len() int { return len(x) }
func (x byPriority) Less(i, j int) bool { return x[i].priority < x[j].priority }
func (x byPriority) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
// normalizeQueues divides priority numbers by their greatest common divisor.
func normalizeQueues(queues map[string]int) map[string]int {
var xs []int
for _, x := range queues {
xs = append(xs, x)
}
d := gcd(xs...)
res := make(map[string]int)
for q, x := range queues {
res[q] = x / d
}
return res
}
func gcd(xs ...int) int {
fn := func(x, y int) int {
for y > 0 {
x, y = y, x%y
}
return x
}
res := xs[0]
for i := 0; i < len(xs); i++ {
res = fn(xs[i], res)
if res == 1 {
return 1
}
}
return res
}
// computeDeadline returns the given task's deadline.
func (p *processor) computeDeadline(msg *base.TaskMessage) time.Time {
if msg.Timeout == 0 && msg.Deadline == 0 {
p.logger.Errorf("asynq: internal error: both timeout and deadline are not set for the task message: %s", msg.ID)
return p.clock.Now().Add(defaultTimeout)
}
if msg.Timeout != 0 && msg.Deadline != 0 {
deadlineUnix := math.Min(float64(p.clock.Now().Unix()+msg.Timeout), float64(msg.Deadline))
return time.Unix(int64(deadlineUnix), 0)
}
if msg.Timeout != 0 {
return p.clock.Now().Add(time.Duration(msg.Timeout) * time.Second)
}
return time.Unix(msg.Deadline, 0)
}
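The queue selection logic above is driven by the Queues map and StrictPriority flag in the server config. A minimal sketch, assuming a local Redis and hypothetical queue names; with these weights (normalized by their GCD), "critical" is polled roughly three times as often as "low".

package main

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: "localhost:6379"},
		asynq.Config{
			Concurrency: 10,
			Queues: map[string]int{
				"critical": 6,
				"default":  4,
				"low":      2,
			},
			// StrictPriority: true would instead always drain "critical"
			// before consuming from the other queues.
		},
	)

	mux := asynq.NewServeMux()
	mux.HandleFunc("example:task", func(ctx context.Context, t *asynq.Task) error {
		return nil // hypothetical no-op handler
	})

	if err := srv.Run(mux); err != nil {
		log.Fatal(err)
	}
}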

vendor/github.com/hibiken/asynq/recoverer.go generated vendored Normal file

@ -0,0 +1,126 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"context"
"sync"
"time"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/errors"
"github.com/hibiken/asynq/internal/log"
)
type recoverer struct {
logger *log.Logger
broker base.Broker
retryDelayFunc RetryDelayFunc
isFailureFunc func(error) bool
// channel to communicate back to the long running "recoverer" goroutine.
done chan struct{}
// list of queues to check for deadline.
queues []string
// poll interval.
interval time.Duration
}
type recovererParams struct {
logger *log.Logger
broker base.Broker
queues []string
interval time.Duration
retryDelayFunc RetryDelayFunc
isFailureFunc func(error) bool
}
func newRecoverer(params recovererParams) *recoverer {
return &recoverer{
logger: params.logger,
broker: params.broker,
done: make(chan struct{}),
queues: params.queues,
interval: params.interval,
retryDelayFunc: params.retryDelayFunc,
isFailureFunc: params.isFailureFunc,
}
}
func (r *recoverer) shutdown() {
r.logger.Debug("Recoverer shutting down...")
// Signal the recoverer goroutine to stop polling.
r.done <- struct{}{}
}
func (r *recoverer) start(wg *sync.WaitGroup) {
wg.Add(1)
go func() {
defer wg.Done()
r.recover()
timer := time.NewTimer(r.interval)
for {
select {
case <-r.done:
r.logger.Debug("Recoverer done")
timer.Stop()
return
case <-timer.C:
r.recover()
timer.Reset(r.interval)
}
}
}()
}
// ErrLeaseExpired error indicates that the task failed because the worker working on the task
// could not extend its lease due to missing heartbeats. The worker may have crashed or been cut off from the network.
var ErrLeaseExpired = errors.New("asynq: task lease expired")
func (r *recoverer) recover() {
r.recoverLeaseExpiredTasks()
r.recoverStaleAggregationSets()
}
func (r *recoverer) recoverLeaseExpiredTasks() {
// Get all tasks which have expired 30 seconds ago or earlier to accommodate a certain amount of clock skew.
cutoff := time.Now().Add(-30 * time.Second)
msgs, err := r.broker.ListLeaseExpired(cutoff, r.queues...)
if err != nil {
r.logger.Warnf("recoverer: could not list lease expired tasks: %v", err)
return
}
for _, msg := range msgs {
if msg.Retried >= msg.Retry {
r.archive(msg, ErrLeaseExpired)
} else {
r.retry(msg, ErrLeaseExpired)
}
}
}
func (r *recoverer) recoverStaleAggregationSets() {
for _, qname := range r.queues {
if err := r.broker.ReclaimStaleAggregationSets(qname); err != nil {
r.logger.Warnf("recoverer: could not reclaim stale aggregation sets in queue %q: %v", qname, err)
}
}
}
func (r *recoverer) retry(msg *base.TaskMessage, err error) {
delay := r.retryDelayFunc(msg.Retried, err, NewTask(msg.Type, msg.Payload))
retryAt := time.Now().Add(delay)
if err := r.broker.Retry(context.Background(), msg, retryAt, err.Error(), r.isFailureFunc(err)); err != nil {
r.logger.Warnf("recoverer: could not retry lease expired task: %v", err)
}
}
func (r *recoverer) archive(msg *base.TaskMessage, err error) {
if err := r.broker.Archive(context.Background(), msg, err.Error()); err != nil {
r.logger.Warnf("recoverer: could not move task to archive: %v", err)
}
}
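The recoverer reuses the server's RetryDelayFunc when rescheduling lease-expired tasks, so a custom backoff applies to recovered tasks too. A minimal sketch of plugging one in; the linear backoff and Redis address are illustrative assumptions, not the library default.

package main

import (
	"time"

	"github.com/hibiken/asynq"
)

// linearBackoff waits n+1 minutes before the n-th retry, replacing
// the default exponential backoff.
func linearBackoff(n int, err error, task *asynq.Task) time.Duration {
	return time.Duration(n+1) * time.Minute
}

func newServer() *asynq.Server {
	return asynq.NewServer(
		asynq.RedisClientOpt{Addr: "localhost:6379"},
		asynq.Config{
			RetryDelayFunc: linearBackoff,
			// IsFailure could also be set here to control which handler
			// errors count as failures for retry bookkeeping.
		},
	)
}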

vendor/github.com/hibiken/asynq/scheduler.go generated vendored Normal file

@ -0,0 +1,322 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"fmt"
"os"
"sync"
"time"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/rdb"
"github.com/robfig/cron/v3"
)
// A Scheduler kicks off tasks at regular intervals based on the user defined schedule.
//
// Schedulers are safe for concurrent use by multiple goroutines.
type Scheduler struct {
id string
state *serverState
logger *log.Logger
client *Client
rdb *rdb.RDB
cron *cron.Cron
location *time.Location
done chan struct{}
wg sync.WaitGroup
preEnqueueFunc func(task *Task, opts []Option)
postEnqueueFunc func(info *TaskInfo, err error)
errHandler func(task *Task, opts []Option, err error)
// guards idmap
mu sync.Mutex
// idmap maps Scheduler's entry ID to cron.EntryID
// to avoid using cron.EntryID as the public API of
// the Scheduler.
idmap map[string]cron.EntryID
}
// NewScheduler returns a new Scheduler instance given the redis connection option.
// The parameter opts is optional; defaults will be used if opts is set to nil.
func NewScheduler(r RedisConnOpt, opts *SchedulerOpts) *Scheduler {
c, ok := r.MakeRedisClient().(redis.UniversalClient)
if !ok {
panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
}
if opts == nil {
opts = &SchedulerOpts{}
}
logger := log.NewLogger(opts.Logger)
loglevel := opts.LogLevel
if loglevel == level_unspecified {
loglevel = InfoLevel
}
logger.SetLevel(toInternalLogLevel(loglevel))
loc := opts.Location
if loc == nil {
loc = time.UTC
}
return &Scheduler{
id: generateSchedulerID(),
state: &serverState{value: srvStateNew},
logger: logger,
client: NewClient(r),
rdb: rdb.NewRDB(c),
cron: cron.New(cron.WithLocation(loc)),
location: loc,
done: make(chan struct{}),
preEnqueueFunc: opts.PreEnqueueFunc,
postEnqueueFunc: opts.PostEnqueueFunc,
errHandler: opts.EnqueueErrorHandler,
idmap: make(map[string]cron.EntryID),
}
}
func generateSchedulerID() string {
host, err := os.Hostname()
if err != nil {
host = "unknown-host"
}
return fmt.Sprintf("%s:%d:%v", host, os.Getpid(), uuid.New())
}
// SchedulerOpts specifies scheduler options.
type SchedulerOpts struct {
// Logger specifies the logger used by the scheduler instance.
//
// If unset, the default logger is used.
Logger Logger
// LogLevel specifies the minimum log level to enable.
//
// If unset, InfoLevel is used by default.
LogLevel LogLevel
// Location specifies the time zone location.
//
// If unset, the UTC time zone (time.UTC) is used.
Location *time.Location
// PreEnqueueFunc, if provided, is called before a task gets enqueued by Scheduler.
// The callback function should return quickly to not block the current thread.
PreEnqueueFunc func(task *Task, opts []Option)
// PostEnqueueFunc, if provided, is called after a task gets enqueued by Scheduler.
// The callback function should return quickly to not block the current thread.
PostEnqueueFunc func(info *TaskInfo, err error)
// Deprecated: Use PostEnqueueFunc instead
// EnqueueErrorHandler gets called when scheduler cannot enqueue a registered task
// due to an error.
EnqueueErrorHandler func(task *Task, opts []Option, err error)
}
// enqueueJob encapsulates the job of enqueuing a task and recording the event.
type enqueueJob struct {
id uuid.UUID
cronspec string
task *Task
opts []Option
location *time.Location
logger *log.Logger
client *Client
rdb *rdb.RDB
preEnqueueFunc func(task *Task, opts []Option)
postEnqueueFunc func(info *TaskInfo, err error)
errHandler func(task *Task, opts []Option, err error)
}
func (j *enqueueJob) Run() {
if j.preEnqueueFunc != nil {
j.preEnqueueFunc(j.task, j.opts)
}
info, err := j.client.Enqueue(j.task, j.opts...)
if j.postEnqueueFunc != nil {
j.postEnqueueFunc(info, err)
}
if err != nil {
if j.errHandler != nil {
j.errHandler(j.task, j.opts, err)
}
return
}
j.logger.Debugf("scheduler enqueued a task: %+v", info)
event := &base.SchedulerEnqueueEvent{
TaskID: info.ID,
EnqueuedAt: time.Now().In(j.location),
}
err = j.rdb.RecordSchedulerEnqueueEvent(j.id.String(), event)
if err != nil {
j.logger.Warnf("scheduler could not record enqueue event of enqueued task %s: %v", info.ID, err)
}
}
// Register registers a task to be enqueued on the given schedule specified by the cronspec.
// It returns an ID of the newly registered entry.
func (s *Scheduler) Register(cronspec string, task *Task, opts ...Option) (entryID string, err error) {
job := &enqueueJob{
id: uuid.New(),
cronspec: cronspec,
task: task,
opts: opts,
location: s.location,
client: s.client,
rdb: s.rdb,
logger: s.logger,
preEnqueueFunc: s.preEnqueueFunc,
postEnqueueFunc: s.postEnqueueFunc,
errHandler: s.errHandler,
}
cronID, err := s.cron.AddJob(cronspec, job)
if err != nil {
return "", err
}
s.mu.Lock()
s.idmap[job.id.String()] = cronID
s.mu.Unlock()
return job.id.String(), nil
}
// Unregister removes a registered entry by entry ID.
// Unregister returns a non-nil error if no entries were found for the given entryID.
func (s *Scheduler) Unregister(entryID string) error {
s.mu.Lock()
defer s.mu.Unlock()
cronID, ok := s.idmap[entryID]
if !ok {
return fmt.Errorf("asynq: no scheduler entry found")
}
delete(s.idmap, entryID)
s.cron.Remove(cronID)
return nil
}
// Run starts the scheduler until an os signal to exit the program is received.
// It returns an error if scheduler is already running or has been shutdown.
func (s *Scheduler) Run() error {
if err := s.Start(); err != nil {
return err
}
s.waitForSignals()
s.Shutdown()
return nil
}
// Start starts the scheduler.
// It returns an error if the scheduler is already running or has been shutdown.
func (s *Scheduler) Start() error {
if err := s.start(); err != nil {
return err
}
s.logger.Info("Scheduler starting")
s.logger.Infof("Scheduler timezone is set to %v", s.location)
s.cron.Start()
s.wg.Add(1)
go s.runHeartbeater()
return nil
}
// Checks server state and returns an error if pre-condition is not met.
// Otherwise it sets the server state to active.
func (s *Scheduler) start() error {
s.state.mu.Lock()
defer s.state.mu.Unlock()
switch s.state.value {
case srvStateActive:
return fmt.Errorf("asynq: the scheduler is already running")
case srvStateClosed:
return fmt.Errorf("asynq: the scheduler has already been stopped")
}
s.state.value = srvStateActive
return nil
}
// Shutdown stops and shuts down the scheduler.
func (s *Scheduler) Shutdown() {
s.state.mu.Lock()
if s.state.value == srvStateNew || s.state.value == srvStateClosed {
// scheduler is not running, do nothing and return.
s.state.mu.Unlock()
return
}
s.state.value = srvStateClosed
s.state.mu.Unlock()
s.logger.Info("Scheduler shutting down")
close(s.done) // signal heartbeater to stop
ctx := s.cron.Stop()
<-ctx.Done()
s.wg.Wait()
s.clearHistory()
s.client.Close()
s.rdb.Close()
s.logger.Info("Scheduler stopped")
}
func (s *Scheduler) runHeartbeater() {
defer s.wg.Done()
ticker := time.NewTicker(5 * time.Second)
for {
select {
case <-s.done:
s.logger.Debugf("Scheduler heatbeater shutting down")
s.rdb.ClearSchedulerEntries(s.id)
ticker.Stop()
return
case <-ticker.C:
s.beat()
}
}
}
// beat writes a snapshot of entries to redis.
func (s *Scheduler) beat() {
var entries []*base.SchedulerEntry
for _, entry := range s.cron.Entries() {
job := entry.Job.(*enqueueJob)
e := &base.SchedulerEntry{
ID: job.id.String(),
Spec: job.cronspec,
Type: job.task.Type(),
Payload: job.task.Payload(),
Opts: stringifyOptions(job.opts),
Next: entry.Next,
Prev: entry.Prev,
}
entries = append(entries, e)
}
s.logger.Debugf("Writing entries %v", entries)
if err := s.rdb.WriteSchedulerEntries(s.id, entries, 5*time.Second); err != nil {
s.logger.Warnf("Scheduler could not write heartbeat data: %v", err)
}
}
func stringifyOptions(opts []Option) []string {
var res []string
for _, opt := range opts {
res = append(res, opt.String())
}
return res
}
func (s *Scheduler) clearHistory() {
for _, entry := range s.cron.Entries() {
job := entry.Job.(*enqueueJob)
if err := s.rdb.ClearSchedulerHistory(job.id.String()); err != nil {
s.logger.Warnf("Could not clear scheduler history for entry %q: %v", job.id.String(), err)
}
}
}
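For reference, a minimal sketch of how the Scheduler above is typically driven; the Redis address, cronspec, and task type name here are illustrative assumptions, not values taken from this commit:

package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	scheduler := asynq.NewScheduler(
		asynq.RedisClientOpt{Addr: "localhost:6379"}, // hypothetical address
		nil,
	)

	// Register returns the entry ID that Unregister accepts.
	task := asynq.NewTask("example:cleanup", nil) // hypothetical task type
	entryID, err := scheduler.Register("@every 1m", task)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("registered entry %s", entryID)

	// Run blocks until SIGTERM/SIGINT is received, then calls Shutdown.
	if err := scheduler.Run(); err != nil {
		log.Fatal(err)
	}
}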

vendor/github.com/hibiken/asynq/servemux.go generated vendored Normal file

@ -0,0 +1,158 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"context"
"fmt"
"sort"
"strings"
"sync"
)
// ServeMux is a multiplexer for asynchronous tasks.
// It matches the type of each task against a list of registered patterns
// and calls the handler for the pattern that most closely matches the
// task's type name.
//
// Longer patterns take precedence over shorter ones, so that if there are
// handlers registered for both "images" and "images:thumbnails",
// the latter handler will be called for tasks with a type name beginning with
// "images:thumbnails" and the former will receive tasks with type name beginning
// with "images".
type ServeMux struct {
mu sync.RWMutex
m map[string]muxEntry
es []muxEntry // slice of entries sorted from longest to shortest.
mws []MiddlewareFunc
}
type muxEntry struct {
h Handler
pattern string
}
// MiddlewareFunc is a function which receives an asynq.Handler and returns another asynq.Handler.
// Typically, the returned handler is a closure which does something with the context and task passed
// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
type MiddlewareFunc func(Handler) Handler
// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux {
return new(ServeMux)
}
// ProcessTask dispatches the task to the handler whose
// pattern most closely matches the task type.
func (mux *ServeMux) ProcessTask(ctx context.Context, task *Task) error {
h, _ := mux.Handler(task)
return h.ProcessTask(ctx, task)
}
// Handler returns the handler to use for the given task.
// It always returns a non-nil handler.
//
// Handler also returns the registered pattern that matches the task.
//
// If there is no registered handler that applies to the task,
// handler returns a 'not found' handler which returns an error.
func (mux *ServeMux) Handler(t *Task) (h Handler, pattern string) {
mux.mu.RLock()
defer mux.mu.RUnlock()
h, pattern = mux.match(t.Type())
if h == nil {
h, pattern = NotFoundHandler(), ""
}
for i := len(mux.mws) - 1; i >= 0; i-- {
h = mux.mws[i](h)
}
return h, pattern
}
// Find a handler on a handler map given a typename string.
// Most-specific (longest) pattern wins.
func (mux *ServeMux) match(typename string) (h Handler, pattern string) {
// Check for exact match first.
v, ok := mux.m[typename]
if ok {
return v.h, v.pattern
}
// Check for longest valid match.
// mux.es contains all patterns from longest to shortest.
for _, e := range mux.es {
if strings.HasPrefix(typename, e.pattern) {
return e.h, e.pattern
}
}
return nil, ""
}
// Handle registers the handler for the given pattern.
// If a handler already exists for pattern, Handle panics.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
mux.mu.Lock()
defer mux.mu.Unlock()
if strings.TrimSpace(pattern) == "" {
panic("asynq: invalid pattern")
}
if handler == nil {
panic("asynq: nil handler")
}
if _, exist := mux.m[pattern]; exist {
panic("asynq: multiple registrations for " + pattern)
}
if mux.m == nil {
mux.m = make(map[string]muxEntry)
}
e := muxEntry{h: handler, pattern: pattern}
mux.m[pattern] = e
mux.es = appendSorted(mux.es, e)
}
func appendSorted(es []muxEntry, e muxEntry) []muxEntry {
n := len(es)
i := sort.Search(n, func(i int) bool {
return len(es[i].pattern) < len(e.pattern)
})
if i == n {
return append(es, e)
}
// we now know that i points at where we want to insert.
es = append(es, muxEntry{}) // try to grow the slice in place, any entry works.
copy(es[i+1:], es[i:]) // shift shorter entries down.
es[i] = e
return es
}
// HandleFunc registers the handler function for the given pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(context.Context, *Task) error) {
if handler == nil {
panic("asynq: nil handler")
}
mux.Handle(pattern, HandlerFunc(handler))
}
// Use appends a MiddlewareFunc to the chain.
// Middlewares are executed in the order that they are applied to the ServeMux.
func (mux *ServeMux) Use(mws ...MiddlewareFunc) {
mux.mu.Lock()
defer mux.mu.Unlock()
for _, fn := range mws {
mux.mws = append(mux.mws, fn)
}
}
// NotFound returns an error indicating that the handler was not found for the given task.
func NotFound(ctx context.Context, task *Task) error {
return fmt.Errorf("handler not found for task %q", task.Type())
}
// NotFoundHandler returns a simple task handler that returns a ``not found`` error.
func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
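As a rough usage sketch of ServeMux: the task type names and the logging middleware below are illustrative assumptions, but the precedence rule is the one documented above, so the longer "image:resize" pattern wins over "image:" for resize tasks:

package main

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

// loggingMiddleware is a hypothetical MiddlewareFunc that logs each task type
// before delegating to the next handler.
func loggingMiddleware(next asynq.Handler) asynq.Handler {
	return asynq.HandlerFunc(func(ctx context.Context, t *asynq.Task) error {
		log.Printf("processing %s", t.Type())
		return next.ProcessTask(ctx, t)
	})
}

func main() {
	mux := asynq.NewServeMux()
	mux.Use(loggingMiddleware)
	mux.HandleFunc("image:", func(ctx context.Context, t *asynq.Task) error {
		return nil // generic image tasks
	})
	mux.HandleFunc("image:resize", func(ctx context.Context, t *asynq.Task) error {
		return nil // resize tasks; matched in preference to "image:"
	})
	// mux is then passed to (*Server).Run or (*Server).Start.
}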

vendor/github.com/hibiken/asynq/server.go generated vendored Normal file

@ -0,0 +1,699 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"context"
"errors"
"fmt"
"math"
"math/rand"
"runtime"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
"github.com/hibiken/asynq/internal/rdb"
)
// Server is responsible for task processing and task lifecycle management.
//
// Server pulls tasks off queues and processes them.
// If the processing of a task is unsuccessful, server will schedule it for a retry.
//
// A task will be retried until either it is processed successfully
// or it reaches its max retry count.
//
// If a task exhausts its retries, it will be moved to the archive and
// will be kept in the archive set.
// Note that the archive size is finite and once it reaches its max size,
// oldest tasks in the archive will be deleted.
type Server struct {
logger *log.Logger
broker base.Broker
state *serverState
// wait group to wait for all goroutines to finish.
wg sync.WaitGroup
forwarder *forwarder
processor *processor
syncer *syncer
heartbeater *heartbeater
subscriber *subscriber
recoverer *recoverer
healthchecker *healthchecker
janitor *janitor
aggregator *aggregator
}
type serverState struct {
mu sync.Mutex
value serverStateValue
}
type serverStateValue int
const (
// StateNew represents a new server. Server begins in
// this state and then transitions to StateActive when
// Start or Run is called.
srvStateNew serverStateValue = iota
// StateActive indicates the server is up and active.
srvStateActive
// StateStopped indicates the server is up but no longer processing new tasks.
srvStateStopped
// StateClosed indicates the server has been shutdown.
srvStateClosed
)
var serverStates = []string{
"new",
"active",
"stopped",
"closed",
}
func (s serverStateValue) String() string {
if srvStateNew <= s && s <= srvStateClosed {
return serverStates[s]
}
return "unknown status"
}
// Config specifies the server's background-task processing behavior.
type Config struct {
// Maximum number of concurrent processing of tasks.
//
// If set to a zero or negative value, NewServer will overwrite the value
// with the number of CPUs usable by the current process.
Concurrency int
// BaseContext optionally specifies a function that returns the base context for Handler invocations on this server.
//
// If BaseContext is nil, the default is context.Background().
// If this is defined, then it MUST return a non-nil context
BaseContext func() context.Context
// Function to calculate retry delay for a failed task.
//
// By default, it uses exponential backoff algorithm to calculate the delay.
RetryDelayFunc RetryDelayFunc
// Predicate function to determine whether the error returned from Handler is a failure.
// If the function returns false, Server will not increment the retried counter for the task,
// and Server won't record the queue stats (processed and failed stats) to avoid skewing the error
// rate of the queue.
//
// By default, if the given error is non-nil the function returns true.
IsFailure func(error) bool
// List of queues to process with given priority value. Keys are the names of the
// queues and values are associated priority value.
//
// If set to nil or not specified, the server will process only the "default" queue.
//
// Priority is treated as follows to avoid starving low priority queues.
//
// Example:
//
// Queues: map[string]int{
// "critical": 6,
// "default": 3,
// "low": 1,
// }
//
// With the above config and given that all queues are not empty, the tasks
// in "critical", "default", "low" should be processed 60%, 30%, 10% of
// the time respectively.
//
// If a queue has a zero or negative priority value, the queue will be ignored.
Queues map[string]int
// StrictPriority indicates whether the queue priority should be treated strictly.
//
// If set to true, tasks in the queue with the highest priority are processed first.
// The tasks in lower priority queues are processed only when those queues with
// higher priorities are empty.
StrictPriority bool
// ErrorHandler handles errors returned by the task handler.
//
// HandleError is invoked only if the task handler returns a non-nil error.
//
// Example:
//
// func reportError(ctx context.Context, task *asynq.Task, err error) {
// retried, _ := asynq.GetRetryCount(ctx)
// maxRetry, _ := asynq.GetMaxRetry(ctx)
// if retried >= maxRetry {
// err = fmt.Errorf("retry exhausted for task %s: %w", task.Type(), err)
// }
// errorReportingService.Notify(err)
// }
//
// ErrorHandler: asynq.ErrorHandlerFunc(reportError)
ErrorHandler ErrorHandler
// Logger specifies the logger used by the server instance.
//
// If unset, default logger is used.
Logger Logger
// LogLevel specifies the minimum log level to enable.
//
// If unset, InfoLevel is used by default.
LogLevel LogLevel
// ShutdownTimeout specifies the duration to wait to let workers finish their tasks
// before forcing them to abort when stopping the server.
//
// If unset or zero, default timeout of 8 seconds is used.
ShutdownTimeout time.Duration
// HealthCheckFunc is called periodically with any errors encountered during ping to the
// connected redis server.
HealthCheckFunc func(error)
// HealthCheckInterval specifies the interval between healthchecks.
//
// If unset or zero, the interval is set to 15 seconds.
HealthCheckInterval time.Duration
// DelayedTaskCheckInterval specifies the interval between checks run on 'scheduled' and 'retry'
// tasks, and forwarding them to 'pending' state if they are ready to be processed.
//
// If unset or zero, the interval is set to 5 seconds.
DelayedTaskCheckInterval time.Duration
// GroupGracePeriod specifies the amount of time the server will wait for an incoming task before aggregating
// the tasks in a group. If an incoming task is received within this period, the server will wait for another
// period of the same length, up to GroupMaxDelay if specified.
//
// If unset or zero, the grace period is set to 1 minute.
// Minimum duration for GroupGracePeriod is 1 second. If value specified is less than a second, the call to
// NewServer will panic.
GroupGracePeriod time.Duration
// GroupMaxDelay specifies the maximum amount of time the server will wait for incoming tasks before aggregating
// the tasks in a group.
//
// If unset or zero, no delay limit is used.
GroupMaxDelay time.Duration
// GroupMaxSize specifies the maximum number of tasks that can be aggregated into a single task within a group.
// If GroupMaxSize is reached, the server will aggregate the tasks into one immediately.
//
// If unset or zero, no size limit is used.
GroupMaxSize int
// GroupAggregator specifies the aggregation function used to aggregate multiple tasks in a group into one task.
//
// If unset or nil, the group aggregation feature will be disabled on the server.
GroupAggregator GroupAggregator
}
// GroupAggregator aggregates a group of tasks into one before the tasks are passed to the Handler.
type GroupAggregator interface {
// Aggregate aggregates the given tasks in a group with the given group name,
// and returns a new task which is the aggregation of those tasks.
//
// Use NewTask(typename, payload, opts...) to set any options for the aggregated task.
// The Queue option, if provided, will be ignored and the aggregated task will always be enqueued
// to the same queue the group belonged to.
Aggregate(group string, tasks []*Task) *Task
}
// The GroupAggregatorFunc type is an adapter to allow the use of ordinary functions as a GroupAggregator.
// If f is a function with the appropriate signature, GroupAggregatorFunc(f) is a GroupAggregator that calls f.
type GroupAggregatorFunc func(group string, tasks []*Task) *Task
// Aggregate calls fn(group, tasks)
func (fn GroupAggregatorFunc) Aggregate(group string, tasks []*Task) *Task {
return fn(group, tasks)
}
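// As an illustrative sketch (the "batched:" type name is an assumption, not
// part of this library), a GroupAggregatorFunc that concatenates grouped
// payloads into one task could look like:
//
//	asynq.GroupAggregatorFunc(func(group string, tasks []*asynq.Task) *asynq.Task {
//		var combined []byte
//		for _, t := range tasks {
//			combined = append(combined, t.Payload()...)
//			combined = append(combined, '\n')
//		}
//		return asynq.NewTask("batched:"+group, combined)
//	})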
// An ErrorHandler handles an error that occurred during task processing.
type ErrorHandler interface {
HandleError(ctx context.Context, task *Task, err error)
}
// The ErrorHandlerFunc type is an adapter to allow the use of ordinary functions as an ErrorHandler.
// If f is a function with the appropriate signature, ErrorHandlerFunc(f) is an ErrorHandler that calls f.
type ErrorHandlerFunc func(ctx context.Context, task *Task, err error)
// HandleError calls fn(ctx, task, err)
func (fn ErrorHandlerFunc) HandleError(ctx context.Context, task *Task, err error) {
fn(ctx, task, err)
}
// RetryDelayFunc calculates the retry delay duration for a failed task given
// the retry count, error, and the task.
//
// n is the number of times the task has been retried.
// e is the error returned by the task handler.
// t is the task in question.
type RetryDelayFunc func(n int, e error, t *Task) time.Duration
// Logger supports logging at various log levels.
type Logger interface {
// Debug logs a message at Debug level.
Debug(args ...interface{})
// Info logs a message at Info level.
Info(args ...interface{})
// Warn logs a message at Warning level.
Warn(args ...interface{})
// Error logs a message at Error level.
Error(args ...interface{})
// Fatal logs a message at Fatal level
// and the process will exit with status 1.
Fatal(args ...interface{})
}
// LogLevel represents logging level.
//
// It satisfies flag.Value interface.
type LogLevel int32
const (
// Note: reserving value zero to differentiate unspecified case.
level_unspecified LogLevel = iota
// DebugLevel is the lowest level of logging.
// Debug logs are intended for debugging and development purposes.
DebugLevel
// InfoLevel is used for general informational log messages.
InfoLevel
// WarnLevel is used for undesired but relatively expected events,
// which may indicate a problem.
WarnLevel
// ErrorLevel is used for undesired and unexpected events that
// the program can recover from.
ErrorLevel
// FatalLevel is used for undesired and unexpected events that
// the program cannot recover from.
FatalLevel
)
// String is part of the flag.Value interface.
func (l *LogLevel) String() string {
switch *l {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warn"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
}
panic(fmt.Sprintf("asynq: unexpected log level: %v", *l))
}
// Set is part of the flag.Value interface.
func (l *LogLevel) Set(val string) error {
switch strings.ToLower(val) {
case "debug":
*l = DebugLevel
case "info":
*l = InfoLevel
case "warn", "warning":
*l = WarnLevel
case "error":
*l = ErrorLevel
case "fatal":
*l = FatalLevel
default:
return fmt.Errorf("asynq: unsupported log level %q", val)
}
return nil
}
func toInternalLogLevel(l LogLevel) log.Level {
switch l {
case DebugLevel:
return log.DebugLevel
case InfoLevel:
return log.InfoLevel
case WarnLevel:
return log.WarnLevel
case ErrorLevel:
return log.ErrorLevel
case FatalLevel:
return log.FatalLevel
}
panic(fmt.Sprintf("asynq: unexpected log level: %v", l))
}
// DefaultRetryDelayFunc is the default RetryDelayFunc used if one is not specified in Config.
// It uses exponential back-off strategy to calculate the retry delay.
func DefaultRetryDelayFunc(n int, e error, t *Task) time.Duration {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
// Formula taken from https://github.com/mperham/sidekiq.
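// Ignoring the random jitter term, this yields roughly 16s for n=1,
// 31s for n=2, and 96s for n=3, growing on the order of n^4.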
s := int(math.Pow(float64(n), 4)) + 15 + (r.Intn(30) * (n + 1))
return time.Duration(s) * time.Second
}
func defaultIsFailureFunc(err error) bool { return err != nil }
var defaultQueueConfig = map[string]int{
base.DefaultQueueName: 1,
}
const (
defaultShutdownTimeout = 8 * time.Second
defaultHealthCheckInterval = 15 * time.Second
defaultDelayedTaskCheckInterval = 5 * time.Second
defaultGroupGracePeriod = 1 * time.Minute
)
// NewServer returns a new Server given a redis connection option
// and server configuration.
func NewServer(r RedisConnOpt, cfg Config) *Server {
c, ok := r.MakeRedisClient().(redis.UniversalClient)
if !ok {
panic(fmt.Sprintf("asynq: unsupported RedisConnOpt type %T", r))
}
baseCtxFn := cfg.BaseContext
if baseCtxFn == nil {
baseCtxFn = context.Background
}
n := cfg.Concurrency
if n < 1 {
n = runtime.NumCPU()
}
delayFunc := cfg.RetryDelayFunc
if delayFunc == nil {
delayFunc = DefaultRetryDelayFunc
}
isFailureFunc := cfg.IsFailure
if isFailureFunc == nil {
isFailureFunc = defaultIsFailureFunc
}
queues := make(map[string]int)
for qname, p := range cfg.Queues {
if err := base.ValidateQueueName(qname); err != nil {
continue // ignore invalid queue names
}
if p > 0 {
queues[qname] = p
}
}
if len(queues) == 0 {
queues = defaultQueueConfig
}
var qnames []string
for q := range queues {
qnames = append(qnames, q)
}
shutdownTimeout := cfg.ShutdownTimeout
if shutdownTimeout == 0 {
shutdownTimeout = defaultShutdownTimeout
}
healthcheckInterval := cfg.HealthCheckInterval
if healthcheckInterval == 0 {
healthcheckInterval = defaultHealthCheckInterval
}
// TODO: Create a helper to check for zero value and fall back to default (e.g. getDurationOrDefault())
groupGracePeriod := cfg.GroupGracePeriod
if groupGracePeriod == 0 {
groupGracePeriod = defaultGroupGracePeriod
}
if groupGracePeriod < time.Second {
panic("GroupGracePeriod cannot be less than a second")
}
logger := log.NewLogger(cfg.Logger)
loglevel := cfg.LogLevel
if loglevel == level_unspecified {
loglevel = InfoLevel
}
logger.SetLevel(toInternalLogLevel(loglevel))
rdb := rdb.NewRDB(c)
starting := make(chan *workerInfo)
finished := make(chan *base.TaskMessage)
syncCh := make(chan *syncRequest)
srvState := &serverState{value: srvStateNew}
cancels := base.NewCancelations()
syncer := newSyncer(syncerParams{
logger: logger,
requestsCh: syncCh,
interval: 5 * time.Second,
})
heartbeater := newHeartbeater(heartbeaterParams{
logger: logger,
broker: rdb,
interval: 5 * time.Second,
concurrency: n,
queues: queues,
strictPriority: cfg.StrictPriority,
state: srvState,
starting: starting,
finished: finished,
})
delayedTaskCheckInterval := cfg.DelayedTaskCheckInterval
if delayedTaskCheckInterval == 0 {
delayedTaskCheckInterval = defaultDelayedTaskCheckInterval
}
forwarder := newForwarder(forwarderParams{
logger: logger,
broker: rdb,
queues: qnames,
interval: delayedTaskCheckInterval,
})
subscriber := newSubscriber(subscriberParams{
logger: logger,
broker: rdb,
cancelations: cancels,
})
processor := newProcessor(processorParams{
logger: logger,
broker: rdb,
retryDelayFunc: delayFunc,
baseCtxFn: baseCtxFn,
isFailureFunc: isFailureFunc,
syncCh: syncCh,
cancelations: cancels,
concurrency: n,
queues: queues,
strictPriority: cfg.StrictPriority,
errHandler: cfg.ErrorHandler,
shutdownTimeout: shutdownTimeout,
starting: starting,
finished: finished,
})
recoverer := newRecoverer(recovererParams{
logger: logger,
broker: rdb,
retryDelayFunc: delayFunc,
isFailureFunc: isFailureFunc,
queues: qnames,
interval: 1 * time.Minute,
})
healthchecker := newHealthChecker(healthcheckerParams{
logger: logger,
broker: rdb,
interval: healthcheckInterval,
healthcheckFunc: cfg.HealthCheckFunc,
})
janitor := newJanitor(janitorParams{
logger: logger,
broker: rdb,
queues: qnames,
interval: 8 * time.Second,
})
aggregator := newAggregator(aggregatorParams{
logger: logger,
broker: rdb,
queues: qnames,
gracePeriod: groupGracePeriod,
maxDelay: cfg.GroupMaxDelay,
maxSize: cfg.GroupMaxSize,
groupAggregator: cfg.GroupAggregator,
})
return &Server{
logger: logger,
broker: rdb,
state: srvState,
forwarder: forwarder,
processor: processor,
syncer: syncer,
heartbeater: heartbeater,
subscriber: subscriber,
recoverer: recoverer,
healthchecker: healthchecker,
janitor: janitor,
aggregator: aggregator,
}
}
// A Handler processes tasks.
//
// ProcessTask should return nil if the processing of a task
// is successful.
//
// If ProcessTask returns a non-nil error or panics, the task
// will be retried after delay if retry-count is remaining,
// otherwise the task will be archived.
//
// One exception to this rule is when ProcessTask returns a SkipRetry error.
// If the returned error is SkipRetry or an error wraps SkipRetry, retry is
// skipped and the task will be immediately archived instead.
type Handler interface {
ProcessTask(context.Context, *Task) error
}
// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as a Handler. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler that calls f.
type HandlerFunc func(context.Context, *Task) error
// ProcessTask calls fn(ctx, task)
func (fn HandlerFunc) ProcessTask(ctx context.Context, task *Task) error {
return fn(ctx, task)
}
// ErrServerClosed indicates that the operation is now illegal because the server has been shut down.
var ErrServerClosed = errors.New("asynq: Server closed")
// Run starts the task processing and blocks until
// an os signal to exit the program is received. Once it receives
// a signal, it gracefully shuts down all active workers and other
// goroutines to process the tasks.
//
// Run returns any error encountered at server startup time.
// If the server has already been shutdown, ErrServerClosed is returned.
func (srv *Server) Run(handler Handler) error {
if err := srv.Start(handler); err != nil {
return err
}
srv.waitForSignals()
srv.Shutdown()
return nil
}
// Start starts the worker server. Once the server has started,
// it pulls tasks off queues and starts a worker goroutine for each task
// and then calls the Handler to process it.
// Tasks are processed concurrently by the workers up to the number of
// concurrency specified in Config.Concurrency.
//
// Start returns any error encountered at server startup time.
// If the server has already been shutdown, ErrServerClosed is returned.
func (srv *Server) Start(handler Handler) error {
if handler == nil {
return fmt.Errorf("asynq: server cannot run with nil handler")
}
srv.processor.handler = handler
if err := srv.start(); err != nil {
return err
}
srv.logger.Info("Starting processing")
srv.heartbeater.start(&srv.wg)
srv.healthchecker.start(&srv.wg)
srv.subscriber.start(&srv.wg)
srv.syncer.start(&srv.wg)
srv.recoverer.start(&srv.wg)
srv.forwarder.start(&srv.wg)
srv.processor.start(&srv.wg)
srv.janitor.start(&srv.wg)
srv.aggregator.start(&srv.wg)
return nil
}
// Checks server state and returns an error if pre-condition is not met.
// Otherwise it sets the server state to active.
func (srv *Server) start() error {
srv.state.mu.Lock()
defer srv.state.mu.Unlock()
switch srv.state.value {
case srvStateActive:
return fmt.Errorf("asynq: the server is already running")
case srvStateStopped:
return fmt.Errorf("asynq: the server is in the stopped state. Waiting for shutdown.")
case srvStateClosed:
return ErrServerClosed
}
srv.state.value = srvStateActive
return nil
}
// Shutdown gracefully shuts down the server.
// It gracefully closes all active workers. The server will wait for
// active workers to finish processing tasks for the duration specified in Config.ShutdownTimeout.
// If a worker does not finish processing a task within the timeout, the task will be pushed back to Redis.
func (srv *Server) Shutdown() {
srv.state.mu.Lock()
if srv.state.value == srvStateNew || srv.state.value == srvStateClosed {
srv.state.mu.Unlock()
// server is not running, do nothing and return.
return
}
srv.state.value = srvStateClosed
srv.state.mu.Unlock()
srv.logger.Info("Starting graceful shutdown")
// Note: The order of shutdown is important.
// Sender goroutines should be terminated before the receiver goroutines.
// processor -> syncer (via syncCh)
// processor -> heartbeater (via starting, finished channels)
srv.forwarder.shutdown()
srv.processor.shutdown()
srv.recoverer.shutdown()
srv.syncer.shutdown()
srv.subscriber.shutdown()
srv.janitor.shutdown()
srv.aggregator.shutdown()
srv.healthchecker.shutdown()
srv.heartbeater.shutdown()
srv.wg.Wait()
srv.broker.Close()
srv.logger.Info("Exiting")
}
// Stop signals the server to stop pulling new tasks off queues.
// Stop can be used before shutting down the server to ensure that all
// currently active tasks are processed before server shutdown.
//
// Stop does not shutdown the server, make sure to call Shutdown before exit.
func (srv *Server) Stop() {
srv.state.mu.Lock()
if srv.state.value != srvStateActive {
// Invalid call to Stop; the server can only go from the Active state to the Stopped state.
srv.state.mu.Unlock()
return
}
srv.state.value = srvStateStopped
srv.state.mu.Unlock()
srv.logger.Info("Stopping processor")
srv.processor.stop()
srv.logger.Info("Processor stopped")
}
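A minimal sketch of wiring this Server together; the Redis address, concurrency, queue weights, and task type are illustrative assumptions:

package main

import (
	"context"
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	srv := asynq.NewServer(
		asynq.RedisClientOpt{Addr: "localhost:6379"}, // hypothetical address
		asynq.Config{
			Concurrency: 10,
			// "critical" gets roughly 6x the processing share of "low".
			Queues: map[string]int{"critical": 6, "default": 3, "low": 1},
		},
	)

	mux := asynq.NewServeMux()
	mux.HandleFunc("example:task", func(ctx context.Context, t *asynq.Task) error {
		return nil // hypothetical handler
	})

	// Run blocks until SIGTERM/SIGINT, then shuts down gracefully.
	if err := srv.Run(mux); err != nil {
		log.Fatal(err)
	}
}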

vendor/github.com/hibiken/asynq/signals_unix.go generated vendored Normal file

@ -0,0 +1,37 @@
// +build linux bsd darwin
package asynq
import (
"os"
"os/signal"
"golang.org/x/sys/unix"
)
// waitForSignals waits for signals and handles them.
// It handles SIGTERM, SIGINT, and SIGTSTP.
// SIGTERM and SIGINT will signal the process to exit.
// SIGTSTP will signal the process to stop processing new tasks.
func (srv *Server) waitForSignals() {
srv.logger.Info("Send signal TSTP to stop processing new tasks")
srv.logger.Info("Send signal TERM or INT to terminate the process")
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, unix.SIGTERM, unix.SIGINT, unix.SIGTSTP)
for {
sig := <-sigs
if sig == unix.SIGTSTP {
srv.Stop()
continue
}
break
}
}
func (s *Scheduler) waitForSignals() {
s.logger.Info("Send signal TERM or INT to stop the scheduler")
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, unix.SIGTERM, unix.SIGINT)
<-sigs
}

vendor/github.com/hibiken/asynq/signals_windows.go generated vendored Normal file

@ -0,0 +1,29 @@
// +build windows
package asynq
import (
"os"
"os/signal"
"golang.org/x/sys/windows"
)
// waitForSignals waits for signals and handles them.
// It handles SIGTERM and SIGINT.
// SIGTERM and SIGINT will signal the process to exit.
//
// Note: SIGTSTP is currently not supported on Windows builds.
func (srv *Server) waitForSignals() {
srv.logger.Info("Send signal TERM or INT to terminate the process")
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, windows.SIGTERM, windows.SIGINT)
<-sigs
}
func (s *Scheduler) waitForSignals() {
s.logger.Info("Send signal TERM or INT to stop the scheduler")
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, windows.SIGTERM, windows.SIGINT)
<-sigs
}
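Where the signal-driven Run above does not fit (for example when the server is embedded in a larger process, as in this relay), the same lifecycle can be driven manually; a sketch under that assumption:

package worker

import "github.com/hibiken/asynq"

// StartWorker starts srv with the given mux and returns a stop function that
// mirrors the two-phase stop of the signal handlers above: Stop() stops
// pulling new tasks, then Shutdown() drains the active workers.
func StartWorker(srv *asynq.Server, mux *asynq.ServeMux) (stop func(), err error) {
	if err := srv.Start(mux); err != nil {
		return nil, err
	}
	return func() {
		srv.Stop()
		srv.Shutdown()
	}, nil
}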

vendor/github.com/hibiken/asynq/subscriber.go generated vendored Normal file

@ -0,0 +1,90 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"time"
"github.com/go-redis/redis/v8"
"github.com/hibiken/asynq/internal/base"
"github.com/hibiken/asynq/internal/log"
)
type subscriber struct {
logger *log.Logger
broker base.Broker
// channel to communicate back to the long running "subscriber" goroutine.
done chan struct{}
// cancelations hold cancel functions for all active tasks.
cancelations *base.Cancelations
// time to wait before retrying to connect to redis.
retryTimeout time.Duration
}
type subscriberParams struct {
logger *log.Logger
broker base.Broker
cancelations *base.Cancelations
}
func newSubscriber(params subscriberParams) *subscriber {
return &subscriber{
logger: params.logger,
broker: params.broker,
done: make(chan struct{}),
cancelations: params.cancelations,
retryTimeout: 5 * time.Second,
}
}
func (s *subscriber) shutdown() {
s.logger.Debug("Subscriber shutting down...")
// Signal the subscriber goroutine to stop.
s.done <- struct{}{}
}
func (s *subscriber) start(wg *sync.WaitGroup) {
wg.Add(1)
go func() {
defer wg.Done()
var (
pubsub *redis.PubSub
err error
)
// Try until successfully connect to Redis.
for {
pubsub, err = s.broker.CancelationPubSub()
if err != nil {
s.logger.Errorf("cannot subscribe to cancelation channel: %v", err)
select {
case <-time.After(s.retryTimeout):
continue
case <-s.done:
s.logger.Debug("Subscriber done")
return
}
}
break
}
cancelCh := pubsub.Channel()
for {
select {
case <-s.done:
pubsub.Close()
s.logger.Debug("Subscriber done")
return
case msg := <-cancelCh:
cancel, ok := s.cancelations.Get(msg.Payload)
if ok {
cancel()
}
}
}
}()
}
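The cancelation messages this subscriber listens for are published when a task is canceled, for example via asynq's Inspector; a hedged sketch (the Redis address and task ID are placeholders):

package main

import (
	"log"

	"github.com/hibiken/asynq"
)

func main() {
	inspector := asynq.NewInspector(asynq.RedisClientOpt{Addr: "localhost:6379"}) // hypothetical address

	// CancelProcessing publishes the task ID on the cancelation channel;
	// the subscriber above looks up and invokes the matching cancel func.
	if err := inspector.CancelProcessing("task-id-placeholder"); err != nil {
		log.Printf("cancel: %v", err)
	}
}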

vendor/github.com/hibiken/asynq/syncer.go generated vendored Normal file

@ -0,0 +1,87 @@
// Copyright 2020 Kentaro Hibino. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.
package asynq
import (
"sync"
"time"
"github.com/hibiken/asynq/internal/log"
)
// syncer is responsible for queuing up failed requests to redis and retrying
// those requests to sync state between the background process and redis.
type syncer struct {
logger *log.Logger
requestsCh <-chan *syncRequest
// channel to communicate back to the long running "syncer" goroutine.
done chan struct{}
// interval between sync operations.
interval time.Duration
}
type syncRequest struct {
fn func() error // sync operation
errMsg string // error message
deadline time.Time // request should be dropped if deadline has been exceeded
}
type syncerParams struct {
logger *log.Logger
requestsCh <-chan *syncRequest
interval time.Duration
}
func newSyncer(params syncerParams) *syncer {
return &syncer{
logger: params.logger,
requestsCh: params.requestsCh,
done: make(chan struct{}),
interval: params.interval,
}
}
func (s *syncer) shutdown() {
s.logger.Debug("Syncer shutting down...")
// Signal the syncer goroutine to stop.
s.done <- struct{}{}
}
func (s *syncer) start(wg *sync.WaitGroup) {
wg.Add(1)
go func() {
defer wg.Done()
var requests []*syncRequest
for {
select {
case <-s.done:
// Try sync one last time before shutting down.
for _, req := range requests {
if err := req.fn(); err != nil {
s.logger.Error(req.errMsg)
}
}
s.logger.Debug("Syncer done")
return
case req := <-s.requestsCh:
requests = append(requests, req)
case <-time.After(s.interval):
var temp []*syncRequest
for _, req := range requests {
if req.deadline.Before(time.Now()) {
continue // drop stale request
}
if err := req.fn(); err != nil {
temp = append(temp, req)
}
}
requests = temp
}
}
}()
}

vendor/golang.org/x/time/AUTHORS generated vendored Normal file

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

vendor/golang.org/x/time/CONTRIBUTORS generated vendored Normal file

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

vendor/golang.org/x/time/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/golang.org/x/time/PATENTS generated vendored Normal file

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

vendor/golang.org/x/time/rate/rate.go generated vendored Normal file

@ -0,0 +1,419 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package rate provides a rate limiter.
package rate
import (
"context"
"fmt"
"math"
"sync"
"time"
)
// Limit defines the maximum frequency of some events.
// Limit is represented as number of events per second.
// A zero Limit allows no events.
type Limit float64
// Inf is the infinite rate limit; it allows all events (even if burst is zero).
const Inf = Limit(math.MaxFloat64)
// Every converts a minimum time interval between events to a Limit.
func Every(interval time.Duration) Limit {
if interval <= 0 {
return Inf
}
return 1 / Limit(interval.Seconds())
}
// A Limiter controls how frequently events are allowed to happen.
// It implements a "token bucket" of size b, initially full and refilled
// at rate r tokens per second.
// Informally, in any large enough time interval, the Limiter limits the
// rate to r tokens per second, with a maximum burst size of b events.
// As a special case, if r == Inf (the infinite rate), b is ignored.
// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets.
//
// The zero value is a valid Limiter, but it will reject all events.
// Use NewLimiter to create non-zero Limiters.
//
// Limiter has three main methods, Allow, Reserve, and Wait.
// Most callers should use Wait.
//
// Each of the three methods consumes a single token.
// They differ in their behavior when no token is available.
// If no token is available, Allow returns false.
// If no token is available, Reserve returns a reservation for a future token
// and the amount of time the caller must wait before using it.
// If no token is available, Wait blocks until one can be obtained
// or its associated context.Context is canceled.
//
// The methods AllowN, ReserveN, and WaitN consume n tokens.
type Limiter struct {
mu sync.Mutex
limit Limit
burst int
tokens float64
// last is the last time the limiter's tokens field was updated
last time.Time
// lastEvent is the latest time of a rate-limited event (past or future)
lastEvent time.Time
}
// Limit returns the maximum overall event rate.
func (lim *Limiter) Limit() Limit {
lim.mu.Lock()
defer lim.mu.Unlock()
return lim.limit
}
// Burst returns the maximum burst size. Burst is the maximum number of tokens
// that can be consumed in a single call to Allow, Reserve, or Wait, so higher
// Burst values allow more events to happen at once.
// A zero Burst allows no events, unless limit == Inf.
func (lim *Limiter) Burst() int {
lim.mu.Lock()
defer lim.mu.Unlock()
return lim.burst
}
// NewLimiter returns a new Limiter that allows events up to rate r and permits
// bursts of at most b tokens.
func NewLimiter(r Limit, b int) *Limiter {
return &Limiter{
limit: r,
burst: b,
}
}
// Allow is shorthand for AllowN(time.Now(), 1).
func (lim *Limiter) Allow() bool {
return lim.AllowN(time.Now(), 1)
}
// AllowN reports whether n events may happen at time now.
// Use this method if you intend to drop / skip events that exceed the rate limit.
// Otherwise use Reserve or Wait.
func (lim *Limiter) AllowN(now time.Time, n int) bool {
return lim.reserveN(now, n, 0).ok
}
// A Reservation holds information about events that are permitted by a Limiter to happen after a delay.
// A Reservation may be canceled, which may enable the Limiter to permit additional events.
type Reservation struct {
ok bool
lim *Limiter
tokens int
timeToAct time.Time
// This is the Limit at reservation time; it can change later.
limit Limit
}
// OK returns whether the limiter can provide the requested number of tokens
// within the maximum wait time. If OK is false, Delay returns InfDuration, and
// Cancel does nothing.
func (r *Reservation) OK() bool {
return r.ok
}
// Delay is shorthand for DelayFrom(time.Now()).
func (r *Reservation) Delay() time.Duration {
return r.DelayFrom(time.Now())
}
// InfDuration is the duration returned by Delay when a Reservation is not OK.
const InfDuration = time.Duration(1<<63 - 1)
// DelayFrom returns the duration for which the reservation holder must wait
// before taking the reserved action. Zero duration means act immediately.
// InfDuration means the limiter cannot grant the tokens requested in this
// Reservation within the maximum wait time.
func (r *Reservation) DelayFrom(now time.Time) time.Duration {
if !r.ok {
return InfDuration
}
delay := r.timeToAct.Sub(now)
if delay < 0 {
return 0
}
return delay
}
// Cancel is shorthand for CancelAt(time.Now()).
func (r *Reservation) Cancel() {
r.CancelAt(time.Now())
}
// CancelAt indicates that the reservation holder will not perform the reserved action
// and reverses the effects of this Reservation on the rate limit as much as possible,
// considering that other reservations may have already been made.
func (r *Reservation) CancelAt(now time.Time) {
if !r.ok {
return
}
r.lim.mu.Lock()
defer r.lim.mu.Unlock()
if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) {
return
}
// calculate tokens to restore
// The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved
// after r was obtained. These tokens should not be restored.
restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct))
if restoreTokens <= 0 {
return
}
// advance time to now
now, _, tokens := r.lim.advance(now)
// calculate new number of tokens
tokens += restoreTokens
if burst := float64(r.lim.burst); tokens > burst {
tokens = burst
}
// update state
r.lim.last = now
r.lim.tokens = tokens
if r.timeToAct == r.lim.lastEvent {
prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens)))
if !prevEvent.Before(now) {
r.lim.lastEvent = prevEvent
}
}
}
// Reserve is shorthand for ReserveN(time.Now(), 1).
func (lim *Limiter) Reserve() *Reservation {
return lim.ReserveN(time.Now(), 1)
}
// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen.
// The Limiter takes this Reservation into account when allowing future events.
// The returned Reservation's OK() method returns false if n exceeds the Limiter's burst size.
// Usage example:
//
// r := lim.ReserveN(time.Now(), 1)
// if !r.OK() {
// // Not allowed to act! Did you remember to set lim.burst to be > 0 ?
// return
// }
// time.Sleep(r.Delay())
// Act()
//
// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events.
// If you need to respect a deadline or cancel the delay, use Wait instead.
// To drop or skip events exceeding rate limit, use Allow instead.
func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation {
r := lim.reserveN(now, n, InfDuration)
return &r
}
// Wait is shorthand for WaitN(ctx, 1).
func (lim *Limiter) Wait(ctx context.Context) (err error) {
return lim.WaitN(ctx, 1)
}
// WaitN blocks until lim permits n events to happen.
// It returns an error if n exceeds the Limiter's burst size, the Context is
// canceled, or the expected wait time exceeds the Context's Deadline.
// The burst limit is ignored if the rate limit is Inf.
func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) {
// The test code calls lim.wait with a fake timer generator.
// This is the real timer generator.
newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) {
timer := time.NewTimer(d)
return timer.C, timer.Stop, func() {}
}
return lim.wait(ctx, n, time.Now(), newTimer)
}
// wait is the internal implementation of WaitN.
func (lim *Limiter) wait(ctx context.Context, n int, now time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error {
lim.mu.Lock()
burst := lim.burst
limit := lim.limit
lim.mu.Unlock()
if n > burst && limit != Inf {
return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, burst)
}
// Check if ctx is already cancelled
select {
case <-ctx.Done():
return ctx.Err()
default:
}
// Determine wait limit
waitLimit := InfDuration
if deadline, ok := ctx.Deadline(); ok {
waitLimit = deadline.Sub(now)
}
// Reserve
r := lim.reserveN(now, n, waitLimit)
if !r.ok {
return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n)
}
// Wait if necessary
delay := r.DelayFrom(now)
if delay == 0 {
return nil
}
ch, stop, advance := newTimer(delay)
defer stop()
advance() // only has an effect when testing
select {
case <-ch:
// We can proceed.
return nil
case <-ctx.Done():
// Context was canceled before we could proceed. Cancel the
// reservation, which may permit other events to proceed sooner.
r.Cancel()
return ctx.Err()
}
}
// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit).
func (lim *Limiter) SetLimit(newLimit Limit) {
lim.SetLimitAt(time.Now(), newLimit)
}
// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated
// or underutilized by those which reserved (using Reserve or Wait) but did not yet act
// before SetLimitAt was called.
func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) {
lim.mu.Lock()
defer lim.mu.Unlock()
now, _, tokens := lim.advance(now)
lim.last = now
lim.tokens = tokens
lim.limit = newLimit
}
// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst).
func (lim *Limiter) SetBurst(newBurst int) {
lim.SetBurstAt(time.Now(), newBurst)
}
// SetBurstAt sets a new burst size for the limiter.
func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) {
lim.mu.Lock()
defer lim.mu.Unlock()
now, _, tokens := lim.advance(now)
lim.last = now
lim.tokens = tokens
lim.burst = newBurst
}
// reserveN is a helper method for AllowN, ReserveN, and WaitN.
// maxFutureReserve specifies the maximum reservation wait duration allowed.
// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN.
func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation {
lim.mu.Lock()
defer lim.mu.Unlock()
if lim.limit == Inf {
return Reservation{
ok: true,
lim: lim,
tokens: n,
timeToAct: now,
}
} else if lim.limit == 0 {
var ok bool
if lim.burst >= n {
ok = true
lim.burst -= n
}
return Reservation{
ok: ok,
lim: lim,
tokens: lim.burst,
timeToAct: now,
}
}
now, last, tokens := lim.advance(now)
// Calculate the remaining number of tokens resulting from the request.
tokens -= float64(n)
// Calculate the wait duration
var waitDuration time.Duration
if tokens < 0 {
waitDuration = lim.limit.durationFromTokens(-tokens)
}
// Decide result
ok := n <= lim.burst && waitDuration <= maxFutureReserve
// Prepare reservation
r := Reservation{
ok: ok,
lim: lim,
limit: lim.limit,
}
if ok {
r.tokens = n
r.timeToAct = now.Add(waitDuration)
}
// Update state
if ok {
lim.last = now
lim.tokens = tokens
lim.lastEvent = r.timeToAct
} else {
lim.last = last
}
return r
}
// advance calculates and returns an updated state for lim resulting from the passage of time.
// lim is not changed.
// advance requires that lim.mu is held.
func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) {
last := lim.last
if now.Before(last) {
last = now
}
// Calculate the new number of tokens, due to time that passed.
elapsed := now.Sub(last)
delta := lim.limit.tokensFromDuration(elapsed)
tokens := lim.tokens + delta
if burst := float64(lim.burst); tokens > burst {
tokens = burst
}
return now, last, tokens
}
// durationFromTokens is a unit conversion function from the number of tokens to the duration
// of time it takes to accumulate them at a rate of limit tokens per second.
func (limit Limit) durationFromTokens(tokens float64) time.Duration {
if limit <= 0 {
return InfDuration
}
seconds := tokens / float64(limit)
return time.Duration(float64(time.Second) * seconds)
}
// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
// which could be accumulated during that duration at a rate of limit tokens per second.
func (limit Limit) tokensFromDuration(d time.Duration) float64 {
if limit <= 0 {
return 0
}
return d.Seconds() * float64(limit)
}
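A short sketch of the typical Wait-based use of this limiter; the rate and burst values are arbitrary:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Two events per second (one token every 500ms), with a burst of 4.
	limiter := rate.NewLimiter(rate.Every(500*time.Millisecond), 4)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	for i := 0; i < 6; i++ {
		// Wait blocks until a token is available or ctx is done.
		if err := limiter.Wait(ctx); err != nil {
			fmt.Println("wait:", err)
			return
		}
		fmt.Println("event", i)
	}
}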

vendor/modules.txt vendored

@ -310,6 +310,16 @@ github.com/hashicorp/hcl/hcl/token
github.com/hashicorp/hcl/json/parser
github.com/hashicorp/hcl/json/scanner
github.com/hashicorp/hcl/json/token
# github.com/hibiken/asynq v0.24.0
## explicit; go 1.14
github.com/hibiken/asynq
github.com/hibiken/asynq/internal/base
github.com/hibiken/asynq/internal/context
github.com/hibiken/asynq/internal/errors
github.com/hibiken/asynq/internal/log
github.com/hibiken/asynq/internal/proto
github.com/hibiken/asynq/internal/rdb
github.com/hibiken/asynq/internal/timeutil
# github.com/inconshreveable/mousetrap v1.0.1
## explicit; go 1.18
github.com/inconshreveable/mousetrap
@ -676,6 +686,9 @@ golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
# golang.org/x/time v0.0.0-20220609170525-579cf78fd858
## explicit
golang.org/x/time/rate
# golang.org/x/tools v0.3.0
## explicit; go 1.18
golang.org/x/tools/go/gcexportdata