asynq.go

package queue

import (
	"bytes"
	"context"
	"fmt"
	"log"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/hibiken/asynq"

	"github.com/voidshard/igor/pkg/database"
	"github.com/voidshard/igor/pkg/structs"
)

const (
	asyncWorkQueue   = "igor:work"
	asyncAggMaxSize  = 1000
	asyncAggMaxDelay = 2 * time.Second
	asyncAggRune     = "¬"

	asyncEnvVarUser     = "ASYNQ_REDIS_USER"
	asyncEnvVarPassword = "ASYNQ_REDIS_PASSWORD"
)

// Asynq is a Queue implementation that uses asynq.
// See github.com/hibiken/asynq
type Asynq struct {
	opts *Options

	// the asynq client & inspector
	ins *asynq.Inspector
	cli *asynq.Client

	// the funcs we're allowed to call inside of igor
	svc database.QueueDB

	// if Register is called we're intended to start a server
	lock sync.Mutex
	mux  *asynq.ServeMux
	srv  *asynq.Server

	// errs is a channel of errors that we send to.
	// Currently we just log these; we should probably support some output / callback.
	errs chan error
}

// NewAsynqQueue returns a new Asynq queue with the given settings.
func NewAsynqQueue(svc database.QueueDB, opts *Options) (*Asynq, error) {
	redisOpts := asynq.RedisClientOpt{
		Addr:      opts.URL,
		Username:  os.Getenv(asyncEnvVarUser),
		Password:  os.Getenv(asyncEnvVarPassword),
		TLSConfig: opts.TLSConfig,
	}
	ins := asynq.NewInspector(redisOpts)
	cli := asynq.NewClient(redisOpts)
	return &Asynq{
		opts: opts,
		ins:  ins,
		cli:  cli,
		svc:  svc,
	}, nil
}
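
// newAsynqSketch is a minimal illustrative sketch (not part of the package API)
// of how NewAsynqQueue might be wired up. The Redis address is a placeholder;
// credentials, if any, are read from ASYNQ_REDIS_USER / ASYNQ_REDIS_PASSWORD.
func newAsynqSketch(svc database.QueueDB) (*Asynq, error) {
	// assumes a plain (non-TLS) local Redis; set Options.TLSConfig for TLS
	return NewAsynqQueue(svc, &Options{URL: "localhost:6379"})
}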

// Close shuts down the queue.
func (a *Asynq) Close() error {
	if a.srv == nil {
		return nil
	}
	a.srv.Stop()
	a.srv.Shutdown()
	close(a.errs)
	return nil
}

// Run starts the queue; this is only required for workers (i.e. handlers).
func (a *Asynq) Run() error {
	if a.srv == nil {
		a.buildServer()
	}
	return a.srv.Run(a.mux)
}
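
// runSketch is an illustrative sketch (not part of the package API) of a
// worker's lifecycle: register handlers first, then call Run, which blocks
// processing tasks until the server is told to stop (e.g. via Close from
// another goroutine). The task type "example.task" is hypothetical.
func runSketch(q *Asynq) error {
	if err := q.Register("example.task", func(work []*Meta) error { return nil }); err != nil {
		return err
	}
	return q.Run()
}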

// Register a handler for a task type; this allows us to process tasks (indicated by their Type).
//
// Note that the handler is passed a []*Meta, a list of Task wrappers.
// A user may optionally call SetError(), SetComplete() or SetRunning() per Meta (task) in the list
// to have this value reflected in the database.
//
// If a user does *not* call one of these, Igor implicitly calls SetComplete() for them at the conclusion
// of the handler.
//
// Note that returning an error from this handler does not imply SetError() is called, as it is assumed
// that already completed work is still valid.
func (a *Asynq) Register(task string, handler func(work []*Meta) error) error {
	if a.mux == nil {
		a.buildServer()
	}
	a.mux.HandleFunc(aggregatedTask(task), func(ctx context.Context, t *asynq.Task) error {
		meta, err := a.deaggregateTasks(t)
		if err != nil {
			return err
		}
		err = handler(meta)
		if err != nil {
			return err
		}
		// implicitly mark anything the handler left queued / running as complete
		for _, m := range meta {
			if m.Task.Status == structs.QUEUED || m.Task.Status == structs.RUNNING {
				m.SetComplete()
			}
		}
		return nil
	})
	return nil
}
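
// registerSketch is an illustrative sketch (not part of the package API) of
// registering a handler; "example.task" is a hypothetical task type. Calling
// SetComplete() per Meta is optional: any task left QUEUED / RUNNING when the
// handler returns nil is completed implicitly by Register's wrapper above.
func registerSketch(q *Asynq) error {
	return q.Register("example.task", func(work []*Meta) error {
		for _, m := range work {
			// ... do the real work for m.Task here ...
			m.SetComplete()
		}
		return nil
	})
}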

// Kill calls our underlying queue to cancel a task by its ID (given to us when we Enqueue() it).
func (a *Asynq) Kill(queuedTaskID string) error {
	// Best effort cancel; asynq can't guarantee this will kill it
	return a.ins.CancelProcessing(queuedTaskID)
}

// deaggregateTasks takes a task that has been aggregated and returns a list of tasks.
func (a *Asynq) deaggregateTasks(t *asynq.Task) ([]*Meta, error) {
	// parse the payload into task IDs
	taskIDs := []string{}
	for _, load := range bytes.Split(t.Payload(), []byte(asyncAggRune)) {
		id := bytes.TrimSpace(load)
		if len(id) == 0 {
			continue
		}
		taskIDs = append(taskIDs, string(id))
	}

	// read out the tasks
	tasks, err := a.svc.Tasks(taskIDs)
	if err != nil {
		return nil, err
	}

	// build "metas" (sugar for callers)
	ms := []*Meta{}
	for _, t := range tasks {
		ms = append(ms, &Meta{Task: t, svc: a.svc, errs: a.errs})
	}
	return ms, nil
}

// Enqueue a task to be processed by a worker.
func (a *Asynq) Enqueue(task *structs.Task) (string, error) {
	qtask := asynq.NewTask(task.Type, []byte(task.ID), asynq.MaxRetry(int(task.Retries)))
	info, err := a.cli.Enqueue(qtask, asynq.Queue(asyncWorkQueue), asynq.Group(aggregatedTask(task.Type)))
	if err != nil {
		return "", err
	}
	return info.ID, nil
}
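
// enqueueSketch is an illustrative sketch (not part of the package API) showing
// Enqueue and the queued ID it returns. Enqueue only reads task.Type, task.ID
// and task.Retries; the rest of the task is assumed to already be in the database.
func enqueueSketch(a *Asynq, task *structs.Task) {
	queuedID, err := a.Enqueue(task)
	if err != nil {
		log.Println("enqueue failed:", err)
		return
	}
	// queuedID is what Kill() expects if we later want to best-effort cancel this task
	log.Println("queued as:", queuedID)
}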

// buildServer creates a new server & mux for us to use.
//
// We set this up with a simple aggregator that just concatenates task IDs together.
// We only need to pass the task ID, since Igor stores the rest of the data in the database (which we can
// look up by ID in batches pretty efficiently).
func (a *Asynq) buildServer() {
	a.lock.Lock()
	defer a.lock.Unlock()
	if a.mux != nil {
		// someone locked and set this first
		return
	}
	srv := asynq.NewServer(
		// mirror the connection settings used by the client & inspector
		asynq.RedisClientOpt{
			Addr:      a.opts.URL,
			Username:  os.Getenv(asyncEnvVarUser),
			Password:  os.Getenv(asyncEnvVarPassword),
			TLSConfig: a.opts.TLSConfig,
		},
		asynq.Config{
			Queues:          map[string]int{asyncWorkQueue: 1},
			GroupAggregator: asynq.GroupAggregatorFunc(aggregate),
			GroupMaxSize:    asyncAggMaxSize,
			GroupMaxDelay:   asyncAggMaxDelay,
		},
	)
	mux := asynq.NewServeMux()

	a.srv = srv
	a.mux = mux
	a.errs = make(chan error)

	// drain the error channel and log anything sent to it
	go func() {
		for err := range a.errs {
			if err != nil {
				log.Println("asynq error:", err)
			}
		}
	}()
}

// aggregate takes multiple tasks from Asynq and aggregates them into a single task.
//
// We do this by concatenating the task IDs together, separated by a rune.
// This is what deaggregateTasks() does in reverse.
func aggregate(group string, tasks []*asynq.Task) *asynq.Task {
	var b strings.Builder
	for _, t := range tasks {
		if t == nil || t.Payload() == nil {
			continue
		}
		b.Write(t.Payload())
		b.WriteString(asyncAggRune)
	}
	return asynq.NewTask(group, []byte(b.String()))
}
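
// aggregationSketch is an illustrative sketch (not part of the package API) of
// the aggregate / deaggregateTasks round trip: payloads "a", "b" and "c" become
// a single payload "a¬b¬c¬", which deaggregateTasks later splits back into the
// three IDs before looking them up in the database. "example.task" is hypothetical.
func aggregationSketch() {
	tasks := []*asynq.Task{
		asynq.NewTask("example.task", []byte("a")),
		asynq.NewTask("example.task", []byte("b")),
		asynq.NewTask("example.task", []byte("c")),
	}
	combined := aggregate(aggregatedTask("example.task"), tasks)
	log.Printf("aggregated payload: %q", combined.Payload()) // "a¬b¬c¬"
}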

// aggregatedTask returns the name of the aggregated task for a given group.
//
// Internally we set this in Asynq as the handler for a group so we can do automatic aggregation.
func aggregatedTask(group string) string {
	return fmt.Sprintf("aggregated%s", group)
}