package worker

import (
	"context"
	"flag"
	"os"
	"sync"
	"time"

	"github.com/cortexproject/cortex/pkg/ring"
	"github.com/cortexproject/cortex/pkg/util"
	"github.com/cortexproject/cortex/pkg/util/grpcclient"
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
	"github.com/grafana/dskit/services"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/weaveworks/common/httpgrpc"
	"google.golang.org/grpc"

	lokiutil "github.com/grafana/loki/pkg/util"
)
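
// Config holds the configuration for the workers a querier runs to pull
// queries from a query-frontend or query-scheduler.
//
// Illustrative YAML (field names come from the struct tags below; the values
// and the enclosing section name are examples only and depend on where this
// struct is embedded in the parent configuration):
//
//	frontend_address: query-frontend.example.svc:9095
//	parallelism: 10
//	match_max_concurrent: true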
type Config struct {
	FrontendAddress       string            `yaml:"frontend_address"`
	SchedulerAddress      string            `yaml:"scheduler_address"`
	DNSLookupPeriod       time.Duration     `yaml:"dns_lookup_duration"`
	Parallelism           int               `yaml:"parallelism"`
	MatchMaxConcurrency   bool              `yaml:"match_max_concurrent"`
	MaxConcurrentRequests int               `yaml:"-"` // Must be the same as the value passed to the PromQL engine.
	QuerierID             string            `yaml:"id"`
	GRPCClientConfig      grpcclient.Config `yaml:"grpc_client_config"`
}
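
// RegisterFlags registers the worker's command-line flags with the given FlagSet.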
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
	f.StringVar(&cfg.SchedulerAddress, "querier.scheduler-address", "", "Hostname (and port) of scheduler that querier will periodically resolve, connect to and receive queries from. Only one of -querier.frontend-address or -querier.scheduler-address can be set. If neither is set, queries are only received via HTTP endpoint.")
	f.StringVar(&cfg.FrontendAddress, "querier.frontend-address", "", "Address of query frontend service, in host:port format. If -querier.scheduler-address is set as well, querier will use scheduler instead. Only one of -querier.frontend-address or -querier.scheduler-address can be set. If neither is set, queries are only received via HTTP endpoint.")
	f.DurationVar(&cfg.DNSLookupPeriod, "querier.dns-lookup-period", 3*time.Second, "How often to query DNS for query-frontend or query-scheduler address. Also used to determine how often to poll the scheduler-ring for addresses if the scheduler-ring is configured.")
	f.IntVar(&cfg.Parallelism, "querier.worker-parallelism", 10, "Number of simultaneous queries to process per query-frontend or query-scheduler.")
	f.BoolVar(&cfg.MatchMaxConcurrency, "querier.worker-match-max-concurrent", false, "Force worker concurrency to match the -querier.max-concurrent option. Overrides querier.worker-parallelism.")
	f.StringVar(&cfg.QuerierID, "querier.id", "", "Querier ID, sent to frontend service to identify requests from the same querier. Defaults to hostname.")

	cfg.GRPCClientConfig.RegisterFlagsWithPrefix("querier.frontend-client", f)
}
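
// Validate checks that at most one of the frontend and scheduler addresses is
// configured and validates the gRPC client configuration.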
func (cfg *Config) Validate(log log.Logger) error {
	if cfg.FrontendAddress != "" && cfg.SchedulerAddress != "" {
		return errors.New("frontend address and scheduler address are mutually exclusive, please use only one")
	}
	return cfg.GRPCClientConfig.Validate(log)
}

// RequestHandler handles HTTP requests wrapped in protobuf messages.
type RequestHandler interface {
	Handle(context.Context, *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error)
}

// A processor handles all streaming operations to a query-frontend or
// query-scheduler used to fetch queries and process them.
type processor interface {
	// Each invocation of processQueriesOnSingleStream starts a new streaming
	// operation to the query-frontend or query-scheduler to fetch queries and
	// execute them.
	//
	// This method must react to the context being canceled and stop when that happens.
	//
	// processorManager (not processor) is responsible for starting as many
	// goroutines as needed for each connection.
	processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address string)

	// notifyShutdown notifies the remote query-frontend or query-scheduler that
	// the querier is shutting down.
	notifyShutdown(ctx context.Context, conn *grpc.ClientConn, address string)
}
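
// querierWorker maintains one processorManager (and one gRPC connection) per
// discovered query-frontend or query-scheduler address, and distributes the
// configured concurrency across them.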
type querierWorker struct {
	*services.BasicService

	cfg    Config
	logger log.Logger

	processor processor

	subservices *services.Manager

	mu sync.Mutex
	// Set to nil when stop is called; no new managers are created afterwards.
	managers map[string]*processorManager
}
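
// NewQuerierWorker creates a querier worker service. Depending on the
// configuration it connects to a query-scheduler (directly or via the
// scheduler ring) or to a query-frontend, and pulls queries from it.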
func NewQuerierWorker(cfg Config, rng ring.ReadRing, handler RequestHandler, logger log.Logger, reg prometheus.Registerer) (services.Service, error) {
	if cfg.QuerierID == "" {
		hostname, err := os.Hostname()
		if err != nil {
			return nil, errors.Wrap(err, "failed to get hostname for configuring querier ID")
		}
		cfg.QuerierID = hostname
	}

	var processor processor
	var servs []services.Service
	var address string

	switch {
	case rng != nil:
		level.Info(logger).Log("msg", "Starting querier worker using query-scheduler and scheduler ring for addresses")
		processor, servs = newSchedulerProcessor(cfg, handler, logger, reg)

	case cfg.SchedulerAddress != "":
		level.Info(logger).Log("msg", "Starting querier worker connected to query-scheduler", "scheduler", cfg.SchedulerAddress)
		address = cfg.SchedulerAddress
		processor, servs = newSchedulerProcessor(cfg, handler, logger, reg)

	case cfg.FrontendAddress != "":
		level.Info(logger).Log("msg", "Starting querier worker connected to query-frontend", "frontend", cfg.FrontendAddress)
		address = cfg.FrontendAddress
		processor = newFrontendProcessor(cfg, handler, logger)

	default:
		return nil, errors.New("unable to start the querier worker, need to configure one of frontend_address, scheduler_address, or a ring config in the query_scheduler config block")
	}

	return newQuerierWorkerWithProcessor(cfg, logger, processor, address, rng, servs)
}

func newQuerierWorkerWithProcessor(cfg Config, logger log.Logger, processor processor, address string, ring ring.ReadRing, servs []services.Service) (*querierWorker, error) {
	f := &querierWorker{
		cfg:       cfg,
		logger:    logger,
		managers:  map[string]*processorManager{},
		processor: processor,
	}

	// Empty address is only used in tests, where individual targets are added manually.
	if address != "" {
		w, err := util.NewDNSWatcher(address, cfg.DNSLookupPeriod, f)
		if err != nil {
			return nil, err
		}
		servs = append(servs, w)
	}

	if ring != nil {
		w, err := lokiutil.NewRingWatcher(log.With(logger, "component", "querier-scheduler-worker"), ring, cfg.DNSLookupPeriod, f)
		if err != nil {
			return nil, err
		}
		servs = append(servs, w)
	}

	if len(servs) > 0 {
		subservices, err := services.NewManager(servs...)
		if err != nil {
			return nil, errors.Wrap(err, "querier worker subservices")
		}
		f.subservices = subservices
	}

	f.BasicService = services.NewIdleService(f.starting, f.stopping)
	return f, nil
}

func (w *querierWorker) starting(ctx context.Context) error {
	if w.subservices == nil {
		return nil
	}
	return services.StartManagerAndAwaitHealthy(ctx, w.subservices)
}

func (w *querierWorker) stopping(_ error) error {
	// Stop all goroutines fetching queries. Note that in the Stopping state the
	// worker no longer creates new managers in the AddressAdded method.
	w.mu.Lock()
	for _, m := range w.managers {
		m.stop()
	}
	w.mu.Unlock()

	if w.subservices == nil {
		return nil
	}

	// Stop DNS watcher and services used by processor.
	return services.StopManagerAndAwaitStopped(context.Background(), w.subservices)
}
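
// AddressAdded is called by the DNS or ring watcher when a new query-frontend
// or query-scheduler address is discovered. It opens a gRPC connection to the
// address and starts a processorManager for it.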
func (w *querierWorker) AddressAdded(address string) {
	ctx := w.ServiceContext()
	if ctx == nil || ctx.Err() != nil {
		return
	}

	w.mu.Lock()
	defer w.mu.Unlock()

	if m := w.managers[address]; m != nil {
		return
	}

	level.Info(w.logger).Log("msg", "adding connection", "addr", address)
	conn, err := w.connect(context.Background(), address)
	if err != nil {
		level.Error(w.logger).Log("msg", "error connecting", "addr", address, "err", err)
		return
	}

	w.managers[address] = newProcessorManager(ctx, w.processor, conn, address)
	// Called with lock.
	w.resetConcurrency()
}
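
// AddressRemoved is called by the DNS or ring watcher when an address goes
// away. It stops and removes the corresponding processorManager, then
// redistributes concurrency across the remaining managers.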
func (w *querierWorker) AddressRemoved(address string) {
	level.Info(w.logger).Log("msg", "removing connection", "addr", address)

	w.mu.Lock()
	p := w.managers[address]
	delete(w.managers, address)
	// Called with lock.
	w.resetConcurrency()
	w.mu.Unlock()

	if p != nil {
		p.stop()
	}
}
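
// resetConcurrency redistributes the configured concurrency across the
// currently connected targets. For example, with MatchMaxConcurrency enabled,
// MaxConcurrentRequests=10 and three targets, 10/3 = 3 with remainder 1, so
// one target gets 4 concurrent workers and the other two get 3 each.
//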
// Must be called with lock.
func (w *querierWorker) resetConcurrency() {
	totalConcurrency := 0
	index := 0

	for _, m := range w.managers {
		concurrency := 0

		if w.cfg.MatchMaxConcurrency {
			concurrency = w.cfg.MaxConcurrentRequests / len(w.managers)

			// If max concurrency does not divide evenly across our targets, a subset
			// is chosen to receive an extra connection. Map iteration order is
			// randomized, so this is effectively a random selection of targets.
			if index < w.cfg.MaxConcurrentRequests%len(w.managers) {
				level.Warn(w.logger).Log("msg", "max concurrency is not evenly divisible across targets, adding an extra connection", "addr", m.address)
				concurrency++
			}
		} else {
			concurrency = w.cfg.Parallelism
		}

		// If concurrency is 0, then MaxConcurrentRequests is less than the total number
		// of frontends/schedulers. To avoid accidentally starving a frontend or scheduler,
		// always connect at least once to every target. This is dangerous because we may
		// start exceeding the PromQL max concurrency.
		if concurrency == 0 {
			concurrency = 1
		}

		totalConcurrency += concurrency
		m.concurrency(concurrency)
		index++
	}

	if totalConcurrency > w.cfg.MaxConcurrentRequests {
		level.Warn(w.logger).Log("msg", "total worker concurrency is greater than promql max concurrency. Queries may be queued in the querier which reduces QOS")
	}
}
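
// connect dials the given address using the worker's gRPC client configuration.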
func (w *querierWorker) connect(ctx context.Context, address string) (*grpc.ClientConn, error) {
	// Because we only use a single, long-running method per connection, it doesn't
	// make sense to inject the user ID, propagate tracing, or add per-call metrics.
	opts, err := w.cfg.GRPCClientConfig.DialOption(nil, nil)
	if err != nil {
		return nil, err
	}

	conn, err := grpc.DialContext(ctx, address, opts...)
	if err != nil {
		return nil, err
	}
	return conn, nil
}