forked from thanos-io/thanos
-
Notifications
You must be signed in to change notification settings - Fork 0
/
quickstart.sh
executable file
·294 lines (255 loc) · 7.75 KB
/
quickstart.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
#!/usr/bin/env bash
#
# Starts three Prometheus servers scraping themselves and sidecars for each.
# Two query nodes are started and all are clustered together.

# Kill every process in this script's process group on termination.
trap 'kill 0' SIGTERM

# All binaries (and the Minio toggle) are overridable via the environment;
# assign the default only when the variable is unset or empty.
: "${MINIO_ENABLED:=}"
: "${MINIO_EXECUTABLE:=minio}"
: "${MC_EXECUTABLE:=mc}"
: "${PROMETHEUS_EXECUTABLE:=prometheus}"
: "${THANOS_EXECUTABLE:=thanos}"

# Filled in by the Minio branch below when local object storage is enabled.
S3_ENDPOINT=""
# Fail fast when the required binaries are not on PATH (or not executable).
# The original '[ ! $(command -v ...) ]' relied on unquoted word splitting
# and broke for executable paths containing spaces; testing the exit status
# of 'command -v' directly is the portable idiom. Diagnostics go to stderr.
if ! command -v "$PROMETHEUS_EXECUTABLE" >/dev/null 2>&1; then
  echo "Cannot find or execute Prometheus binary $PROMETHEUS_EXECUTABLE, you can override it by setting the PROMETHEUS_EXECUTABLE env variable" >&2
  exit 1
fi

if ! command -v "$THANOS_EXECUTABLE" >/dev/null 2>&1; then
  echo "Cannot find or execute Thanos binary $THANOS_EXECUTABLE, you can override it by setting the THANOS_EXECUTABLE env variable" >&2
  exit 1
fi
# Start local object storage, if desired.
# NOTE: If you would like to use an actual S3-compatible API with this setup
# set the S3_* environment variables set in the Minio example.
if [ -n "${MINIO_ENABLED}" ]; then
  # Fail fast when the Minio server/client binaries are missing; the original
  # '[ ! $(command -v ...) ]' broke for paths containing spaces.
  if ! command -v "$MINIO_EXECUTABLE" >/dev/null 2>&1; then
    echo "Cannot find or execute Minio binary $MINIO_EXECUTABLE, you can override it by setting the MINIO_EXECUTABLE env variable" >&2
    exit 1
  fi
  if ! command -v "$MC_EXECUTABLE" >/dev/null 2>&1; then
    echo "Cannot find or execute Minio client binary $MC_EXECUTABLE, you can override it by setting the MC_EXECUTABLE env variable" >&2
    exit 1
  fi

  # Static credentials for the throw-away local Minio instance.
  export MINIO_ACCESS_KEY="THANOS"
  export MINIO_SECRET_KEY="ITSTHANOSTIME"
  export MINIO_ENDPOINT="127.0.0.1:9000"
  export MINIO_BUCKET="thanos"

  # Mirror Minio settings into the generic S3_* variables used further down.
  export S3_ACCESS_KEY=${MINIO_ACCESS_KEY}
  export S3_SECRET_KEY=${MINIO_SECRET_KEY}
  export S3_BUCKET=${MINIO_BUCKET}
  export S3_ENDPOINT=${MINIO_ENDPOINT}
  export S3_INSECURE="true"
  export S3_V2_SIGNATURE="true"

  # Always start from an empty bucket directory.
  rm -rf data/minio
  mkdir -p data/minio

  "${MINIO_EXECUTABLE}" server ./data/minio \
    --address "${MINIO_ENDPOINT}" &
  sleep 3

  # create the bucket
  "${MC_EXECUTABLE}" config host add tmp http://"${MINIO_ENDPOINT}" "${MINIO_ACCESS_KEY}" "${MINIO_SECRET_KEY}"
  "${MC_EXECUTABLE}" mb tmp/"${MINIO_BUCKET}"
  "${MC_EXECUTABLE}" config host rm tmp

  # Object-store client configuration shared by all Thanos components.
  # (YAML nesting reconstructed: the keys must sit under 'config:'.)
  cat <<EOF >data/bucket.yml
type: S3
config:
  bucket: $S3_BUCKET
  endpoint: $S3_ENDPOINT
  insecure: $S3_INSECURE
  signature_version2: $S3_V2_SIGNATURE
  access_key: $S3_ACCESS_KEY
  secret_key: $S3_SECRET_KEY
EOF
fi
# Setup alert / rules config file.
# 'mkdir -p data' guards the first unconditional write into data/: the Minio
# branch above (which creates data/minio) is optional, and the per-Prometheus
# directories are only created later, so on a clean tree with Minio disabled
# the redirect below would otherwise fail.
mkdir -p data
cat >data/rules.yml <<EOF
groups:
  - name: example
    rules:
      - record: job:http_inprogress_requests:sum
        expr: sum(http_inprogress_requests) by (job)
EOF
# Accumulates one '--store <addr>' argument per store-API endpoint; consumed
# by the queriers started further down.
STORES=""

# Start three Prometheus servers monitoring themselves.
for i in $(seq 0 2); do
  rm -rf data/prom"${i}"
  mkdir -p data/prom"${i}"/

  # Per-instance Prometheus configuration. Each server scrapes itself, the
  # matching remote-write Prometheus (5909${i}), its sidecar, the optional
  # store gateway, the receivers and both queriers.
  # Fixed: the 'localhost:5909${i}' target was listed three times; identical
  # targets carry identical label sets, so the duplicates were redundant.
  cat >data/prom"${i}"/prometheus.yml <<EOF
global:
  external_labels:
    prometheus: prom-${i}
rule_files:
  - 'rules.yml'
scrape_configs:
  - job_name: prometheus
    scrape_interval: 5s
    static_configs:
      - targets:
          - "localhost:909${i}"
          - "localhost:5909${i}"
  - job_name: thanos-sidecar
    scrape_interval: 5s
    static_configs:
      - targets:
          - "localhost:109${i}2"
  - job_name: thanos-store
    scrape_interval: 5s
    static_configs:
      - targets:
          - "localhost:10906"
  - job_name: thanos-receive
    scrape_interval: 5s
    static_configs:
      - targets:
          - "localhost:10909"
          - "localhost:11909"
          - "localhost:12909"
  - job_name: thanos-query
    scrape_interval: 5s
    static_configs:
      - targets:
          - "localhost:10904"
          - "localhost:10914"
EOF

  cp data/rules.yml data/prom"${i}"/rules.yml

  # NOTE(review): min/max block duration pinned to the same 2h value —
  # presumably to disable local compaction for the sidecar; confirm against
  # Thanos sidecar documentation.
  ${PROMETHEUS_EXECUTABLE} \
    --config.file data/prom"${i}"/prometheus.yml \
    --storage.tsdb.path data/prom"${i}" \
    --log.level warn \
    --web.enable-lifecycle \
    --storage.tsdb.min-block-duration=2h \
    --storage.tsdb.max-block-duration=2h \
    --web.listen-address 0.0.0.0:909"${i}" &
  sleep 0.25
done
sleep 0.5

# Every component shares the same object-store flag when Minio is enabled;
# otherwise the placeholder stays empty and (unquoted) expands to nothing.
OBJSTORECFG=${MINIO_ENABLED:+"--objstore.config-file data/bucket.yml"}
# Start one sidecar for each Prometheus server. A sidecar exposes its
# Prometheus instance's data over the store API and, when OBJSTORECFG is
# set, ships TSDB blocks to object storage.
for idx in 0 1 2; do
  ${THANOS_EXECUTABLE} sidecar \
    --debug.name sidecar-"${idx}" \
    --log.level debug \
    --grpc-address 0.0.0.0:109"${idx}"1 \
    --grpc-grace-period 1s \
    --http-address 0.0.0.0:109"${idx}"2 \
    --http-grace-period 1s \
    --prometheus.url http://localhost:909"${idx}" \
    --tsdb.path data/prom"${idx}" \
    ${OBJSTORECFG} &

  # Register this sidecar's gRPC endpoint with the queriers started later.
  STORES="${STORES} --store 127.0.0.1:109${idx}1"
  sleep 0.25
done

sleep 0.5
# Start the store gateway only when an object store is configured.
# The original used '[ ... -o ... ]'; '-a'/'-o' are deprecated and ambiguous
# per POSIX, so two separate tests joined by '||' are used instead.
if [ -n "${GCS_BUCKET}" ] || [ -n "${S3_ENDPOINT}" ]; then
  ${THANOS_EXECUTABLE} store \
    --debug.name store \
    --log.level debug \
    --grpc-address 0.0.0.0:10905 \
    --grpc-grace-period 1s \
    --http-address 0.0.0.0:10906 \
    --http-grace-period 1s \
    --data-dir data/store \
    ${OBJSTORECFG} &

  # Expose the gateway's store API to the queriers as well.
  STORES="${STORES} --store 127.0.0.1:10905"
fi
sleep 0.5

# Optionally start the remote-write pipeline: three receivers that share one
# hashring, plus three local Prometheus servers remote-writing into them.
if [ -n "${REMOTE_WRITE_ENABLED}" ]; then
  # One hashring spanning all three receivers' gRPC endpoints.
  cat <<EOF >./data/hashring.json
[{"endpoints":["127.0.0.1:10907","127.0.0.1:11907","127.0.0.1:12907"]}]
EOF

  for i in $(seq 0 1 2); do
    ${THANOS_EXECUTABLE} receive \
      --debug.name receive${i} \
      --log.level debug \
      --tsdb.path "./data/remote-write-receive-${i}-data" \
      --grpc-address 0.0.0.0:1${i}907 \
      --grpc-grace-period 1s \
      --http-address 0.0.0.0:1${i}909 \
      --http-grace-period 1s \
      --receive.replication-factor 1 \
      --tsdb.min-block-duration 5m \
      --tsdb.max-block-duration 5m \
      --label "receive_replica=\"${i}\"" \
      --label 'receive="true"' \
      --receive.local-endpoint 127.0.0.1:1${i}907 \
      --receive.hashrings-file ./data/hashring.json \
      --remote-write.address 0.0.0.0:1${i}908 \
      ${OBJSTORECFG} &

    # Receivers also speak the store API; register them with the queriers.
    STORES="${STORES} --store 127.0.0.1:1${i}907"
  done

  for i in $(seq 0 1 2); do
    mkdir -p "data/local-prometheus-${i}-data/"
    # YAML nesting reconstructed (external_labels under global:, the target
    # list under static_configs:) — the keys must nest to be valid config.
    cat <<EOF >data/local-prometheus-${i}-data/prometheus.yml
global:
  external_labels:
    prometheus: prom${i}
    replica: 1
# When the Thanos remote-write-receive component is started,
# this is an example configuration of a Prometheus server that
# would scrape a local node-exporter and replicate its data to
# the remote write endpoint.
scrape_configs:
  - job_name: test
    scrape_interval: 1s
    static_configs:
      - targets:
          - fake
remote_write:
  - url: http://localhost:1${i}908/api/v1/receive
EOF
    ${PROMETHEUS_EXECUTABLE} \
      --web.listen-address ":5909${i}" \
      --config.file data/local-prometheus-${i}-data/prometheus.yml \
      --storage.tsdb.path "data/local-prometheus-${i}-data/" &
  done
fi
sleep 0.5

# Inline Jaeger tracing configuration passed to every querier via
# --tracing.config. YAML nesting reconstructed: service_name/sampler_* must
# sit under 'config:' to be a valid nested document.
QUERIER_JAEGER_CONFIG=$(
  cat <<EOF
type: JAEGER
config:
  service_name: thanos-query
  sampler_type: ratelimiting
  sampler_param: 2
EOF
)
# Start two query nodes. Both replica labels are deduplicated so that data
# from the HA Prometheus pairs and the receivers appears once per series.
for q in 0 1; do
  ${THANOS_EXECUTABLE} query \
    --debug.name query-"${q}" \
    --log.level debug \
    --grpc-address 0.0.0.0:109"${q}"3 \
    --grpc-grace-period 1s \
    --http-address 0.0.0.0:109"${q}"4 \
    --http-grace-period 1s \
    --query.replica-label prometheus \
    --tracing.config="${QUERIER_JAEGER_CONFIG}" \
    --query.replica-label receive_replica \
    ${STORES} &
done

sleep 0.5
# Serve the bucket web UI only when an object store is configured.
# '[ ... -o ... ]' is deprecated and ambiguous per POSIX; use two tests
# joined by '||' instead.
if [ -n "${GCS_BUCKET}" ] || [ -n "${S3_ENDPOINT}" ]; then
  ${THANOS_EXECUTABLE} tools bucket web \
    --debug.name bucket-web \
    --log.level debug \
    --http-address 0.0.0.0:10933 \
    --http-grace-period 1s \
    ${OBJSTORECFG} &
fi
sleep 0.5

# Start Thanos Ruler: evaluates data/rules.yml every 30s through the two
# queriers and exposes its own HTTP/gRPC endpoints.
${THANOS_EXECUTABLE} rule \
  --http-address="0.0.0.0:19999" \
  --grpc-address="0.0.0.0:19998" \
  --data-dir data/ \
  --rule-file "data/rules.yml" \
  --eval-interval "30s" \
  --query "http://0.0.0.0:10904" \
  --query "http://0.0.0.0:10914" \
  --alert.query-url "http://0.0.0.0:9090" \
  ${OBJSTORECFG} &

printf '%s\n' "all started; waiting for signal"

# Block until every background process exits (or the trap fires).
wait