# Values for configuring the deployment of TimescaleDB
# The chart's README is at:
# https://github.com/timescale/helm-charts/tree/main/charts/timescaledb-single
# Check out the various configuration options (administration guide) at:
# https://github.com/timescale/helm-charts/blob/main/charts/timescaledb-single/docs/admin-guide.md
# Override the deployment namespace
namespaceOverride: ""
# TimescaleDB single helm chart configuration
timescaledb-single:
# disable the chart if an existing TimescaleDB instance is used
enabled: &dbEnabled true
# override default helm chart image to use one with newer promscale_extension
image:
repository: timescale/timescaledb-ha
tag: pg14.5-ts2.8.1-p2
pullPolicy: IfNotPresent
clusterName: "{{ .Release.Name }}-tsdb"
env:
- name: TSTUNE_PROFILE
value: promscale
# number of TimescaleDB pods to spawn (default is 3; use 1 for no HA)
replicaCount: 1
# Backup is disabled by default; enable it if you want to back up
# TimescaleDB to S3. You can provide the S3 details during tobs install
# in the user prompt, or set them in environment variables for the
# following keys (see the commented sketch below the backup block):
# PGBACKREST_REPO1_S3_BUCKET
# PGBACKREST_REPO1_S3_ENDPOINT
# PGBACKREST_REPO1_S3_REGION
# PGBACKREST_REPO1_S3_KEY
# PGBACKREST_REPO1_S3_KEY_SECRET
backup:
enabled: false
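# A minimal, commented sketch of enabling S3 backups (the bucket, endpoint,
# and region values below are placeholders, not chart defaults): set
# backup.enabled to true and supply the PGBACKREST_* settings, for example
# through the chart's env list shown above:
#   backup:
#     enabled: true
#   env:
#     - name: PGBACKREST_REPO1_S3_BUCKET
#       value: "my-tsdb-backups"
#     - name: PGBACKREST_REPO1_S3_ENDPOINT
#       value: "s3.amazonaws.com"
#     - name: PGBACKREST_REPO1_S3_REGION
#       value: "us-east-1"
#     # PGBACKREST_REPO1_S3_KEY / PGBACKREST_REPO1_S3_KEY_SECRET are credentials
#     # and are better supplied from a Kubernetes Secret than as plain values.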
# Applying tuning to TimescaleDB as per
# https://docs.timescale.com/promscale/latest/recommendations/config-recomm/#database-configuration
patroni:
bootstrap:
dcs:
postgresql:
parameters:
checkpoint_timeout: 15min
bgwriter_delay: 10ms
bgwriter_lru_maxpages: 100000
# synchronous_commit: "off"
shared_preload_libraries: timescaledb,pg_stat_statements,pg_stat_monitor,pg_stat_kcache
# TODO: stats_temp_directory was deprecated in PG 15; remove this setting when the DB is upgraded.
stats_temp_directory: /tmp/pg_stat_temp
# TimescaleDB PVC sizes
persistentVolumes:
data:
size: 150Gi
wal:
size: 20Gi
## TimescaleDB resource requests
resources:
requests:
cpu: 100m
memory: 2Gi
sharedMemory:
useMount: true
# Enable Prometheus exporter for PostgreSQL server metrics.
# https://github.com/prometheus-community/postgres_exporter
prometheus:
enabled: true
image:
repository: quay.io/prometheuscommunity/postgres-exporter
tag: v0.11.1
args:
# Disable collection of database size statistics, as this can be expensive
# and some of this data is also provided via node_exporter.
- "--no-collector.database"
# Specifies whether a PodMonitor for the Prometheus Operator should be created
podMonitor:
enabled: true
# Values for configuring the deployment of Promscale
# The chart's README is at:
# https://github.com/timescale/helm-charts/tree/main/charts/promscale
promscale:
enabled: true
image:
repository: timescale/promscale
tag: 0.16.0
pullPolicy: IfNotPresent
# to pass extra args
extraArgs:
- "--metrics.high-availability=true"
extraEnv:
- name: "TOBS_TELEMETRY_INSTALLED_BY"
value: "helm"
- name: "TOBS_TELEMETRY_VERSION"
value: "{{ .Chart.Version }}"
- name: "TOBS_TELEMETRY_TRACING_ENABLED"
value: "true"
- name: "TOBS_TELEMETRY_TIMESCALEDB_ENABLED"
value: *dbEnabled
serviceMonitor:
enabled: true
## Note:
# If you are providing your own secret, do not forget to configure
# its name in connectionSecretName below
connectionSecretName: "tobs-promscale-connection"
# Promscale deployment resource requests
resources:
requests:
# By default this should be enough for a cluster
# with only a few pods
memory: 500Mi
cpu: 30m
# Enabling Kube-Prometheus will install
# Grafana and Prometheus into tobs, as they
# are already part of Kube-Prometheus
kube-prometheus-stack:
enabled: true
alertmanager:
alertmanagerSpec:
image:
registry: quay.io
repository: prometheus/alertmanager
tag: v0.25.0
replicas: 3
## Alertmanager resource requests and limits
resources:
limits:
memory: 100Mi
cpu: 100m
requests:
memory: 50Mi
cpu: 4m
prometheusOperator:
image:
registry: quay.io
repository: prometheus-operator/prometheus-operator
tag: v0.62.0
pullPolicy: IfNotPresent
## Prometheus config reloader configuration
prometheusConfigReloader:
# image to use for config and rule reloading
image:
registry: quay.io
repository: prometheus-operator/prometheus-config-reloader
tag: v0.62.0
# resource config for prometheusConfigReloader
resources:
requests:
cpu: 100m
memory: 50Mi
limits:
cpu: 200m
memory: 50Mi
## Prometheus Operator resource requests and limits
resources:
limits:
memory: 200Mi
cpu: 100m
requests:
memory: 100Mi
cpu: 10m
prometheus:
prometheusSpec:
image:
registry: quay.io
repository: prometheus/prometheus
tag: v2.41.0
scrapeInterval: "1m"
scrapeTimeout: "10s"
evaluationInterval: "1m"
# Prometheus metric retention
retention: 1d
# Number of replicas of each shard to deploy for a Prometheus deployment.
replicas: 2
## Prometheus container resource requests
resources:
requests:
memory: 400Mi
cpu: 40m
replicaExternalLabelName: "__replica__"
# Promscale requires a cluster label to be present for high availability mode.
prometheusExternalLabelName: "cluster"
# By default, collect all PrometheusRules and scrape configurations from the cluster.
# Exclude all resources and namespaces labeled with `tobs/excluded`
# (see the commented example after the selectors below).
ruleSelectorNilUsesHelmValues: false
ruleNamespaceSelector:
matchExpressions:
- key: tobs/excluded
operator: DoesNotExist
ruleSelector:
matchExpressions:
- key: tobs/excluded
operator: DoesNotExist
serviceMonitorSelectorNilUsesHelmValues: false
serviceMonitorSelector:
matchExpressions:
- key: tobs/excluded
operator: DoesNotExist
serviceMonitorNamespaceSelector:
matchExpressions:
- key: tobs/excluded
operator: DoesNotExist
podMonitorSelectorNilUsesHelmValues: false
podMonitorSelector:
matchExpressions:
- key: tobs/excluded
operator: DoesNotExist
podMonitorNamespaceSelector:
matchExpressions:
- key: tobs/excluded
operator: DoesNotExist
probeSelectorNilUsesHelmValues: false
probeSelector:
matchExpressions:
- key: tobs/excluded
operator: DoesNotExist
probeNamespaceSelector:
matchExpressions:
- key: tobs/excluded
operator: DoesNotExist
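# A hedged example of opting a resource out of collection (the resource and
# namespace names are placeholders): since the selectors above only check that
# the `tobs/excluded` key exists, adding the label with any value is enough, e.g.
#   kubectl label servicemonitor my-app-monitor tobs/excluded=true
#   kubectl label namespace my-excluded-namespace tobs/excluded=true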
# The remote_read spec configuration for Prometheus.
# ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec
# remoteRead:
# - url: "http://{{ .Release.Name }}-promscale.{{ .Release.Namespace }}.svc:9201/read"
# readRecent: false
# The remote_write spec configuration for Prometheus.
# ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
remoteWrite:
- url: "http://{{ .Release.Name }}-promscale.{{ .Release.Namespace }}.svc:9201/write"
remoteTimeout: 100s
queueConfig:
capacity: 100000
maxSamplesPerSend: 10000
batchSendDeadline: 30s
minShards: 20
maxShards: 20
minBackoff: 100ms
maxBackoff: 10s
# Prometheus pod storage spec
storageSpec:
# Using PersistentVolumeClaim
# disable mount sub path, use the root directory of pvc
disableMountSubPath: true
volumeClaimTemplate:
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: 8Gi
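# A hedged sketch (the storage class name is a placeholder): to pin the PVC
# to a specific storage class, add storageClassName under the claim spec, e.g.
#   volumeClaimTemplate:
#     spec:
#       storageClassName: "fast-ssd"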
# We've enabled annotation-based scraping by default for backward-compatibility
# and to support the largest number of use-cases out-of-the-box.
# We encourage people to use ServiceMonitors and PodMonitors for new components.
# See discussion in: https://github.com/prometheus-operator/prometheus-operator/issues/1547
# and more info: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#prometheusioscrape
# If additional scrape configurations are already deployed in a single secret, you can use
# this section (see the commented sketch below). Expected values are the secret name and key.
# Cannot be used together with additionalScrapeConfigs.
additionalScrapeConfigsSecret:
enabled: false
name: tobs-scrape-config
key: additional-scrape-config.yaml
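# A hedged sketch of how the referenced secret could be created if this is
# enabled (the job name and target are placeholders; the file contains standard
# Prometheus scrape_config entries):
#   additional-scrape-config.yaml contents:
#     - job_name: my-static-targets
#       static_configs:
#         - targets: ["my-app.default.svc:8080"]
#   then: kubectl create secret generic tobs-scrape-config \
#           --from-file=additional-scrape-config.yaml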
# Enable Prometheus Remote Write dashboard.
remoteWriteDashboards: true
# Values for configuring the deployment of Grafana
# The Grafana community chart is used; its guide
# can be found at:
# https://github.com/grafana/helm-charts/blob/main/charts/grafana/README.md
grafana:
enabled: true
# TODO(paulfantom): remove with kube-prometheus bump
image:
repository: grafana/grafana
tag: 9.3.2
pullPolicy: IfNotPresent
resources:
limits:
cpu: 200m
memory: 400Mi
requests:
cpu: 50m
memory: 250Mi
envValueFrom:
GRAFANA_PASSWORD:
secretKeyRef:
name: custom-secret-scripts
key: GRAFANA_PASSWORD
sidecar:
datasources:
enabled: true
label: tobs_datasource
labelValue: "true"
# Disable Prometheus datasource by default as
# Promscale is the default datasource
defaultDatasourceEnabled: false
dashboards:
# Option to enable multi-cluster support in
# Grafana dashboards; disabled by default
multicluster:
global:
enabled: false
enabled: true
files:
- dashboards/k8s-cluster.json
- dashboards/k8s-hardware.json
- dashboards/apm-dependencies.json
- dashboards/apm-home.json
- dashboards/apm-service-dependencies-downstream.json
- dashboards/apm-service-dependencies-upstream.json
- dashboards/apm-service-overview.json
- dashboards/promscale.json
- dashboards/postgres-overview.json
- dashboards/postgres-details.json
adminUser: admin
# To configure the password externally, refer to https://github.com/grafana/helm-charts/blob/6578497320d3c4672bab3a3c7fd38dffba1c9aba/charts/grafana/values.yaml#L340-L345 (see the commented sketch below)
adminPassword: ""
persistence:
type: pvc
enabled: true
accessModes:
- ReadWriteOnce
prometheus:
datasource:
enabled: true
# By default the datasource URL points to the Promscale (ts-prom) connector
# instance deployed with this chart. If a connector isn't used, this should
# be set to the Prometheus server instead.
url: "http://{{ .Release.Name }}-promscale.{{ .Release.Namespace }}.svc:9201"
timescale:
datasource:
enabled: true
user: grafana
# Leaving the password empty will cause Helm to generate a random password
pass: ""
dbName: postgres
sslMode: require
# By default the url/host is set to the db instance deployed
# with this chart
host: "{{ .Release.Name }}.{{ .Release.Namespace }}.svc"
port: 5432
jaeger:
# Endpoint for integrating the Jaeger datasource in Grafana. This should point to the HTTP endpoint, not gRPC.
promscaleTracesQueryEndPoint: "{{ .Release.Name }}-promscale.{{ .Release.Namespace }}.svc:9201"
kube-state-metrics:
image:
repository: registry.k8s.io/kube-state-metrics/kube-state-metrics
tag: v2.7.0
pullPolicy: IfNotPresent
# By default kube-state-metrics is scraped using a
# ServiceMonitor, so disable annotation-based scraping
prometheusScrape: false
resources:
limits:
cpu: 100m
memory: 200Mi
requests:
cpu: 10m
memory: 30Mi
prometheus-node-exporter:
image:
repository: quay.io/prometheus/node-exporter
tag: v1.5.0
pullPolicy: IfNotPresent
# By default node-exporter is scraped using a
# ServiceMonitor, so disable annotation-based scraping
service:
annotations:
prometheus.io/scrape: "false"
prometheus:
monitor:
interval: 15s
resources:
limits:
cpu: 250m
memory: 180Mi
requests:
cpu: 30m
memory: 50Mi
# Enable the OpenTelemetry Operator
# If using the tobs CLI, you can enable OpenTelemetry with the --enable-opentelemetry flag
opentelemetry-operator:
enabled: true
manager:
image:
repository: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator
tag: v0.67.0
resources:
limits:
cpu: 50m
memory: 260Mi
requests:
cpu: 5m
memory: 130Mi
serviceMonitor:
enabled: true
prometheusRule:
enabled: true
instrumentation:
pythonImage: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.32b0
javaImage: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.21.0
nodejsImage: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0
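# A hedged usage sketch (the workload and language are placeholders): once the
# operator's Instrumentation resource is in place, auto-instrumentation is
# requested per workload via a pod annotation, for example:
#   template:
#     metadata:
#       annotations:
#         instrumentation.opentelemetry.io/inject-python: "true"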
collector:
# The default OpenTelemetry Collector configuration that will be deployed by
# Helm once the OpenTelemetry Operator is in a running state
config: |
receivers:
jaeger:
protocols:
grpc:
thrift_http:
otlp:
protocols:
grpc:
http:
exporters:
logging:
otlp:
endpoint: "{{ .Release.Name }}-promscale.{{ .Release.Namespace }}.svc:9202"
compression: none
tls:
insecure: true
prometheusremotewrite:
endpoint: "http://{{ .Release.Name }}-promscale.{{ .Release.Namespace }}.svc:9201/write"
tls:
insecure: true
processors:
batch:
service:
pipelines:
traces:
receivers: [jaeger, otlp]
exporters: [logging, otlp]
processors: [batch]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [prometheusremotewrite]
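# A hedged usage sketch (the Service name below is a placeholder; the actual name
# depends on the OpenTelemetryCollector resource this chart creates): applications
# could export OTLP data to the collector's gRPC receiver configured above, e.g.
#   env:
#     - name: OTEL_EXPORTER_OTLP_ENDPOINT
#       value: "http://<collector-service>.<namespace>.svc:4317"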