-
Notifications
You must be signed in to change notification settings - Fork 1
/
values.yaml
93 lines (88 loc) · 2.15 KB
/
values.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
#@data/values
---
#! Default values for capsule.
#! This is a YAML-formatted file.
#! Declare variables to be passed into your templates.
release:
  name: capsule
  service: kapp
  namespace: capsule-system
chart:
  name: capsule
  appVersion: 0.1.0
  version: 0.0.19
manager:
  image:
    repository: quay.io/clastix/capsule
    pullPolicy: IfNotPresent
    tag: ''
  #! Specifies if the container should be started in hostNetwork mode.
  #!
  #! Required for use in some managed kubernetes clusters (such as AWS EKS) with custom
  #! CNI (such as calico), because control-plane managed by AWS cannot communicate
  #! with pods' IP CIDR and admission webhooks are not working
  hostNetwork: false
  #! Additional Capsule options
  options:
    logLevel: '4'
    forceTenantPrefix: false
    capsuleUserGroups: ["capsule.clastix.io"]
    protectedNamespaceRegex: ""
    allowIngressHostnameCollision: true
    allowTenantIngressHostnamesCollision: false
  livenessProbe:
    httpGet:
      path: /healthz
      port: 10080
  readinessProbe:
    httpGet:
      path: /readyz
      port: 10080
  resources:
    limits:
      cpu: 200m
      memory: 128Mi
    requests:
      cpu: 200m
      memory: 128Mi
jobs:
  image:
    repository: quay.io/clastix/kubectl
    pullPolicy: IfNotPresent
    tag: "v1.20.7"
proxy:
  image:
    repository: ""
    pullPolicy: ""
    tag: ""
mutatingWebhooksTimeoutSeconds: 30
validatingWebhooksTimeoutSeconds: 30
imagePullSecrets: []
serviceAccount:
  create: true
  annotations: {}
  name: "capsule"
podAnnotations: {}
priorityClassName: ''
nodeSelector: {}
#! node-role.kubernetes.io/master: ""
tolerations: []
#!- key: CriticalAddonsOnly
#!  operator: Exists
#!- effect: NoSchedule
#!  key: node-role.kubernetes.io/master
replicaCount: 1
affinity: {}
podSecurityPolicy:
  enabled: false
serviceMonitor:
  enabled: true
  #! Install the ServiceMonitor into a different Namespace, as the monitoring stack one (default: the release one)
  namespace:
  #! Assign additional labels according to Prometheus' serviceMonitorSelector matching labels
  labels: {}
  annotations: {}
  matchLabels: {}
  serviceAccount:
    name: prometheus-k8s
    namespace: monitoring