image:
  es:
    repository: quay.io/pires/docker-elasticsearch-kubernetes
    tag: 6.3.2
    pullPolicy: Always
  init:
    repository: busybox
    tag: latest
    pullPolicy: IfNotPresent
  curator:
    repository: bobrik/curator
    tag: latest
    pullPolicy: IfNotPresent

common:
  # Defines the service type for all outward-facing (non-discovery) services.
  # For minikube, use NodePort; otherwise, use LoadBalancer.
  serviceType: LoadBalancer
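  # For a minikube install you might set, for example (an illustrative
  # override, not a chart default):
  # serviceType: NodePort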
  env:
    CLUSTER_NAME: "myesdb"
    # Uncomment this if you get the "No up-and-running site-local (private)
    # addresses" error.
    # NETWORK_HOST: "_eth0_"
  # If enabled, then the data and master nodes will be StatefulSets with
  # associated persistent volume claims.
  stateful:
    enabled: false
    # The PVC storage class that backs the persistent volume claims. On AWS,
    # "gp2" would be appropriate.
    class: "standard"
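    # On AWS, for example (assuming a "gp2" StorageClass exists in the
    # cluster):
    # class: "gp2"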

# Client/ingest nodes can execute pre-processing pipelines, composed of one
# or more ingest processors. Depending on the type of operations performed
# by the ingest processors and the required resources, it may make sense to
# have dedicated ingest nodes that will only perform this specific task (a
# pipeline sketch follows this section).
client:
  # It isn't common to need more than 2 client nodes.
  replicas: 2
  antiAffinity: "soft"
  # The amount of RAM allocated to the JVM heap. This should be set to the
  # same value as client.resources.requests.memory, or you may see
  # OutOfMemoryErrors on startup.
  heapMemory: 256m
  resources:
    requests:
      memory: 256Mi
  env:
    NODE_DATA: "false"
    NODE_MASTER: "false"
    NODE_INGEST: "true"
    HTTP_ENABLE: "true"
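
# A minimal sketch of the kind of pre-processing an ingest node performs,
# assuming Elasticsearch 6.x; the pipeline name "add-timestamp" and the
# field name are illustrative, not part of this chart:
#
#   PUT _ingest/pipeline/add-timestamp
#   {
#     "processors": [
#       { "set": { "field": "ingested_at", "value": "{{_ingest.timestamp}}" } }
#     ]
#   }
#
# Documents indexed with the "?pipeline=add-timestamp" request parameter are
# routed through an ingest node before being stored on a data node.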

# Data nodes hold the shards that contain the documents you have indexed.
# Data nodes handle data-related operations like CRUD, search, and
# aggregations. These operations are I/O-, memory-, and CPU-intensive. It is
# important to monitor these resources and to add more data nodes if they
# are overloaded.
#
# The main benefit of having dedicated data nodes is the separation of the
# master and data roles.
data:
  # This count will depend on your data and computation needs.
  replicas: 2
  antiAffinity: "soft"
  # The amount of RAM allocated to the JVM heap. This should be set to the
  # same value as data.resources.requests.memory, or you may see
  # OutOfMemoryErrors on startup.
  heapMemory: 256m
  resources:
    requests:
      memory: 256Mi
  env:
    NODE_DATA: "true"
    NODE_MASTER: "false"
    NODE_INGEST: "false"
    HTTP_ENABLE: "false"
  # Determines the properties of the persistent volume claim associated with
  # a data node StatefulSet that is created when the common.stateful.enabled
  # attribute is true.
  stateful:
    # This is a default value, and will not be sufficient in a production
    # system. You'll probably want to increase it.
    size: 12Gi
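    # A production value should be sized to your expected index growth; for
    # example (purely illustrative):
    # size: 100Gi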

# The master node is responsible for lightweight cluster-wide actions such
# as creating or deleting an index, tracking which nodes are part of the
# cluster, and deciding which shards to allocate to which nodes. It is
# important for cluster health to have a stable master node.
master:
  # Master replica count should generally be at least 3, and NUMBER_OF_MASTERS
  # below should be set to (#masters / 2) + 1, rounded down, to maintain a
  # quorum.
  replicas: 3
  antiAffinity: "soft"
  # The amount of RAM allocated to the JVM heap. This should be set to the
  # same value as master.resources.requests.memory, or you may see
  # OutOfMemoryErrors on startup.
  heapMemory: 256m
  resources:
    requests:
      memory: 256Mi
  env:
    NODE_DATA: "false"
    NODE_MASTER: "true"
    NODE_INGEST: "false"
    HTTP_ENABLE: "false"
    # The default value for this environment variable is 2, meaning a cluster
    # will need a minimum of 2 master-eligible nodes to operate. If you have
    # 3 masters and one dies, the cluster still works.
    NUMBER_OF_MASTERS: "2"
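    # Worked example: with master.replicas set to 3 above, the quorum is
    # floor(3 / 2) + 1 = 2, which matches this default; with 5 masters it
    # would be 3.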
  # Determines the properties of the persistent volume claim associated with
  # a master node StatefulSet that is created when the common.stateful.enabled
  # attribute is true.
  stateful:
    # This is a default value, and will not be sufficient in a production
    # system. You'll probably want to increase it.
    size: 2Gi

curator:
  enabled: true
  schedule: "0 1 * * *"
  # Allows modification of the default age-based filter. If you require more
  # sophisticated filtering, modify the action file specified in
  # templates/es-curator-config.yaml.
  age:
    timestring: "%Y.%m.%d"
    unit: "days"
    unit_count: 3
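  # With these defaults, the cron schedule above runs Curator daily at 01:00,
  # and the age filter selects indices whose names embed a date matching
  # %Y.%m.%d (e.g. an index named logstash-2019.01.01; the name is
  # illustrative) that are more than 3 days old. The action applied to them
  # is defined in templates/es-curator-config.yaml.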

service:
  httpPort: 9200
  transportPort: 9300

kibana:
  enabled: true
  replicas: 1
  image:
    repository: docker.elastic.co/kibana/kibana-oss
    tag: 6.3.2
    pullPolicy: Always
  httpPort: 80
  resources:
    limits:
      cpu: 1000m
    requests:
      cpu: 100m
  env:
    # XPACK_GRAPH_ENABLED: "false"
    # XPACK_ML_ENABLED: "false"
    # XPACK_REPORTING_ENABLED: "false"
    # XPACK_SECURITY_ENABLED: "false"
  ingress:
    enabled: false
    # Used to create an Ingress record.
    hosts:
      # - kibana.local
    annotations:
      # kubernetes.io/ingress.class: nginx
      # kubernetes.io/tls-acme: "true"
    tls:
      # Secrets must be manually created in the namespace.
      # - secretName: kibana-tls
      #   hosts:
      #     - kibana.local
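
# Example install, using the Helm 2 syntax contemporary with this chart (the
# release name "my-es" is illustrative):
#   helm install --name my-es . -f values.yaml --set kibana.enabled=false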