This repository has been archived by the owner on May 16, 2023. It is now read-only.

Add support for NetworkPolicy.
desaintmartin committed Feb 27, 2020
1 parent 29bbb33 commit 7df9c1f
Showing 9 changed files with 358 additions and 1 deletion.
1 change: 1 addition & 0 deletions elasticsearch/.helmignore
@@ -1,2 +1,3 @@
tests/
.pytest_cache/
.venv
1 change: 1 addition & 0 deletions elasticsearch/README.md
@@ -133,6 +133,7 @@ helm install --name elasticsearch elastic/elasticsearch --set imageTag=7.6.0
| `service.httpPortName` | The name of the http port within the service | `http` |
| `service.transportPortName` | The name of the transport port within the service | `transport` |
| `service.loadBalancerSourceRanges` | The IP ranges that are allowed to access | `[]` |
| `networkPolicy` | The [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) to set. See [`values.yaml`](./values.yaml) for an example | `{http.enabled: false, transport.enabled: false}` |
| `updateStrategy` | The [updateStrategy](https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets) for the statefulset. By default Kubernetes will wait for the cluster to be green after upgrading each pod. Setting this to `OnDelete` will allow you to manually delete each pod during upgrades | `RollingUpdate` |
| `maxUnavailable` | The [maxUnavailable](https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget) value for the pod disruption budget. By default this will prevent Kubernetes from having more than 1 unhealthy pod in the node group | `1` |
| `fsGroup (DEPRECATED)` | The Group ID (GID) for [securityContext.fsGroup](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) so that the Elasticsearch user can read from the persistent volume | `` |
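With the defaults introduced by this commit, both policies stay disabled. A minimal override enabling only the HTTP policy might look like the following sketch (the file name is illustrative; the value paths come from the chart values added below):

# custom-values.yaml (hypothetical override file)
# Enables only the HTTP NetworkPolicy; clients then need the
# elasticsearch-master-http-client: "true" label to reach port 9200.
networkPolicy:
  http:
    enabled: true
  transport:
    enabled: false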
15 changes: 15 additions & 0 deletions elasticsearch/examples/networkpolicy/Makefile
@@ -0,0 +1,15 @@
default: test
include ../../../helpers/examples.mk

RELEASE := helm-es-networkpolicy

install:
	helm upgrade --wait --timeout=600 --install $(RELEASE) --values ./values.yaml ../../ ; \

restart:
	helm upgrade --set terminationGracePeriod=121 --wait --timeout=600 --install $(RELEASE) --values ./values.yaml ../../ ; \

test: install goss

purge:
	helm del --purge $(RELEASE)
39 changes: 39 additions & 0 deletions elasticsearch/examples/networkpolicy/test/goss.yaml
@@ -0,0 +1,39 @@
kernel-param:
  vm.max_map_count:
    value: '262144'

http:
  http://elasticsearch-master:9200/_cluster/health:
    status: 200
    timeout: 2000
    body:
      - 'green'
      - '"number_of_nodes":3'
      - '"number_of_data_nodes":3'

  http://localhost:9200:
    status: 200
    timeout: 2000
    body:
      - '"number" : "7.6.0"'
      - '"cluster_name" : "elasticsearch"'
      - '"name" : "elasticsearch-master-0"'
      - 'You Know, for Search'

file:
  /usr/share/elasticsearch/data:
    exists: true
    mode: "2775"
    owner: root
    group: elasticsearch
    filetype: directory

mount:
  /usr/share/elasticsearch/data:
    exists: true

user:
  elasticsearch:
    exists: true
    uid: 1000
    gid: 1000
37 changes: 37 additions & 0 deletions elasticsearch/examples/networkpolicy/values.yaml
@@ -0,0 +1,37 @@
networkPolicy:
  http:
    enabled: true
    explicitNamespacesSelector:
      # Accept from namespaces with all those different rules (from whitelisted Pods)
      matchLabels:
        role: frontend
      matchExpressions:
        - {key: role, operator: In, values: [frontend]}
    additionalRules:
      - podSelector:
          matchLabels:
            role: frontend
      - podSelector:
          matchExpressions:
            - key: role
              operator: In
              values:
                - frontend
  transport:
    enabled: true
    allowExternal: true
    explicitNamespacesSelector:
      matchLabels:
        role: frontend
      matchExpressions:
        - {key: role, operator: In, values: [frontend]}
    additionalRules:
      - podSelector:
          matchLabels:
            role: frontend
      - podSelector:
          matchExpressions:
            - key: role
              operator: In
              values:
                - frontend
61 changes: 61 additions & 0 deletions elasticsearch/templates/networkpolicy.yaml
@@ -0,0 +1,61 @@
{{- if (or .Values.networkPolicy.http.enabled .Values.networkPolicy.transport.enabled) }}
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: {{ template "elasticsearch.uname" . }}
  labels:
    heritage: {{ .Release.Service | quote }}
    release: {{ .Release.Name | quote }}
    chart: "{{ .Chart.Name }}"
    app: "{{ template "elasticsearch.uname" . }}"
spec:
  podSelector:
    matchLabels:
      app: "{{ template "elasticsearch.uname" . }}"
  ingress: # Allow inbound connections

{{- if .Values.networkPolicy.http.enabled }}
    # For HTTP access
    - ports:
        - port: {{ .Values.httpPort }}
      from:
        # From authorized Pods (having the correct label)
        - podSelector:
            matchLabels:
              {{ template "elasticsearch.uname" . }}-http-client: "true"
{{- with .Values.networkPolicy.http.explicitNamespacesSelector }}
          # From authorized namespaces
          namespaceSelector:
{{ toYaml . | indent 12 }}
{{- end }}
{{- with .Values.networkPolicy.http.additionalRules }}
        # Or from custom additional rules
{{ toYaml . | indent 8 }}
{{- end }}
{{- end }}

{{- if .Values.networkPolicy.transport.enabled }}
    # For transport access
    - ports:
        - port: {{ .Values.transportPort }}
      from:
        # From authorized Pods (having the correct label)
        - podSelector:
            matchLabels:
              {{ template "elasticsearch.uname" . }}-transport-client: "true"
{{- with .Values.networkPolicy.transport.explicitNamespacesSelector }}
          # From authorized namespaces
          namespaceSelector:
{{ toYaml . | indent 12 }}
{{- end }}
{{- with .Values.networkPolicy.transport.additionalRules }}
        # Or from custom additional rules
{{ toYaml . | indent 8 }}
{{- end }}
        # Or from other ElasticSearch Pods
        - podSelector:
            matchLabels:
              app: "{{ template "elasticsearch.uname" . }}"
{{- end }}

{{- end }}
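With only networkPolicy.http.enabled set to true and the chart's default name, the template above renders roughly the following object (a simplified sketch derived from the template and the test expectations below; chart labels and the optional namespaceSelector/additionalRules branches are omitted):

kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: elasticsearch-master
spec:
  podSelector:
    matchLabels:
      app: elasticsearch-master
  ingress:
    # For HTTP access
    - ports:
        - port: 9200
      from:
        # From authorized Pods (having the correct label)
        - podSelector:
            matchLabels:
              elasticsearch-master-http-client: "true"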
2 changes: 1 addition & 1 deletion elasticsearch/templates/statefulset.yaml
@@ -352,4 +352,4 @@ spec:
{{- end }}
{{- if .Values.extraContainers }}
{{ tpl .Values.extraContainers . | indent 6 }}
{{- end }}
{{- end }}
148 changes: 148 additions & 0 deletions elasticsearch/tests/elasticsearch_test.py
@@ -1194,3 +1194,151 @@ def test_full_name_override():

    assert "customfullName" in r["statefulset"]
    assert "customfullName" in r["service"]


def test_network_policy():
    config = """
networkPolicy:
  http:
    enabled: true
    explicitNamespacesSelector:
      # Accept from namespaces with all those different rules (from whitelisted Pods)
      matchLabels:
        role: frontend
      matchExpressions:
        - {key: role, operator: In, values: [frontend]}
    additionalRules:
      - podSelector:
          matchLabels:
            role: frontend
      - podSelector:
          matchExpressions:
            - key: role
              operator: In
              values:
                - frontend
  transport:
    enabled: true
    allowExternal: true
    explicitNamespacesSelector:
      matchLabels:
        role: frontend
      matchExpressions:
        - {key: role, operator: In, values: [frontend]}
    additionalRules:
      - podSelector:
          matchLabels:
            role: frontend
      - podSelector:
          matchExpressions:
            - key: role
              operator: In
              values:
                - frontend
"""
    r = helm_template(config)
    ingress = r["networkpolicy"][uname]["spec"]["ingress"]
    pod_selector = r["networkpolicy"][uname]["spec"]["podSelector"]
    http = ingress[0]
    transport = ingress[1]
    assert http["from"] == [
        {
            "podSelector": {
                "matchLabels": {"elasticsearch-master-http-client": "true"}
            },
            "namespaceSelector": {
                "matchExpressions": [
                    {"key": "role", "operator": "In", "values": ["frontend"]}
                ],
                "matchLabels": {"role": "frontend"},
            },
        },
        {"podSelector": {"matchLabels": {"role": "frontend"}}},
        {
            "podSelector": {
                "matchExpressions": [
                    {"key": "role", "operator": "In", "values": ["frontend"]}
                ]
            }
        },
    ]
    assert http["ports"][0]["port"] == 9200
    assert transport["from"] == [
        {
            "podSelector": {
                "matchLabels": {"elasticsearch-master-transport-client": "true"}
            },
            "namespaceSelector": {
                "matchExpressions": [
                    {"key": "role", "operator": "In", "values": ["frontend"]}
                ],
                "matchLabels": {"role": "frontend"},
            },
        },
        {"podSelector": {"matchLabels": {"role": "frontend"}}},
        {
            "podSelector": {
                "matchExpressions": [
                    {"key": "role", "operator": "In", "values": ["frontend"]}
                ]
            }
        },
        {"podSelector": {"matchLabels": {"app": "elasticsearch-master"}}},
    ]
    assert transport["ports"][0]["port"] == 9300
    assert pod_selector == {"matchLabels": {"app": "elasticsearch-master"}}
55 changes: 55 additions & 0 deletions elasticsearch/values.yaml
@@ -257,3 +257,58 @@ sysctlInitContainer:
  enabled: true

keystore: []

networkPolicy:
  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
  ## In order for a Pod to access Elasticsearch, it needs to have the following label:
  ## {{ template "uname" . }}-client: "true"
  ## Example for default configuration to access HTTP port:
  ## elasticsearch-master-http-client: "true"
  ## Example for default configuration to access transport port:
  ## elasticsearch-master-transport-client: "true"

  http:
    enabled: false
    ## If explicitNamespacesSelector is not set or is empty ({}), only client Pods in the same namespace
    ## as the NetworkPolicy (and matching all other criteria) can reach Elasticsearch. To also allow
    ## clients from other namespaces, use this parameter to select those namespaces.
    ##
    # explicitNamespacesSelector:
    #   # Accept from namespaces with all those different rules (only from whitelisted Pods)
    #   matchLabels:
    #     role: frontend
    #   matchExpressions:
    #     - {key: role, operator: In, values: [frontend]}

    ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed.
    ##
    # additionalRules:
    #   - podSelector:
    #       matchLabels:
    #         role: frontend
    #   - podSelector:
    #       matchExpressions:
    #         - key: role
    #           operator: In
    #           values:
    #             - frontend

  transport:
    ## Note that the Elasticsearch Pods can always talk to each other over the transport port,
    ## even when this policy is enabled.
    enabled: false
    # explicitNamespacesSelector:
    #   matchLabels:
    #     role: frontend
    #   matchExpressions:
    #     - {key: role, operator: In, values: [frontend]}
    # additionalRules:
    #   - podSelector:
    #       matchLabels:
    #         role: frontend
    #   - podSelector:
    #       matchExpressions:
    #         - key: role
    #           operator: In
    #           values:
    #             - frontend
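To put the client-label convention described in the comments above into practice, a consumer Pod opts in by carrying the matching label. A sketch, assuming the chart's default "elasticsearch-master" name (the Pod name and image are placeholders):

# Hypothetical client Pod allowed through the HTTP NetworkPolicy.
apiVersion: v1
kind: Pod
metadata:
  name: es-client                       # placeholder name
  labels:
    elasticsearch-master-http-client: "true"
spec:
  containers:
    - name: curl
      image: curlimages/curl:7.72.0     # placeholder image
      command: ["sleep", "3600"]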
