diff --git a/LICENSE b/LICENSE index 7a4a3ea242..e8e90b1bfc 100644 --- a/LICENSE +++ b/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2019 ABB. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/core/bin/gen_docs.sh b/core/bin/gen_docs.sh index e397778ba5..d3c4334216 100644 --- a/core/bin/gen_docs.sh +++ b/core/bin/gen_docs.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/ansible/roles/kafka/files/kafka_generate_ssl.sh b/core/core/src/ansible/roles/kafka/files/kafka_generate_ssl.sh index ed9000b94c..dc7b0446cc 100644 --- a/core/core/src/ansible/roles/kafka/files/kafka_generate_ssl.sh +++ b/core/core/src/ansible/roles/kafka/files/kafka_generate_ssl.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/scripts/azure/gen_templates_azure.sh b/core/core/src/scripts/azure/gen_templates_azure.sh index ecf7309831..ee4178a262 100644 --- a/core/core/src/scripts/azure/gen_templates_azure.sh +++ b/core/core/src/scripts/azure/gen_templates_azure.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/core/core/src/scripts/azure/run_build.sh b/core/core/src/scripts/azure/run_build.sh index b9f37bd1f8..e1cf4bf9a0 100644 --- a/core/core/src/scripts/azure/run_build.sh +++ b/core/core/src/scripts/azure/run_build.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/scripts/common/functions.sh b/core/core/src/scripts/common/functions.sh index 6f6c0ffd79..978d2ced42 100644 --- a/core/core/src/scripts/common/functions.sh +++ b/core/core/src/scripts/common/functions.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/scripts/kubernetes/get-id-rsa-from-value.sh b/core/core/src/scripts/kubernetes/get-id-rsa-from-value.sh index 329482e777..1fcd1bde1f 100644 --- a/core/core/src/scripts/kubernetes/get-id-rsa-from-value.sh +++ b/core/core/src/scripts/kubernetes/get-id-rsa-from-value.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/scripts/kubernetes/get-init-token.sh b/core/core/src/scripts/kubernetes/get-init-token.sh index 012872aee9..e8f05f3816 100644 --- a/core/core/src/scripts/kubernetes/get-init-token.sh +++ b/core/core/src/scripts/kubernetes/get-init-token.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/scripts/kubernetes/linux/make-executable.sh b/core/core/src/scripts/kubernetes/linux/make-executable.sh index 45eae1d4ae..0f4ff96367 100644 --- a/core/core/src/scripts/kubernetes/linux/make-executable.sh +++ b/core/core/src/scripts/kubernetes/linux/make-executable.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/scripts/kubernetes/linux/master/apply-kubernetes-dashboard.sh b/core/core/src/scripts/kubernetes/linux/master/apply-kubernetes-dashboard.sh index 9cb381364e..a8989c1f17 100644 --- a/core/core/src/scripts/kubernetes/linux/master/apply-kubernetes-dashboard.sh +++ b/core/core/src/scripts/kubernetes/linux/master/apply-kubernetes-dashboard.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/scripts/kubernetes/linux/master/init-kubernetes-master.sh b/core/core/src/scripts/kubernetes/linux/master/init-kubernetes-master.sh index e5b4cf8cf6..351ffe0bfd 100644 --- a/core/core/src/scripts/kubernetes/linux/master/init-kubernetes-master.sh +++ b/core/core/src/scripts/kubernetes/linux/master/init-kubernetes-master.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/core/core/src/scripts/kubernetes/linux/master/untaint-master.sh b/core/core/src/scripts/kubernetes/linux/master/untaint-master.sh index fc7def9be2..bf7ac74b4b 100644 --- a/core/core/src/scripts/kubernetes/linux/master/untaint-master.sh +++ b/core/core/src/scripts/kubernetes/linux/master/untaint-master.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/scripts/kubernetes/linux/node/init-kubernetes-nodes.sh b/core/core/src/scripts/kubernetes/linux/node/init-kubernetes-nodes.sh index ab8dd98c68..d3acc1b006 100644 --- a/core/core/src/scripts/kubernetes/linux/node/init-kubernetes-nodes.sh +++ b/core/core/src/scripts/kubernetes/linux/node/init-kubernetes-nodes.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/scripts/kubernetes/linux/prepare-system-kubernetes-redhat.sh b/core/core/src/scripts/kubernetes/linux/prepare-system-kubernetes-redhat.sh index cd4a31984b..c29fdfa91b 100644 --- a/core/core/src/scripts/kubernetes/linux/prepare-system-kubernetes-redhat.sh +++ b/core/core/src/scripts/kubernetes/linux/prepare-system-kubernetes-redhat.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/core/core/src/scripts/kubernetes/linux/prepare-system-kubernetes-ubuntu.sh b/core/core/src/scripts/kubernetes/linux/prepare-system-kubernetes-ubuntu.sh index 3d53e5c26b..ac1e8ce14f 100644 --- a/core/core/src/scripts/kubernetes/linux/prepare-system-kubernetes-ubuntu.sh +++ b/core/core/src/scripts/kubernetes/linux/prepare-system-kubernetes-ubuntu.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/scripts/vsts/run-template-engine.sh b/core/core/src/scripts/vsts/run-template-engine.sh index 1632f4e97a..608878fbfe 100644 --- a/core/core/src/scripts/vsts/run-template-engine.sh +++ b/core/core/src/scripts/vsts/run-template-engine.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/templates/azure/az_get_ips.sh.j2 b/core/core/src/templates/azure/az_get_ips.sh.j2 index e6253aee05..413243eb54 100644 --- a/core/core/src/templates/azure/az_get_ips.sh.j2 +++ b/core/core/src/templates/azure/az_get_ips.sh.j2 @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/templates/azure/backend.sh.j2 b/core/core/src/templates/azure/backend.sh.j2 index d86ba5e3bb..d8d71a6c86 100644 --- a/core/core/src/templates/azure/backend.sh.j2 +++ b/core/core/src/templates/azure/backend.sh.j2 @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. 
All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/templates/azure/del_rg.sh.j2 b/core/core/src/templates/azure/del_rg.sh.j2 index 8d035325dc..2cf99c2dcb 100644 --- a/core/core/src/templates/azure/del_rg.sh.j2 +++ b/core/core/src/templates/azure/del_rg.sh.j2 @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/templates/azure/del_sp.sh.j2 b/core/core/src/templates/azure/del_sp.sh.j2 index d8abec59a9..eb75330f77 100644 --- a/core/core/src/templates/azure/del_sp.sh.j2 +++ b/core/core/src/templates/azure/del_sp.sh.j2 @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/templates/azure/env.sh.j2 b/core/core/src/templates/azure/env.sh.j2 index 4487443e14..893800c217 100644 --- a/core/core/src/templates/azure/env.sh.j2 +++ b/core/core/src/templates/azure/env.sh.j2 @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/templates/azure/gen_sp.sh.j2 b/core/core/src/templates/azure/gen_sp.sh.j2 index 30cba3502f..30c15e5424 100644 --- a/core/core/src/templates/azure/gen_sp.sh.j2 +++ b/core/core/src/templates/azure/gen_sp.sh.j2 @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/templates/azure/login.sh.j2 b/core/core/src/templates/azure/login.sh.j2 index b6f4c0f306..72f27fa745 100644 --- a/core/core/src/templates/azure/login.sh.j2 +++ b/core/core/src/templates/azure/login.sh.j2 @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/templates/azure/release.sh.j2 b/core/core/src/templates/azure/release.sh.j2 index 9ae8e0bd54..608d412204 100644 --- a/core/core/src/templates/azure/release.sh.j2 +++ b/core/core/src/templates/azure/release.sh.j2 @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/templates/azure/version.sh.j2 b/core/core/src/templates/azure/version.sh.j2 index 5d531b2ecf..4015235b17 100644 --- a/core/core/src/templates/azure/version.sh.j2 +++ b/core/core/src/templates/azure/version.sh.j2 @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/templates/common/ansible.sh.j2 b/core/core/src/templates/common/ansible.sh.j2 index 164dfc51af..8c282f3ad4 100644 --- a/core/core/src/templates/common/ansible.sh.j2 +++ b/core/core/src/templates/common/ansible.sh.j2 @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/templates/common/proxy.sh.j2 b/core/core/src/templates/common/proxy.sh.j2 index 06853762a4..da0dac23c9 100644 --- a/core/core/src/templates/common/proxy.sh.j2 +++ b/core/core/src/templates/common/proxy.sh.j2 @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/src/templates/common/version.sh.j2 b/core/core/src/templates/common/version.sh.j2 index 90897670b8..8b610567d4 100644 --- a/core/core/src/templates/common/version.sh.j2 +++ b/core/core/src/templates/common/version.sh.j2 @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/test/kubernetes/install-test-prerequisites.sh b/core/core/test/kubernetes/install-test-prerequisites.sh index edcd196e70..2e9162cce6 100644 --- a/core/core/test/kubernetes/install-test-prerequisites.sh +++ b/core/core/test/kubernetes/install-test-prerequisites.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/core/test/kubernetes/run-sonobuoy-test.sh b/core/core/test/kubernetes/run-sonobuoy-test.sh index 9bff8a6e7c..7760078d93 100644 --- a/core/core/test/kubernetes/run-sonobuoy-test.sh +++ b/core/core/test/kubernetes/run-sonobuoy-test.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. 
+# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/epiphany b/core/epiphany index 1bd82e4fc8..2e73ddb380 100644 --- a/core/epiphany +++ b/core/epiphany @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/proxy.sh b/core/proxy.sh index 2133566d48..7efb01434d 100644 --- a/core/proxy.sh +++ b/core/proxy.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/src/core/main.py b/core/src/core/main.py index 7fccb3fefc..85ae1454e2 100644 --- a/core/src/core/main.py +++ b/core/src/core/main.py @@ -1,6 +1,6 @@ #!/bin/python # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/core/version.sh b/core/version.sh index 9500c3a2d4..cd717e258c 100644 --- a/core/version.sh +++ b/core/version.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2019 The Epiphany-Platform Team. +# Copyright 2019 ABB. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/docs/architecture/diagrams/logical-view/logical-view-diagram.svg b/docs/architecture/diagrams/logical-view/logical-view-diagram.svg new file mode 100644 index 0000000000..f7b5be5da2 --- /dev/null +++ b/docs/architecture/diagrams/logical-view/logical-view-diagram.svg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34609e841b1944a0a132b95b458d56e4e79105bbbdab341f9a0f4d434f2785ed +size 63997 diff --git a/docs/architecture/diagrams/physical-view/physical-view.svg b/docs/architecture/diagrams/physical-view/physical-view.svg new file mode 100644 index 0000000000..dd5b02209d --- /dev/null +++ b/docs/architecture/diagrams/physical-view/physical-view.svg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca62512405079879277c6e1716da689dcdae6f5e13cf00e60023c3efd5d50ead +size 72652 diff --git a/docs/architecture/diagrams/playground/epiphany-playground.png b/docs/architecture/diagrams/playground/epiphany-playground.png new file mode 100644 index 0000000000..d03d1e0cbc --- /dev/null +++ b/docs/architecture/diagrams/playground/epiphany-playground.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f37b75ce1ff3fe8f990e1b894c6276337eb62572d8b0fb350462ae3b554390b +size 213504 diff --git a/docs/architecture/diagrams/process-view/computing-process-view.svg b/docs/architecture/diagrams/process-view/computing-process-view.svg new file mode 100644 index 0000000000..7a01afd1e3 --- /dev/null +++ b/docs/architecture/diagrams/process-view/computing-process-view.svg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a714a84adfe53da500feb77c921046c5e5253a23618792a9a9621422f765b195 +size 51547 diff --git a/docs/architecture/diagrams/process-view/logging-process-view.svg b/docs/architecture/diagrams/process-view/logging-process-view.svg new file mode 100644 index 0000000000..37c3aca9f0 --- /dev/null +++ b/docs/architecture/diagrams/process-view/logging-process-view.svg @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:0e93db470f0d86358f54deed4480acae6bee12ddd77c96cd85b734457deee998 +size 53877 diff --git a/docs/architecture/diagrams/process-view/monitoring-process-view.svg b/docs/architecture/diagrams/process-view/monitoring-process-view.svg new file mode 100644 index 0000000000..ec82e6cb24 --- /dev/null +++ b/docs/architecture/diagrams/process-view/monitoring-process-view.svg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfc793fa32d4a677b43d0d63ca70f5392b182f0ff2a3ff0dd4cac41a77496688 +size 84580 diff --git a/docs/architecture/docs/README.md b/docs/architecture/docs/README.md new file mode 100644 index 0000000000..9d1d2dcc61 --- /dev/null +++ b/docs/architecture/docs/README.md @@ -0,0 +1,5 @@ +# Architecture Documentation + +This section contains Architecture Design Documents. These help provide context on why certain technical decisions were made and why certain tools were made. + +You can check out the [primary documentation](/docs/index.md) for details or the [index](index.md) here in this folder. diff --git a/docs/architecture/docs/index.md b/docs/architecture/docs/index.md new file mode 100644 index 0000000000..7696d6257c --- /dev/null +++ b/docs/architecture/docs/index.md @@ -0,0 +1,3 @@ +# Architecture Index + +Documentation index. diff --git a/docs/architecture/logical-view.md b/docs/architecture/logical-view.md new file mode 100644 index 0000000000..20367ae7ec --- /dev/null +++ b/docs/architecture/logical-view.md @@ -0,0 +1,117 @@ +# Epiphany Logical View + +## Overview + +Epiphany Platform architecture can be divided into functional modules that realize closely related set of functionality. + +![Logical view architecture diagram](diagrams/logical-view/logical-view-diagram.svg) + +## Monitoring + +### Exporters + +Platform monitoring uses set of `exporter` components that the responsibility is to collect metrics. 
Following table shows which `exporter` is collecting metrics from each Epiphany component. + +Exporter | Component | Description +--- | --- | --- +`Node exporter` | OS/Hardware metrics | [description](https://prometheus.io/docs/guides/node-exporter/) +`Kafka exporter` | Kafka metrics | [description](https://github.com/danielqsj/kafka_exporter) +`JMX exporter` | JVM metrics (Kafka, Zookeeper) | [description](https://github.com/prometheus/jmx_exporter) +`HAProxy exporter` | HAProxy metrics | [description](https://github.com/prometheus/haproxy_exporter) +`cAdvisor` | Container metrics (Kubernetes, Docker) | [description](https://prometheus.io/docs/guides/cadvisor/) + +### Prometheus + +`Prometheus` is open-source system used for monitoring and alerting. Each `exporter` exposes `http://server-name/metrics` endpoint that contains monitoring data, then `Prometheus` collects this data in configured interval. To find more information about `Prometheus` use this [link](https://prometheus.io/docs/introduction/overview/). + +### Grafana + +Once the data are collected, they can be shown in `Grafana` dashboards. `Grafana` in Epiphany Platform has `Prometheus` datasource configured by default. It uses [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/) to query `Prometheus` database. To read more about `Grafana` use this [link](https://grafana.com/). + +### Alert Manager + +When alert rule is met `Prometheus` generates alert. +The alert is handled by `Alert Manager` and depending on configuration is routed to `Slack`, `PagerDuty`, `Email`, etc. To read more about `Alert Manager` use this [link](https://prometheus.io/docs/alerting/alertmanager/). + +## Logging + +### Filebeat + +Epiphany Platform logging uses `Filebeat` to collect logs. 
It reads data from following locations: + +Source | Purpose +--- | --- +/var/log/audit/audit.log | Logs from Linux audit daemon +/var/log/auth.log | System authorization information and user logins +/var/log/firewalld | Firewall logs +/var/log/haproxy.log | HAProxy logs +/var/log/kafka/server.log | Kafka logs +/var/log/messages | Global system messages +/var/log/secure | Logs from authentication and authorization +/var/log/syslog | System logs and events +/var/log/zookeeper/version-2/* | Zookeeper's logs +Docker containers | Kubernetes components that run in a container + +`Filebeat`, unlike `Grafana`, pushes data to database (`Elasticsearch`) instead of pulling them. +[Read more](https://www.elastic.co/products/beats/filebeat) about `Filebeat`. + +### Elasticsearch + +`Elasticsearch` is highly scalable and full-text search enabled analytics engine. Epiphany Platform uses it for storage and analysis of logs. + +[Read more](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) + +### Elasticsearch Curator + +`Elasticsearch Curator` is component that manages and cleans indices and snapshots. Epiphany uses `Elasticsearch Curator` to ensure that centralized logging will not completely fill disk space. + +[Read more](https://www.elastic.co/guide/en/elasticsearch/client/curator/5.5/index.html) + +### Kibana + +`Kibana` like `Grafana` is used in Epiphany for visualization, in addition it has full text search capabilities. `Kibana` uses `Elasticsearch` as datasource for logs, it allows to create full text queries, dashboards and analytics that are performed on logs. + +[Read more](https://www.elastic.co/products/kibana) + +## Computing + +Epiphany Platform benefits from `Kubernetes` capabilities. Product team creates a `Docker` enabled applications and using [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) installs them in a `Kubernetes cluster`. 
+ +### Kubernetes Master + +`Kubernetes Master` is the component that provides control plane for a cluster. It handles an application's deployments and responds for events. Usually `Kubernetes Master` does not run application's pods. + +[Read more](https://kubernetes.io/docs/concepts/overview/components/#master-components) + +### Kubernetes Nodes + +`Kubernetes Node` component maintains running pods that `Kubernetes Master` delegates to work on the node. Usually there are many `Kubernetes Nodes` for single or many `Kubernetes Masters`. + +[Read more](https://kubernetes.io/docs/concepts/overview/components/#node-components) + +## Messaging + +Kafka is a distributed streaming and messaging platform. + +### Kafka Brokers + +`Kafka Broker` is a synonym for Kafka Server or Kafka Node. Brokers allow producers and consumers to publish and consume messages. `Kafka` is horizontally scalable - in short it means that adding new brokers increases `Kafka` cluster capacity. + +[Read more](https://kafka.apache.org/documentation/) + +### Zookeeper + +`Zookeeper` in Epiphany Platform is used for distributed `Kafka` configuration management. Simplified: From application's perspective it provides information about location of topic/partition that application writes or reads. + +Zookeepers are usually deployed in more than one instance - this is called Zookeepers ensemble. + +[Read more](https://cwiki.apache.org/confluence/display/ZOOKEEPER/Index) + +## Load Balancing + +### HAProxy + +`HAProxy` is a high performance load balancer. Applications deployed on `Kubernetes` can be exposed through `HAProxy` that supports TLS termination and supports multiple backends. +Epiphany Platform automates the configuration for backend and frontend of `HAProxy`. 
+ +[Read more](http://www.haproxy.org/#desc) \ No newline at end of file diff --git a/docs/architecture/physical-view.md b/docs/architecture/physical-view.md new file mode 100644 index 0000000000..4d82ce1e1a --- /dev/null +++ b/docs/architecture/physical-view.md @@ -0,0 +1,17 @@ +# Epiphany Physical View + +Epiphany Platform is deployed on a number of machines that require communication in order to exchange large amount of data. Components described in [Logical View](logical-view.md) and [Process View](process-view.md) are deployed on different machines. + +![Logging process view](diagrams/physical-view/physical-view.svg) + +`Node exporter` and `Filebeat` should be present on each machine, because those components are responsible for collecting monitoring and logging data. + +Computing section - contains `Kubernetes Master` and `Kubernetes Node` where many `Kubernetes Node` machines (virtual, cloud, bare metal) are possible. + +Centralized monitoring section with `Prometheus`, `Grafana` and `Alert Manager` pulls data from exporters installed on all machines. `Grafana` web dashboards are available on the machine running this role. Machine running `Alert Manager` requires access to configured endpoints for alerting - like email server, Slack, PagerDuty. + +Centralized logging receives data pushed by `Filebeat` component that is installed on each machine. `Kibana` web interface is available on machine running this role. + +Messaging with `Kafka` like `Kubernetes Node` scales horizontally, it means as many machines running this role are possible as needed. + +Load Balancing machine running `HAProxy` is an entry point for applications running inside of `Kubernetes`. 
\ No newline at end of file diff --git a/docs/architecture/process-view.md b/docs/architecture/process-view.md new file mode 100644 index 0000000000..366bb2ee83 --- /dev/null +++ b/docs/architecture/process-view.md @@ -0,0 +1,31 @@ +# Epiphany Process View + +## Computing and Load Balancing + +Epiphany strongly utilizes the `Kubernetes` platform and follows its rules and principles. +Read more about `Kubernetes architecture` using this [link](https://kubernetes.io/docs/concepts/architecture/). + +Epiphany computing modules use standard implementation of `Kubernetes` and combine it with the load balancing capabilities of `HAProxy`. + +![Computing and Load Balancing process view](diagrams/process-view/computing-process-view.svg) + +Load balancing integration with `Kubernetes` uses backend configurations. The configurations point to created `Kubernetes services` but this traffic goes through `Kube Proxy` to resolve internal IP address of pod that is currently available. + +## Monitoring + +Epiphany uses `Prometheus` and related components for gathering data from +different exporters: `Node-exporter`, `Kafka-exporter`, `HAProxy-exporter`. This +data is stored in `Prometheus`. `Grafana` connects to `Prometheus` to display +metrics from different kinds of exporters. + +![Monitoring process view](diagrams/process-view/monitoring-process-view.svg) + +`Prometheus` calls `Alertmanager` whenever a configured rule is met to send alerts to the configured notification integrations (like `Slack`, `PagerDuty` or `email`). + +## Logging + +Epiphany uses `Elasticsearch` as key-value database with `Filebeat` for gathering logs and `Kibana` as user interface to write queries and analyze logs. + +![Logging process view](diagrams/process-view/logging-process-view.svg) + +`Filebeat` gathers OS and application logs and ships them to `Elasticsearch`. Queries from `Kibana` are run against `Elasticsearch` key-value database. 
\ No newline at end of file diff --git a/docs/architecture/security/Epiphany Cloud DFD.png b/docs/architecture/security/Epiphany Cloud DFD.png new file mode 100644 index 0000000000..59a546a87f --- /dev/null +++ b/docs/architecture/security/Epiphany Cloud DFD.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:670d226574b3785b01f6e7856e0681b0d14e114344e98ad18c485600571f2579 +size 185493 diff --git a/docs/assets/images/container48pxvector.svg b/docs/assets/images/container48pxvector.svg new file mode 100644 index 0000000000..04707d77d9 --- /dev/null +++ b/docs/assets/images/container48pxvector.svg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cce86557e0add451cd5e95426724fbda9870ea92180ec96a3e5e575eddee5f81 +size 1804 diff --git a/docs/assets/images/diagrams/build-server/build-environment.svg b/docs/assets/images/diagrams/build-server/build-environment.svg new file mode 100644 index 0000000000..8353f85636 --- /dev/null +++ b/docs/assets/images/diagrams/build-server/build-environment.svg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b4f005f1297d8da84e9a8096a10ed84d6204449eca9fed4bd1edc730aa0219e +size 69249 diff --git a/docs/assets/images/diagrams/playground/epiphany-playground.png b/docs/assets/images/diagrams/playground/epiphany-playground.png new file mode 100644 index 0000000000..d03d1e0cbc --- /dev/null +++ b/docs/assets/images/diagrams/playground/epiphany-playground.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f37b75ce1ff3fe8f990e1b894c6276337eb62572d8b0fb350462ae3b554390b +size 213504 diff --git a/docs/assets/images/logos/epiphany.png b/docs/assets/images/logos/epiphany.png new file mode 100644 index 0000000000..0a73222ec6 --- /dev/null +++ b/docs/assets/images/logos/epiphany.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:489ec3b6724517bd3b265a0d0537afd153e16119c5c7de046edfb33da18575d5 +size 6666 diff --git 
a/docs/assets/images/logos/epiphany.svg b/docs/assets/images/logos/epiphany.svg new file mode 100644 index 0000000000..0fb1422352 --- /dev/null +++ b/docs/assets/images/logos/epiphany.svg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f9a342ee107905cf49f0ab9da2eee183a3e3d4fea38e0ee55f3dc0eb89af166 +size 3825 diff --git a/docs/assets/images/security.png b/docs/assets/images/security.png new file mode 100644 index 0000000000..f367be237c --- /dev/null +++ b/docs/assets/images/security.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a28a9d571cf392a19eba12162440b40ad32b2f1cb4d55994b789bdbbc891fe7 +size 6679 diff --git a/docs/home/BUILD-ENVIRONMENT.md b/docs/home/BUILD-ENVIRONMENT.md new file mode 100644 index 0000000000..6b81d9cd00 --- /dev/null +++ b/docs/home/BUILD-ENVIRONMENT.md @@ -0,0 +1 @@ +# TBD \ No newline at end of file diff --git a/docs/home/CONTRIBUTING.md b/docs/home/CONTRIBUTING.md new file mode 100644 index 0000000000..13950bb583 --- /dev/null +++ b/docs/home/CONTRIBUTING.md @@ -0,0 +1,42 @@ +# Contributing + + + +- [Contributing](#contributing) + - [Welcome](#welcome) + - [Workflow](#workflow) + - [Security](#security) + - [Group/Project Layouts](#group-project-layouts) + + + +## Welcome + +All contributions are welcomed! Contributions can be anything including adding a bug issue. Anything that contributes in any way to Epiphany is considered a contribution and is welcomed. + +## Workflow - TBD + +## Security + +Security *must* be built-in from day one on any merge request. Meaning, all changes must be able to pass security checks and that you have made sure not to include any hardcoded values such as keys, IDs, passwords, etc. + +## Group/Project Layouts + +Epiphany is broken into a hierarchy with `epiphany-platform` as a group in GitHub that contains folders such as `core`, `docs`, etc. Of course, you can use whatever IDE/editor you like but a good one for this is `Visual Studio Code`. 
It's based on the same foundation as `Atom` but seems to have more options and when dealing with Azure, it's actually easier. + +```text + +# Create a folder called epiphany +mkdir epiphany +cd epiphany + +# Git clone each project in the epiphany group +git clone git@github.com:epiphany-platform/epiphany.git + +# Folders inside epiphany-platform repository: +# core - Base core of Epiphany. +# data - Data.yaml files that define Epiphany clusters. +# docs - Epiphany platform documentation. +# examples - Examples of how to configure an Epiphany environment, add an application to the Epiphany platform, etc. + +``` diff --git a/docs/home/GITWORKFLOW.md b/docs/home/GITWORKFLOW.md new file mode 100644 index 0000000000..6b81d9cd00 --- /dev/null +++ b/docs/home/GITWORKFLOW.md @@ -0,0 +1 @@ +# TBD \ No newline at end of file diff --git a/docs/home/GOVERNANCE.md b/docs/home/GOVERNANCE.md new file mode 100644 index 0000000000..57cb983337 --- /dev/null +++ b/docs/home/GOVERNANCE.md @@ -0,0 +1,86 @@ +# Epiphany Governance Model + + + +- [Epiphany Governance Model](#epiphany-governance-model) + - [Overview](#overview) + - [Roles And Responsibilities](#roles-and-responsibilities) + - [Committers](#committers) + - [Contributors](#contributors) + - [Users](#users) + - [Support](#support) + - [Contribution Process](#contribution-process) + - [Decision-Making Process](#decision-making-process) + - [Git Workflow and Contributing](#git-workflow-and-contributing) + + + +## Overview + +This project is led by a project leader and managed by the community. That is, the community actively contributes to the day-to-day maintenance of the project, but the general strategic line is drawn by the project leader. In case of disagreement, they have the last word. It is the project leader’s job to resolve disputes within the community and to ensure that the project is able to progress in a coordinated way.
In turn, it is the community’s job to guide the decisions of the project leader through active engagement and contribution. + +## Roles And Responsibilities + +Typically, the project leader, or project lead, is self-appointed. However, because the community always has the ability to fork, this person is fully answerable to the community. The project lead’s role is a difficult one: they set the strategic objectives of the project and communicate these clearly to the community. They also have to understand the community as a whole and strive to satisfy as many conflicting needs as possible, while ensuring that the project survives in the long term. + +In many ways, the role of the project leader is less about dictatorship and more about diplomacy. The key is to ensure that, as the project expands, the right people are given influence over it and the community rallies behind the vision of the project lead. The lead’s job is then to ensure that the committers (see below) make the right decisions on behalf of the project. Generally speaking, as long as the committers are aligned with the project’s strategy, the project lead will allow them to proceed as they desire. + +## Committers + +Committers are core contributors who have made several valuable contributions to the project and are now relied upon to both write code directly to the repository and screen the contributions of others. In many cases they are programmers but it is also possible that they contribute in a different role. Typically, a committer will focus on a specific aspect of the project, and will bring a level of expertise and understanding that earns them the respect of the community and the project lead. The role of committer is not an official one, it is simply a position that influential members of the community will find themselves in as the project lead looks to them for guidance and support. + +Committers have no authority over the overall direction of the project. 
However, they do have the ear of the project lead. It is a committer’s job to ensure that the lead is aware of the community’s needs and collective objectives, and to help develop or elicit appropriate contributions to the project. Often, committers are given informal control over their specific areas of responsibility, and are assigned rights to directly modify certain areas of the source code. That is, although committers do not have explicit decision-making authority, they will often find that their actions are synonymous with the decisions made by the lead. + +## Contributors + +Contributors are community members who either have no desire to become committers, or have not yet been given the opportunity by the project leader. They make valuable contributions, such as those outlined in the list below, but generally do not have the authority to make direct changes to the project code. Contributors engage with the project through communication tools, such as email lists, and via reports and patches attached to issues in the issue tracker, as detailed in our community tools document. + +Anyone can become a contributor. There is no expectation of commitment to the project, no specific skill requirements and no selection process. To become a contributor, a community member simply has to perform one or more actions that are beneficial to the project. + +Some contributors will already be engaging with the project as users, but will also find themselves doing one or more of the following: + +* supporting new users (current users often provide the most effective new user support) +* reporting bugs +* identifying requirements +* supplying graphics and web design +* programming +* assisting with project infrastructure +* writing documentation +* fixing bugs +* adding features + +As contributors gain experience and familiarity with the project, they may find that the project lead starts relying on them more and more. 
When this begins to happen, they gradually adopt the role of committer, as described above. + +## Users + +Users are community members who have a need for the project. They are the most important members of the community: without them, the project would have no purpose. Anyone can be a user; there are no specific requirements. + +Users should be encouraged to participate in the life of the project and the community as much as possible. User contributions enable the project team to ensure that they are satisfying the needs of those users. Common user activities may include (but are not limited to): + +* evangelizing about the project +* informing developers of project strengths and weaknesses from a new user’s perspective +* providing moral support (a ‘thank you’ goes a long way) +* providing support + +Users who continue to engage with the project and its community will often find themselves becoming more and more involved. Such users may then go on to become contributors, as described above. + +## Support + +All participants in the community are encouraged to provide support for new users within the project management infrastructure. This support is provided as a way of growing the community. Those seeking support should recognize that all support activity within the project is voluntary and is therefore provided as and when time allows. A user requiring guaranteed response times or results should therefore seek to purchase a support contract from a vendor. (Of course, that vendor should be an active member of the community.) However, for those willing to engage with the project on its own terms, and willing to help support other users, the community support channels are ideal. + +## Contribution Process + +Anyone can contribute to the project, regardless of their skills, as there are many ways to contribute. For instance, a contributor might be active on the project mailing list and issue tracker, or might supply patches. 
The various ways of contributing are described in more detail in our roles in open source document. + +The developer mailing list is the most appropriate place for a contributor to ask for help when making their first contribution. + +## Decision-Making Process + +The project leadership model does not need a formal conflict resolution process, since the project lead’s word is final. If the community chooses to question the wisdom of the actions of a committer, the project lead can review their decisions by checking the email archives, and either uphold or reverse them. + +## Git Workflow and Contributing + +These two documents go into detail on how to contribute and work within the branching strategy used by Epiphany: + +* [Gitworkflow.md](/GITWORKFLOW.md) +* [Contributing.md](/CONTRIBUTING.md) diff --git a/docs/home/HOWTO.md b/docs/home/HOWTO.md new file mode 100644 index 0000000000..ae529d6473 --- /dev/null +++ b/docs/home/HOWTO.md @@ -0,0 +1,1003 @@ + +# How-To Guides + +## Contents + +- [Prerequisites for Epiphany engine](#prerequisites-to-run-epiphany-engine) + - [System](#prerequisites-to-run-epiphany-engine-locally) + - [Docker image for development](#prerequisites-to-run-epiphany-engine-docker-development) + - [Docker image for deployment](#prerequisites-to-run-epiphany-engine-deploy) +- [Epiphany cluster:](#) + - [How to create an Epiphany cluster on premise](#how-to-create-an-epiphany-cluster-on-premise) + - [How to create an Epiphany cluster on Azure](#how-to-create-an-epiphany-cluster-on-azure) + - [How to create production environment on Azure](#how-to-create-production-environment-on-azure) + - [Build artifacts](#build-artifacts) + - [How to scale Kubernetes and Kafka](#how-to-scale-kubernetes-and-kafka) + - [Kafka replication and partition setting](#kafka-replication-and-partition-setting) + - [RabbitMQ installation and setting](#rabbitmq-installation-and-setting) +- Monitoring + - [Import and create of Grafana 
dashboards](#import-and-create-of-grafana-dashboards) + - [How to configure Kibana](#how-to-configure-kibana) + - [How to configure Prometheus alerts](#how-to-configure-prometheus-alerts) + - [How to configure Azure additional monitoring and alerting](#how-to-configure-azure-additional-monitoring-and-alerting) +- Kubernetes + - [How to do Kubernetes RBAC](#how-to-do-kubernetes-rbac) + - [How to run an example app](#how-to-run-an-example-app) + - [How to run CronJobs](#how-to-run-cronjobs) + - [How to test the monitoring features](#how-to-test-the-monitoring-features) + - [How to run chaos on Epiphany Kubernetes cluster and monitor it with Grafana](#how-to-run-chaos-on-epiphany-kubernetes-cluster-and-monitor-it-with-grafana) + - [How to tunnel Kubernetes dashboard from remote kubectl to your PC](#how-to-tunnel-kubernetes-dashboard-from-remote-kubectl-to-your-pc) + - [How to setup Azure VM as docker machine for development](#how-to-setup-azure-vm-as-docker-machine-for-development) + - [How to upgrade Kubernetes cluster](#how-to-upgrade-kubernete-cluster) + - [How to authenticate to Azure AD app](#how-to-authenticate-to-azure-ad-app) + - [How to expose service through HA Proxy load balancer](#how-to-expose-service-lb) +- Security + - [How to use TLS/SSL certificate with HA Proxy](#how-to-use-tls/ssl-certificate-with-ha-proxy) + - [How to use Kubernetes Secrets](#how-to-use-kubernetes-secrets) + - [How to enable or disable network traffic - firewall](#how-to-enable-or-disable-network-traffic) + - [Client certificate for Azure VPN connection](#client-certificate-for-azure-vpn-connection) +- [Data and log retention](#data-and-log-retention) + - [Elasticsearch](#elasticsearch) + - [Grafana](#grafana) + - [Kafka](#kafka) + - [Kibana](#kibana) + - [Kubernetes](#kubernetes) + - [Prometheus](#prometheus) + - [Zookeeper](#zookeeper) +- Databases + - [How to configure PostgreSQL](#how-to-configure-postgresql) + +## Prerequisites to run Epiphany engine + +### System + +To be 
able to run the Epiphany engine from your local OS you have to install: + +- Bash 4.4+ + - Should be natively installed on Linux distributions. + - MacOS version of bash most likely needs upgrading. + - For Windows 10 you can install Ubuntu subsystem. + - For Windows 7 see the docker image options below. +- Ansible 2.6+ +- Hashicorp Terraform 0.11.8+ +- jq (JSON Query tool: https://stedolan.github.io/jq/) +- Python 2.7 + - jinja2 2.10+ + - jmespath 0.9.3+ +- Git +- Azure CLI 2.0+ +- SSH + +This can both be used for deploying/managing clusters or for development. + +### Docker image for development + +To facilitate an easier path for developers to contribute to Epiphany we have a development docker image based on alpine. This image will help to more easily setup a development environment or to develop on systems which do not support bash like Windows 7. + +The following prerequisites are needed when working with the development image: + - [Docker](https://www.docker.com/) + - For Windows 7 check [here](https://docs.docker.com/toolbox/toolbox_install_windows/) + - [Git](https://git-scm.com/) + +Now, to build it locally and run it: + + +1. Run the following to build the image locally: +```docker build -t epiphany-dev -f core/src/docker/dev/Dockerfile .``` +2. To run the locally built image in a container use: +```docker run -it -v LOCAL_DEV_DIR:/epiphany --rm epiphany-dev``` +Where ```LOCAL_DEV_DIR``` should be replaced with the local path to your core and data repositories. This will then be mapped to ```/epiphany``` inside the container. If everything is ok you will be presented with a bash terminal from which one can run the Epiphany engine. Note that when filling in your data YAMLs one needs to specify the paths from the container's point of view. + + + + + + + + + + +### Docker image for deployment + +For people who are only using the Epiphany engine to deploy and maintain clusters there is a Dockerfile for the image with the engine already embedded.
+ +To get it from the registry and run it: +1. Build a dev image as described [here](#docker-image-for-development). +2. Run the following command to build the deployment image locally: +```docker build -t epiphany-deploy -f core/src/docker/deploy/Dockerfile .``` +3. To run the pulled image in a container use: + + + +```docker run -it -v LOCAL_DATA_DIR:/epiphany/data \``` +``` -v LOCAL_BUILD_DIR:/epiphany/build \``` +``` --rm epiphany-deploy``` +```LOCAL_DATA_DIR``` should be the host input directory for your data YAMLs and certificates. ```LOCAL_BUILD_DIR``` should be the host directory where you want the Epiphany engine to write its build output. If everything is ok you will be presented with a bash terminal from which one can run the Epiphany engine. Note that when filling in your data YAMLs one needs to specify the paths from the container's point of view. + +[`Azure specific`] Ensure that you already have enough resources/quotas accessible in your region/subscription on Azure before you run Epiphany - depending on your configuration it can create a large number of resources. + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Import and create of Grafana dashboards + +Epiphany uses Grafana for monitoring data visualization. Epiphany installation creates a Prometheus datasource in Grafana, so the only additional step you have to do is to create your dashboard. + +### Creating dashboards + +You can create your own dashboards - the [Grafana getting started](http://docs.grafana.org/guides/getting_started/) page will help you with it. +Knowledge of Prometheus will be really helpful when creating diagrams since it uses [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/) to fetch data. + +### Importing dashboards + +There are also many ready-to-use [Grafana dashboards](https://grafana.com/dashboards) created by the community - remember to check the license before importing any of those dashboards. +To import an existing dashboard: + +1.
If you have found dashboard that suits your needs you can import it directly to Grafana going to menu item `Dashboards/Manage` in your Grafana web page. +2. Click `+Import` button. +3. Enter dashboard id or load json file with dashboard definition +4. Select datasource for dashboard - you should select `Prometheus`. +5. Click `Import` + +### How to configure PostgreSQL + +To configure PostgreSQL login to server using ssh and switch to postgres user with command: + +```bash +sudo -u postgres -i +``` + +And then configure database server using psql according to your needs and +PostgreSQL documentation, to which link you can find under address: + +https://www.postgresql.org/docs/ + +### Components used for monitoring + +There are many monitoring components deployed with Epiphany that you can visualize data from. The knowledge which components are used is important when you look for appropriate dashboard on Grafana website or creating your own query to Prometheus. + +List of monitoring components - so called exporters: + +- cAdvisor +- HAProxy Exporter +- JMX Exporter +- Kafka Exporter +- Node Exporter +- Zookeeper Exporter + +When dashboard creation or import succeeds you will see it on your dashboard list. + +## How to configure Kibana + +In order to start viewing and analyzing logs with Kibana, you first need to add an index pattern for Filebeat according to the following steps: + +1. Goto the `Management` tab +2. Select `Index Patterns` +3. On the first step define as index pattern: + `filebeat-*` + Click next. +4. Configure the time filter field if desired by selecting `@timestamp`. This field represents the time that events occurred or were processed. You can choose not to have a time field, but you will not be able to narrow down your data by a time range. + +This filter pattern can now be used to query the Elasticsearch indices. + +By default Kibana adjusts the UTC time in `@timestamp` to the browser's local timezone. 
This can be changed in `Management` > `Advanced Settings` > `Timezone for date formatting`. + +## How to configure Prometheus alerts + +In order to send messages from Prometheus add monitoring block to your data.yaml similar to the one below: + +```yaml + monitoring: + alerts: + enable: true + handlers: + mail: + smtp_from: 'some-sender@example.com' + smtp_host: 'somesmtp.example.com:587' + smtp_auth_username: 'someusername' + smtp_auth_password: 'somepassword' + smtp_require_tls: true + recipients: ['recipient1@example.com', 'recipient2@example.com'] + rules: + - name: "disk" + expression: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes) < 99 + duration: 1m #1s, 1m, 1h, 1d, 1w, ... + severity: critical + message: "Disk space Exceeded" + - name: "updown" + expression: up == 0 + duration: 1m #1s, 1m, 1h, 1d, 1w, ... + severity: critical + message: "Instance down" + ``` + + + monitoring: - this covers whole monitoring section and is needed to define alerts + alerts: - this covers whole alerts section and is needed to define alerts + enable: true - global switch to turn off/on alerts. Set to true enable alerts. + handlers: - this section covers email handlers, right now only email is supported + mail: - global configuration for smtp and email + smtp_from: 'some-sender@example.com' - name of email sender + smtp_host: 'somesmtp.example.com:port' - address of your smtp server with port + smtp_auth_username: 'someusername' - name of your smtp server username + smtp_auth_password: 'somepassword' - password for your smtp server user + smtp_require_tls: true - enabling/disabling tls. Set to true to enable TLS support. + recipients: ['recipient1@example.com', 'recipient2@example.com'] - list of recipients in form + ['recipient1@example.com', 'recipient2@example.com']. At least one recipient has to be declared. + rules: - this section covers rules for Prometheus to enable monitoring. Each of rule have to follow pattern defined below. 
+ - name: "disk" - name of file for Prometheus where rule will be stored. Permitted are alphanumerical characters only. + expression: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes) < 99 - rule in format of Prometheus queries + duration: 1m #1s, 1m, 1h, 1d, 1w, ... - duration of event after which notification will be sent, follow Prometheus convention + severity: critical - severity label, that will be shown in the email sent to users + message: "Disk space Exceeded" - email topic that will be shown in the email sent to users + +More information about Prometheus queries you can find under links provided below: + +https://prometheus.io/docs/prometheus/latest/querying/basics/ + +https://prometheus.io/docs/prometheus/latest/querying/examples/ + +Right now we are only supporting email messages, but we are working heavily on introducing integration with Slack and Pager Duty. + +## How to configure Azure additional monitoring and alerting + +Setting up additional monitoring on Azure for redundancy is a good practice and might catch issues the Epiphany monitoring might miss, like: + +- Azure issues and resource downtime +- Issues with the VM which runs the Epiphany monitoring and Alerting (Prometheus) + +More information about Azure monitoring and alerting you can find under links provided below: + +https://docs.microsoft.com/en-us/azure/azure-monitor/overview + +https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/monitoring-overview-alerts + +## How to do Kubernetes RBAC + +Kubernetes that comes with Epiphany has an admin account created, you should consider creating more roles and accounts - especially when having many deployments running on different namespaces. + +To know more about RBAC in Kubernetes use this [link](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) + +## How to create an Epiphany cluster on premise + +0.
Pull `core` repository and if needed `data` repository (contains data.yaml files that can be used as example or base for creating your own data.yaml). + +1. Prepare your VM/Metal servers: + 1. Install one of supported OS: RedHat 7.4+, Ubuntu 16.04+ + 2. Create user account with sudo privileges and nopasswd that will use rsa key for login. + 3. Assign static IP addresses for each of the machines - those addresses should not change after cluster creation. + 4. Assign hostnames for machines. + 5. Ensure machines have internet access - it will be needed during Epiphany execution. + 6. Machines will strongly utilize communication between each other, so ensure this communication does not go through proxy. + 7. Note down IP addresses and hostnames of your machines. + +2. If you need you can create new directory in `repository_path/data/your_platform/` or you can use existing profile from data repository. Where `your_platform` can be `vmware`, `vbox`, `metal`. +3. Create or modify data.yaml. +4. Fill in data.yaml with hostname information (`nodes[*]/hosts/name`). +5. Fill in data.yaml with IP information (`nodes[*]/hosts/ips/public`). +6. You can adjust roles for each machine - according to your needs (`nodes[*]/ansible_roles`). +7. Run `bash epiphany -a -b -i -p your_platform -f your_profile` in main epiphany directory. Do not use trailing slash after profile name or as prefix to infrastructure. +8. Store artifacts in `/build` directory securely. Keep those files in order to upgrade/scale your cluster. + +## How to create an Epiphany cluster on Azure + +0. Pull core repository and if needed data repository (contains data.yaml files that can be used as example or base for creating your own data.yaml). + +1. If you need you can create new directory in `repository_path/data/azure/infrastructure/` or you can use existing profile from data repository. + +2. 
Fill/modify content in the `data.yml` file in `repository_path/data/azure/infrastructure/your_profile` according to your needs. Please, make sure you have enough free public ips/cores assigned to your subscription. + + 1. Data.yaml files can be very verbose and at the beginning you may find it difficult to modify them, especially when defining large clusters with many virtual machines. Instead of defining a huge data.yaml file - you can use a template. + 2. Look at the data repository, there is a template for Azure environments in path `repository_path/data/azure/infrastructure/epiphany-template` + 3. Create a folder and a `basic-data.yaml` file in it (like `/infrastructure/epiphany-rhel-playground/basic-data.yaml`). This file contains basic data for a new cluster like subscription, number of VMs, or keys location. + 4. Execute the Epiphany engine with the following command when using a template file: `bash epiphany -a -b -i -f infrastructure/your_profile -t /infrastructure/epiphany-template` + +3. If you executed point 2.4 - skip the next step and go to 5. + +4. Run `bash epiphany -a -b -i -f infrastructure/your_profile` in the main epiphany directory. Do not use a trailing slash after the profile name or as a prefix to infrastructure. + +5. The first time you run the above command, it will prompt you to log in to Microsoft and show you something like the following: + + ```text + To sign in, use a web browser to open the page https://microsoft.com/devicelogin and enter the code DBD7WRF3H to authenticate. + ``` + +6. Store artifacts in the `/build` directory securely. Keep those files in order to upgrade/scale your cluster. + + Follow the instructions and a token will be generated in your home directory and then a Service Principal will be created and you will not be prompted for this again. + +7. Go to section [Azure post deployment manual steps](#azure-post-deployment-manual-steps) that may be applicable for your deployment.
+ +## How to create production environment on Azure + +Keep in mind that Epiphany will create public IPs for each of the machines; you can remove them, but running Epiphany again on the same cluster will recreate the public IPs. + +There are no manual steps required when you finished with [How to create an Epiphany cluster on Azure](#how-to-create-an-epiphany-cluster-on-azure) until you decide to move to a `production environment` where the cluster's virtual machines `must not` be exposed to the internet (except the load balancer - HAProxy role). + +A production environment on cloud should be composed of two elements: + +1. Demilitarized (`DMZ`) group that contains only the load balancer (HAProxy role) +2. Applications (`APPS`) group that contains all other roles + +Both elements are deployed independently (for now), that is why some manual steps, that will be described in this chapter, are required. + +### 1. DMZ group + +The DMZ group should contain the HAProxy role that is used for load balancing and TLS termination. The VM that hosts HAProxy should be `the only one` accessible from the internet. You can see a DMZ implementation with VPN for the Epiphany build cluster in `repository_path/data/azure/infrastructure/epiphany-bld-dmz`. + +### 2. APPS group + +The APPS group contains all features/roles required by your installation - this group should also contain (you can enable or disable it) a VPN connection so you can access dashboards and logs. You can see an APPS group implementation with VPN for the Epiphany build cluster in `repository_path/data/azure/infrastructure/epiphany-bld-apps`, there is nothing special about this configuration - a normal Epiphany data.yaml with VPN enabled (just don't forget to specify your VPN client certificate). + +When you have executed both deployments you should get two resource groups (dmz, apps) with two different VNETs and VPNs. +Now the manual steps are: + +1. Peer your VNETs. Go to the VNET settings blade and add peering to the other VNET - you have to do it twice, both ways. + +2.
Add monitoring endpoints for Prometheus. Load balancer (HAProxy) is separate deployment (for now), but still we have to monitor and take logs from it. That is why we have to add scrape configs for Prometheus (monitoring) + + - SSH into monitoring machine and add `two` files in folder `/etc/prometheus/file_sd/` + + ```yaml + # OS Monitoring - haproxy-vm-node + - targets: ['HAPROXY_MACHINE_PRIVATE_IP:9100'] + labels: + "job": "node" + ``` + + ```yaml + # HAProxy monitoring - haproxy-exporter + - targets: ['HAPROXY_MACHINE_PRIVATE_IP:9101'] + labels: + "job": "haproxy" + ``` + +3. ... and configure address for Elasticsearch (logging) + + - SSH into Load Balancer (HAProxy) machine, and edit file `/etc/filebeat/filebeat.yml`. + - Find `### KIBANA ###` section and add private IP address of Logging VM (`Kibana`) as host value + - Find `### OUTPUTS ###` section and add private IP address of Logging VM (`Elasticsearch`) as host value + +4. For security reasons you should also disassociate public IPs from your APPS virtual machines. + +5. Ensure you defined firewall settings for public VM (load balancer): [How to enable/disable network traffic- firewall](#how-to-enable-disable-network-traffic) + +## How to run an example app + +Here we will get a simple app to run using Docker through Kubernetes. We assume you are using Windows 10, have an Epiphany cluster on Azure ready and have an Azure Container Registry ready (might not be created in early version Epiphany clusters - if you don't have one you can skip to point no 11 and test the cluster using some public app from the original Docker Registry). Steps with asterisk can be skipped. + +1. Install [Chocolatey](https://chocolatey.org/install) + +2. Use Chocolatey to install: + + - Docker-for-windows (`choco install docker-for-windows`, requires Hyper-V) + - Azure-cli (`choco install azure-cli`) + +3. Make sure Docker for Windows is running (run as admin, might require a restart) + +4. 
Run `docker build -t sample-app:v1 .` in examples/dotnet/epiphany-web-app. + +5. *For test purposes, run your image locally with `docker run -d -p 8080:80 --name myapp sample-app:v1` and head to `localhost:8080` to check if it's working. + +6. *Stop your local docker container with: `docker stop myapp` and run `docker rm myapp` to delete the container. + +7. *Now that you have a working docker image we can proceed to the deployment of the app on the Epiphany Kubernetes cluster. + +8. Run `docker login myregistry.azurecr.io -u myUsername -p myPassword` to login into your Azure Container Registry. Credentials are in the `Access keys` tab in your registry. + +9. Tag your image with: `docker tag sample-app:v1 myregistry.azurecr.io/samples/sample-app:v1` + +10. Push your image to the repo: `docker push myregistry.azurecr.io/samples/sample-app:v1` + +11. SSH into your Epiphany clusters master node. + +12. *Run `kubectl cluster-info` and `kubectl config view` to check if everything is okay. + +13. Run `kubectl create secret docker-registry myregistry --docker-server myregistry.azurecr.io --docker-username myusername --docker-password mypassword` to create k8s secret with your registry data. + +14. Create `sample-app.yaml` file with contents: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: sample-app + spec: + selector: + matchLabels: + app: sample-app + replicas: 2 + template: + metadata: + labels: + app: sample-app + spec: + containers: + - name: sample-app + image: myregistry.azurecr.io/samples/sample-app:v1 + ports: + - containerPort: 80 + imagePullSecrets: + - name: myregistry + ``` + +15. Run `kubectl apply -f sample-app.yaml`, and after a minute run `kubectl get pods` to see if it works. + +16. Run `kubectl expose deployment sample-app --type=NodePort --name=sample-app-nodeport`, then run `kubectl get svc sample-app-nodeport` and note the second port. + +17. Run `kubectl get pods -o wide` and check on which node is the app running. + +18. 
Access the app through [AZURE_NODE_VM_IP]:[PORT] from the two previous points - firewall changes might be needed. + +## How to run CronJobs + +1. Follow the previous point using examples/dotnet/Epiaphany.SampleApps/Epiphany.SampleApps.CronApp + +2. Create `cronjob.yaml` file with contents: + + ```yaml + apiVersion: batch/v1beta1 + kind: CronJob + metadata: + name: sample-cron-job + spec: + schedule: "*/1 * * * *" # Run once a minute + failedJobsHistoryLimit: 5 + jobTemplate: + spec: + template: + spec: + containers: + - name: sample-cron-job + image: myregistry.azurecr.io/samples/sample-cron-app:v1 + restartPolicy: OnFailure + imagePullSecrets: + - name: myregistrysecret + ``` + +3. Run `kubectl apply -f cronjob.yaml`, and after a minute run `kubectl get pods` to see if it works. + +4. Run `kubectl get cronjob sample-cron-job` to get status of our cron job. + +5. Run `kubectl get jobs --watch` to see job scheduled by the “sample-cron-job” cron job. + +## How to test the monitoring features + +Prerequisites: Epiphany cluster on Azure with at least a single VM with `prometheus` and `grafana` roles enabled. + +1. Copy ansible inventory from `build/epiphany/*/inventory/` to `examples/monitoring/` + +2. Run `ansible-playbook -i NAME_OF_THE_INVENTORY_FILE grafana.yml` in `examples/monitoring` + +3. In the inventory file find the IP adress of the node of the machine that has grafana installed and head over to `https://NODE_IP:3000` - you might have to head over to Portal Azure and allow traffic to that port in the firewall, also ignore the possible certificate error in your browser. + +4. Head to `Dashboards/Manage` on the side panel and select `Kubernetes Deployment metrics` - here you can see a sample kubernetes monitoring dashboard. + +5. Head to `http://NODE_IP:9090` to see Prometheus UI - there in the dropdown you have all of the metrics you can monitor with Prometheus/Grafana. + +## How to run chaos on Epiphany Kubernetes cluster and monitor it with Grafana + +1. 
SSH into the Kubernetes master. + +2. Copy over `chaos-sample.yaml` file from the example folder and run it with `kubectl apply -f chaos-sample.yaml` - it takes code from `github.com/linki/chaoskube` so normal security concerns apply. + +3. Run `kubectl create clusterrolebinding chaos --clusterrole=cluster-admin --user=system:serviceaccount:default:default` to start the chaos - random pods will be terminated with 5s ferquency, configurable inside the yaml file. + +4. Head over to Grafana at `https://NODE_IP:3000`, open a new dashboard, add a panel, set Prometheus as a data source and put `kubelet_running_pod_count` in the query field - now you can see how Kubernetes is replacing killed pods and balancing them between the nodes. + +5. Run `kubectl get svc nginx-service` and note the second port. You can access the nginx page via `[ANY_CLUSTER_VM_IP]:[PORT]` - it is accessible even though random pods carrying it are constantly killed at random, unless you have more vms in your cluster than deployed nginx instances and choose IP of one not carrying it. + +## How to test the central logging features + +Prerequisites: Epiphany cluster on Azure with at least a single VM with `elasticsearch`, `kibana` and `filebeat` roles enabled. + +1. Connect to kubectl using kubectl proxy or directly from Kubernetes master server + +2. Apply from epiphany repository `extras/kubernetes/pod-counter` `pod-counter.yaml` with command: `kubectl apply -f yourpath_to_pod_counter/pod-counter.yaml` + + Paths are system dependend so please be aware of applying correct separator for your operatins system. + +3. In the inventory file find the IP adress of the node of the machine that has kibana installed and head over to `http://NODE_IP:5601` - you might have to head over to Portal Azure and allow traffic to that port in the firewall. + +4. You can right now search for data from logs in Discover section in Kibana after creating filebeat-* index pattern. 
To create index pattern click Discover, then in Step 1: Define index pattern as filebeat-*. Then click Next step. In Step 2: Configure settings click Create index pattern. Right now you can go to Discover section and look at output from your logs. + +5. You can verify if CounterPod is sending messages correctly and filebeat is gathering them correctly querying for `CounterPod` in search field in Discover section. + +6. For more informations refer to documentation: + +## How to tunnel kubernetes dashboard from remote kubectl to your PC + +1. SSH into server, and forward port 8001 to your machine `ssh -i epi_keys/id_rsa operations@40.67.255.155 -L 8001:localhost:8001` NOTE: substitute IP with your cluster master's IP. + +2. On **remote** host: get admin token bearer: `kubectl describe secret $(kubectl get secrets --namespace=kube-system | grep admin-user | awk '{print $1}') --namespace=kube-system | grep -E '^token' | awk '{print $2}' | head -1` NOTE: save this token for next points. + +3. On **remote** host, open proxy to the dashboard `kubectl proxy` + +4. Now on your **local** machine navigate to `http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/overview?namespace=default` + +5. When prompted to put in credentials, use admin token from the previous point. + +## How to setup Azure VM as docker machine for development + +0. Make sure you have docker-machine installed `(choco install docker-machine)` + +1. Run the following: + + ```bash + docker-machine create --driver azure --azure-subscription-id --azure-resource-group --azure-vnet --azure-subnet default --azure-location westeurope + ``` + +2. When the creation succeedes go ahead and connect to your docker-machine using `docker-machine env ` and later invoke commands as instructed by docker-machine + +3. 
Check if everything is working with `docker run hello-world` + +Now your docker containers are running on a separate system without you having to worry about overhead. +Source: + +# How to use Kubernetes Secrets + +Prerequisites: Epiphany Kubernetes cluster + +1. SSH into the Kubernetes master. + +2. Run `echo -n 'admin' > ./username.txt`, `echo -n 'VeryStrongPassword!!1' > ./password.txt` and `kubectl create secret generic mysecret --from-file=./username.txt --from-file=./password.txt` + +3. Copy over `secrets-sample.yaml` file from the example folder and run it with `kubectl apply -f secrets-sample.yaml` + +4. Run `kubectl get pods`, copy the name of one of the ubuntu pods and run `kubectl exec -it POD_NAME -- /bin/bash` with it. + +5. In the pods bash run `printenv | grep SECRET` - Kubernetes secret created in point 2 was attached to pods during creation (take a look at `secrets-sample.yaml`) and are availiable inside of them as an environmental variables. + +## How to authenticate to Azure AD app + +1. Register you application. Go to Azure portal to `Azure Active Directory => App registrations` tab. + +2. Click button `New application registration` fill the data and confirm. + +3. Deploy app from `examples/dotnet/Epiphany.SampleApps/Epiphany.SampleApps.AuthService`. + + This is a test service for verification Azure AD authentication of registered app. ([How to deploy app](#how-to-run-an-example-app)) + +4. Create secret key for your app `settings => keys`. Remember to copy value of key after creation. + +5. Try to authenticate (e.g. using postman) calling service api `/api/auth/` with following Body application/json type parameters : + + ```json + { + "TenantId": "", + "ClientId": "", + "Resource": "https://graph.windows.net/", + "ClientSecret": "" + } + ``` + + - TenantId - Directory ID, which you find in `Azure active Directory => Properties` tab. 
+ + - ClientId - Application ID, which you find in details of previously registered app `Azure Active Directory => App registrations => your app` + + - Resource - is the service root of Azure AD Graph API. The Azure Active Directory (AD) Graph API provides programmatic access to Azure AD through OData REST API endpoints. You can construct your own Graph API URL. ([How to construct a Graph API URL](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-graph-api-quickstart)) + + - ClientSecret - Created secret key from 4. point. + +6. The service should return Access Token. + +## How to expose service through HA Proxy load balancer + +1. Add haproxy role to your data.yaml +2. Create a folder repository_path/core/src/ansible/roles/haproxy/vars/ +3. Create a file repository_path/core/src/ansible/roles/haproxy/vars/main.yml: +4. Add to repository_path/core/src/ansible/roles/haproxy/vars/main.yml content: + + ```yaml + --- + service_port: your_service_port + ``` + + Where `your_service_port` is a port where your service is exposed using NodePort. + +## How to set HA Proxy load balancer to minimize risk of Slowloris like attacks + +1. Add haproxy_tls_termination role to your data.yaml +2. If you want to minimize risk of Slowloris like attacks add to your data.yaml in section for haproxy: + + ```yaml + haproxy: + http_request_timeout: 5s + ``` + + Where http_request_timeout is the number_of_seconds with s after which connection to HAProxy will be terminated by HAProxy. + This parameter is optional, if is not present no timeout http-request in global section of HAProxy configuration will be set. + +## How to use TLS/SSL certificate with HA Proxy + +If you want to use HAProxy with TLS/SSL certificate follow the instruction below. + +1. Add haproxy_tls_termination role to your data.yaml +2. 
If you want to use your certificates, you can add to section core to your data.yaml: + + ```yaml + haproxy: + haproxy_certs_dir: your_path_to_certificates + ``` + + Your certificates will be copied and applied automatically to HA Proxy configuration. + + Please be aware that `your_path_to_certificates` cannot contain variables (`$HOME`) or tilde (`~`) as this will make deployment of Epiphany fail. Additionally if you need more than one DNS name for your frontend you need to provide certificates on your own, as there is only one self-signed certificate generated by this role with CN localhost. For multiple backends you need to provide also mapping as described in later part of this document. + +3. If you don't want to apply your certificates that will be generated automatically, then just don't put any certificate in `your_path_to_certificates` or don't put section with `haproxy: haproxy_certs_dir` in your data.yaml + +4. Below you can find example of configuration: + ```yaml + haproxy: + haproxy_certs_dir: /home/epiphany/certs/ + frontend: + - name: https_front + port: 443 + https: yes + backend: + - http_back1 + - http_back2 + domain_backend_mapping: + - domain: backend1.domain.com + backend: http_back1 + - domain: backend2.domain.com + backend: http_back2 + - name: http_front1 + port: 80 + https: no + backend: + - http_back2 + - name: http_front2 + port: 8080 + https: no + backend: + - http_back1 + - http_back2 + domain_backend_mapping: + - domain: http-backend1.domain.com + backend: http_back1 + - domain: http-backend2.domain.com + backend: http_back2 + backend: + - name: http_back1 + server_groups: + - worker + port: 30001 + - name: http_back2 + server_groups: + - worker + - kibana + port: 30002 + ``` + +5. Parameters description: + + `haproxy_certs_dir` - (Optional) Path on machine from which you run Epiphany installer where certificates generated by you are stored. 
If not provided, one self-signed certificate with CN localhost will be generated; it works only with one frontend definition — in other cases HAProxy won't be able to redirect you to the correct backend.
+ - `name` - (Mandatory) Name of each configuration for backend, must match frontend backend configuration and `domain_backend_mapping` backend part in frontend section. + - `server_groups` - (Mandatory) This is a list of server groups, each position has to start with `-`. At least one `server_group` used by backend must exist. It must match Epiphany role e.g. `kibana`, `worker` etc. + - `port` - (Mandatory) Port on which backend service is exposed. + +## How to upgrade Kubernetes cluster + +Upgrade procedure might be different for each Kubernetes version. Upgrade shall be done only from one minor version to next minor version. For example, upgrade from 1.9 to 1.11 looks like this: + +```text +1.9.x -> 1.9.y +1.9.y -> 1.10 +1.10 -> 1.11 +``` + +Each version can be upgraded in a bit different way, to find information how to upgrade your version of Kubernetes please use this [guide](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-upgrade/#kubeadm-upgrade-guidance). + +Epiphany use kubeadm to boostrap a cluster and same tool shall be used to upgrade it. + +Upgrading Kubernetes cluster with running applications shall be done step by step. To prevent your applications downtime you should use at least **two Kubernetes worker nodes** and at least **two instances of each of your service**. + +Start cluster upgrade with upgrading master node. Detailed instructions how to upgrade each node, including master, are described in guide linked above. When Kubernetes master is down it does not affect running applications, at this time only control plane is not operating. **Your services will be running but will not be recreated nor scaled when control plane is down.** + +Once master upgrade finished successfully, you shall start upgrading nodes - **one by one**. 
Kubernetes master will notice when worker node is down and it will instatiate services on existing operating node, that is why it is essential to have more than one worker node in cluster to minimize applications downtime. + +## How to upgrade Kafka cluster + +### Kafka upgrade + +No downtime upgrades are possible to achieve when upgrading Kafka, but before you start thinking about upgrading you have to think about your topics configuration. Kafka topics are distributed accross partitions with replication. Default value for replication is 3, it means each partition will be replicated to 3 brokers. You should remember to enable redundancy and keep **at least two replicas all the time**, it is important when upgrading Kafka cluser. When one of your Kafka nodes will be down during upgrade ZooKeeper will direct your producers and consumers to working instances - having replicated partitions on working nodes will ensure no downtime and no data loss work. + +Upgrading Kafka could be different for every Kafka release, please refer to [Apache Kafka documentation](https://kafka.apache.org/documentation/#upgrade). Important point to remember during Kafka upgrade is the rule: **only one broker at the time** - to prevent downtime you should uprage you Kafka brokers one by one. + +### ZooKeeper upgrade + +ZooKeeper redundancy is also recommended, since service restart is required during upgrade - it can cause ZooKeeper unavailability. Having at **least two ZooKeeper services** in *ZooKeepers ensemble* you can upgrade one and then start with the rest **one by one**. + +More detailed information about ZooKeeper you can find in [ZooKeeper documentation](https://cwiki.apache.org/confluence/display/ZOOKEEPER). + +## How to enable or disable network traffic + +### VM Firewall + +Epiphany 1.0 supports firewalld on host machines (RedHat only). You can enable firewall setting `.../security/firewall/enable` to `true` in data.yaml. 
Remember to allow port 22 to be open in ports_open (`.../security/firewall/ports_open`) dictionary in order to configuration can do its job. + +### Azure specific - Network Security Group + +Security for internet facing infrastructure is extremely important thing - remember to configure `Network Security Group` rules to allow network traffic only on required ports and directions. You can do it using Azure specific data.yaml in section `.../network_security_group/rules`. Remember to allow port 22 (you can/should remove this rule after deployment) in order to configuration can do its job. + +## Client certificate for Azure VPN connection + +Epiphany will create point to site configuration (if you enable VPN in `.../security/vpn/enable` and specify public key of your certificate, in base64 format, in `public_cert_data` field). For production environments you have to use root certificate from `trusted provider`. +For development purposes you can use self signed certificate which can be generated using powershell: + +When you get root certificate you should generate child certificate(s) that will be distributed to the team that should have VPN access to clusters. +Configuration of client config in data.yaml (`.../security/vpn/client_configuration/root_certificate`) looks like following: + +```yaml +... +root_certificate: + # name is the name of the cert that was created for you by a trusted party OR a name you give a self-signed cert + name: NAME-OF-YOUR-CERTIFICATE + revoked_certificate: + name: NAME-OF-REVOKED-CERTIFICATE + thumbprint: THUMBPRINT-OF-REVOKED-CERTIFICATE + # public_cert_data is the actual base64 public key from your cert. Put it in 'as is'. The '|' tells yaml to use 'as is'. + public_cert_data: | + YOUR-BASE64-CLIENT-AUTH-PUBLIC-KEY +... +``` + +Configuration requires to have revoked certificate filled in (for now). + +## Build artifacts + +Epiphany engine produce build artifacts during each deployment. 
Those artifacts contains: + +- Generated terraform files. +- Generated terraform state files. +- Generated cluster manifest file. +- Generated ansible files. +- Azure login credentials for `service principal` if deploying to Azure. + +Artifacts contains sensitive data so it is important to keep it in safe place like `private GIT repository` or `storage with limited access`. Generated build is also important in case of scaling or updating cluster - you will it in build folder in order to edit your cluster. + +Epiphany creates (or use if you don't specified it to create) service principal account which can manage all resources in subscription, please store build artifacts securely. + +## How to scale Kubernetes and Kafka + +### Scaling Kubernetes + +For Azure specific deployment configuration for Kubernetes Node looks like that: + +```yaml +vms: + - name: vm-k8s-node + size: Standard_DS1_v2 + os_type: linux + count: 1 + bastian_host: false + # roles are how you define a grouping of nodes. These values will be used to create an inventory of your cluster + # Must be a member of the 'role' in core + roles: + - linux + - worker + - node_exporter + - filebeat + - reboot +``` + +There is 1 worker role defined - it means only one Kubernetes node virtual machine will be created and configured to join Kubernetes cluster. When Epiphany deployment was created with one Kubernetes node and then you decide to have more nodes you can simply change + +```yaml +count: 1 +``` + +to + +```yaml +count: 2 +``` + +and wait for add new node. It is important to have your build folder from initial deployment so now state will be automatically refreshed with no downtime. For more information about build folder go to [Build artifacts](#build-artifacts) section. + +For all other deployments (Metal, VMWare, VirtualBox, etc.) you just have to add another definition for machine with worker role. + +### Scaling Kafka + +Scaling Kafka looks exactly the same like scaling Kubernetes. 
Once changed `count:` property from `1` to `n` and executed Epiphany you will have `n` Kafka machines. + +To add new Kafka broker to non-Azure deployment looks the same as adding new Kubernetes node. + +## Kafka replication and partition setting + +When planning Kafka installation you have to think about number of partitions and replicas since it is strongly related to throughput of Kafka and its reliability. By default Kafka's `replicas` number is set to 1 - you should change it in `core/src/ansible/roles/kafka/defaults` in order to have partitions replicated to many virtual machines. + +```yaml + ... + replicas: 1 # Default to at least 1 (1 broker) + partitions: 8 # 100 x brokers x replicas for reasonable size cluster. Small clusters can be less + ... +``` + +You can read more [here](https://www.confluent.io/blog/how-choose-number-topics-partitions-kafka-cluster) about planning number of partitions. + +## RabbitMQ installation and setting + +To install RabbitMQ in single mode just add rabbitmq role to your data.yaml for your sever and in general roles section. All configuration on Rabbit MQ - e.g. user other than +guest creation should be performed manually. + +## Data and log retention + +An Epiphany cluster has a number of components which log, collect and retain data. To make sure that these do not exceed the usable storage of the machines there running on the following configurations are available. + +### Elasticsearch + +For managing the data storage that Elasticsearch consumes we use [Elasticsearch Curator](https://www.elastic.co/guide/en/elasticsearch/client/curator/5.5/about.html). To use it one needs to make sure the elasticsearch-curator is enabled. This role will install and configure the [Elasticsearch Curator](https://www.elastic.co/guide/en/elasticsearch/client/curator/5.5/about.html) to run in a cronjob to clean up older indices which are older then a certain treshold. 
+ +In the default configuration `/core/src/ansible/roles/elasticsearch-curator/defaults/main.yml` the following values can be tweaked regarding storage: + +```yaml +# Rentention time of Elasticsearch indices in days. +indices_retention_days: 30 +``` + +The size of the storage consumed by Elasticsearch is depenedant on the clustersize and how much logging the deployed application will generate. + +### Grafana + +In the default configuration `/core/src/ansible/roles/grafana/defaults/main.yml` the following values can be tweaked to control the ammount of storage used by Grafana: + +```yaml +# The path where Grafana stores its logs +grafana_logs_dir: "/var/log/grafana" + +# The path where Grafana stores it's (Dashboards DB (SQLLite), sessions, etc) +grafana_data_dir: "/var/lib/grafana" + +grafana_logging: +# Enable or disable log rotation +log_rotate: true + +# Enable or disable daily log rotation +daily_rotate: true + +# Number of days to retain the logs +max_days: 7 +``` + +While logs can be rotated and have a retention time, the ammount of storage used by Grafana is dependant on user usage and dashboard count and cannot be directly controlled. + +### Kafka + +In the default configuration `/core/src/ansible/roles/kafka/defaults/main.yml` the following values can be tweaked regarding storage: + +```yaml +# The path where kafka stores its data +data_dir: /var/lib/kafka + +# The path where kafka stores its logs +log_dir: /var/log/kafka + +# The minimum age of a log file to be eligible for deletion due to age +log_retention_hours: 168 + +# Offsets older than this retention period will be discarded +offset_retention_minutes: 10080 +``` + +The ammount of storage Kafka consumes is dependant on the application running on Epiphany, how many messages producers create and how fast the consumers can consume them. It's up to the application developer to configure a `log_retention_hours` and `offset_retention_minutes` to suite the applications need. 
+ +Since Kafka does not have a mechanism for log rotation we use [logrotate](https://linux.die.net/man/8/logrotate) for this. The template for logrotate can be found here: + +`/core/src/ansible/roles/kafka/templates/logrotate.conf.j2` + +On the system the configuration can be found here: + +`/etc/logrotate.d/kafka` + +### Kibana + +In the default configuration `/core/src/ansible/roles/kibana/defaults/main.yml` the following values can be tweaked regarding storage: + +```yaml +# The path where Kibana stores its logs +kibana_log_dir: /var/log/kibana +``` + +Since Kibana does not have a mechanism for log rotation we use [logrotate](https://linux.die.net/man/8/logrotate) for this. The template for logrotate can be found here: + +`/core/src/ansible/roles/kibana/templates/logrotate.conf.j2` + +On the system the configuration can be found here: + +`/etc/logrotate.d/kibana` + +Besides logs any other data is depenedant on user usage (Dashboards, queries etc). Kibana stores that kind of data in ElasticSearch under the `.kibana` index. + +### Kubernetes + +The kubelet and container runtime (Docker) do not run in containers. On machines with systemd they write to journald. + +Everything a containerized application writes to stdout and stderr is redirected to the Docker logging driver (`json-file`), which is configured to rotate logs automatically. + +In the default configuration `/core/src/ansible/roles/docker/defaults/main.yml` the following values can be tweaked regarding storage: + +```yaml +docker_logging: + log_opts: + # The maximum size of the log before it is rolled. A positive integer plus a modifier representing the unit of measure (k, m, or g). + max_file_size: "10m" + # The maximum number of log files that can be present. If rolling the logs creates excess files, the oldest file is removed. 
+ max_files: 2 +``` + +On the system the configuration can be found here: + +`/etc/docker/daemon.json` + +### Prometheus + +In the default configuration `/core/src/ansible/roles/prometheus/defaults/main.yml` the following values can be tweaked to control the amount of storage used by Prometheus: + +```yaml +# The path where Prometheus stores its data +prometheus_db_dir: /var/lib/prometheus + +# The time it will retain the data before it gets deleted +prometheus_storage_retention: "30d" + +prometheus_global: +# The interval it will use to scrape the data from the sources +scrape_interval: 15s +``` + +The size of the data which Prometheus will scrape and retain is dependant on the cluster size (Kafka/Kubernetes nodes) and the scrape interval. The [Prometheus storage documentation](https://prometheus.io/docs/prometheus/latest/storage/) will help you determine how much data might be generated with a certain scrape interval and clustersize. This can then be used to determine a storage retention time in days. Note that one should not plan to use the entire disk space for data retention since it might also be used by other components like Grafana which might be deployed on the same system. 
+ +### Zookeeper + +In the default configuration `core/src/ansible/roles/zookeeper/defaults/main.yml` the following values can be tweaked regarding storage: + +```yaml +# The path where Zookeeper stores its logs +zookeeper_log_dir: /var/log/zookeeper + +# The max size a logfile can have +zookeeper_rolling_log_file_max_size: 10MB + +# How many logfiles can be retained before rolling over +zookeeper_max_rolling_log_file_count: 10 +``` diff --git a/docs/home/README.md b/docs/home/README.md new file mode 100644 index 0000000000..8eca613ab9 --- /dev/null +++ b/docs/home/README.md @@ -0,0 +1,125 @@ +# Epiphany + + + +- [Epiphany](#epiphany) + - [Overview](#overview) + - [Known Issues](#known-issues) + - [HOWTO documents](HOWTO.md) + - [Git Workflow](#git-workflow) + - [Dependencies](#dependencies) + - [Help Wanted](#help-wanted) + - [Epiphany Community](#epiphany-community) + - [Security](#security) + - [Automation](#automation) + - [Storage](#storage) + - [Reading materials on Epiphany components](#reading-materials-on-epiphany-components) + + + +## Overview + +Epiphany at it's core is full automation of Kubernetes and Docker plus additional builtin services such as Kafka for high speed messaging/events, Prometheus for monitoring and Graphana for dashboards, Elasticsearch and Kibana for centralized logging. Other optional services are being evaluated now. + +Epiphany can run on as few as one node (laptop, desktop, server) but the real value comes from running 3 or more nodes for scale and HA. Nodes can be added or removed at will depending on data in the manifest. Everything is data driven so simply changing the manifest data and running the automation will modify the environment. + +We currently use Terraform and Ansible for our automation orchestration. All automation is idempotent so you can run it as many times as you wish and it will maintain the same state unless you change the data. 
If someone makes a "snow flake" change to the environment (you should never do this) then simply running the automation again will put the environment back to the desired state. + +## Known Issues + +The Terraform backend feature along with Azure Service Principal is the default configuration. This allows for multiple team members to modify the IaaS (VM) services on Azure. This works as expected from most networks. However, we have seen an error generated by Terraform on certain networks. This can be resolved simply by setting two options in the `data.yaml` file for your given environment. These are as follows: + +```yaml +# Comments excluded here for clarity +terraform: + service_principal: + enable: false + backend: + enable: false +``` + +## Git Workflow + +See [GITWORKFLOW.md](/GITWORKFLOW.md). + +## Dependencies + +There are several required dependencies for the Epiphany automation to run from a 'workstation/deployment/bootstrap' node: + +1. Docker +2. Ansible +3. jq (JSON Query tool: https://stedolan.github.io/jq/download/) +4. Python +5. Git +6. Azure CLI >= 2.0 +7. SSH + +We will create a Docker image that can be run that has all of this so that you will not be required to install anything unless you want to. + +## Help Wanted + +As in all community based projects, community participation is very important. We currently are building in Microsoft Azure under our Ability subscription. The plan is to do all building, testing, etc. in this subscription but allow you to clone this repo and run the automation to build out Epiphany within your own Azure subscription. Of course, you can use it for on-premise and other cloud platforms [coming soon]. + +So, to move faster we need more assistance from anyone willing to contribute. The contributions needed include development, QA, documentation, graphics, evangelism, and whatever else you believe would be good. The point is, Epiphany is open and welcomes all contributions from all divisions and BUs. 
+ +If interested then please see the [Contribution Guide](/CONTRIBUTING.md). + +## Epiphany Community + +The plans are to have biweekly or monthly Microsoft Teams calls/videos on all things Epiphany. We may even do this weekly depending on interest. Think of it like 'office hours' where we may have a how-to session, open forum, round table talk, etc. There are no hard rules or predefined subjects. It will be driven by what you want. + +## Security + +Security is critical! Epiphany addresses security at multiple points. By default it establishes perimeter security via firewall rules, IPTables, etc. but it also incorporates cross platform Kubernetes Secrets. + +Security enhancements will always be addressed. Epiphany will always comply with MCSR. + +## Automation - TBD + +## Storage + +Microsoft Azure has a nice storage tool called [Storage Explorer](https://azure.microsoft.com/en-us/features/storage-explorer/). It can also be used with Visual Studio Code when using the builtin Azure Storage features. + +## Reading materials on Epiphany components + +Here are some materials concerning Epiphany components - both on what we use in the background and on what's available for you to use with your application/deployment. Links for every component are in an ascending order of complexity. + +1. Under the hood + 1. [Terraform](https://www.terraform.io/) + - AWS use case [example](https://www.terraform.io/intro/getting-started/build.html) + 2. [Ansible](https://www.ansible.com/) + - [Intro to playbooks](https://docs.ansible.com/ansible/2.5/user_guide/playbooks_intro.html) + 3. [jq](https://stedolan.github.io/jq) + 4. [git-lfs](https://git-lfs.github.com/) +2. Available out of the box + 1. 
[Docker](https://www.docker.com/) + - (Pluralsight) [Introduction to Docker and containerization](https://app.pluralsight.com/library/courses/docker-containers-big-picture/table-of-contents) + - [A Beginner-Friendly Introduction to Containers, VMs and Docker](https://medium.freecodecamp.org/a-beginner-friendly-introduction-to-containers-vms-and-docker-79a9e3e119b) + 2. [Kubernetes](https://kubernetes.io/) + - (Pluralsight) [Introduction to Kubernetes](https://app.pluralsight.com/library/courses/getting-started-kubernetes/table-of-contents) + - [Kubernetes Basics](https://kubernetes.io/docs/tutorials/kubernetes-basics/) + - (Free Udacity course) [Scalable Microservices with Kubernetes](https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615) + - Curated list of [Kubernetes resources](https://legacy.gitbook.com/book/ramitsurana/awesome-kubernetes/details) + 3. Monitoring + 1. [Prometheus](https://prometheus.io/) + - Query [examples](https://prometheus.io/docs/prometheus/latest/querying/examples/) + - [Integration with Grafana](https://prometheus.io/docs/visualization/grafana/) + - Included [OS metric collector](https://github.com/prometheus/node_exporter) + - Kafka monitoring with [JMX exporter](https://github.com/prometheus/jmx_exporter) + - Alertmanager [Alerts from Prometheus](https://prometheus.io/docs/alerting/alertmanager/) + 2. [Grafana](https://grafana.com/) + - Community supplied, ready to use [dashboards](https://grafana.com/dashboards) + 4. [Kafka](http://kafka.apache.org/) + - [Kafka introduction](http://kafka.apache.org/intro) + - (Pluralsight) [Getting Started with Apache Kafka](https://app.pluralsight.com/library/courses/apache-kafka-getting-started/table-of-contents) + 5. [RabbitMQ](https://www.rabbitmq.com/) + - [RabbitMQ Getting started](https://www.rabbitmq.com/getstarted.html) + 6. Central logging + 1. [Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) + 2. 
[Kibana](https://www.elastic.co/guide/en/kibana/current/index.html) + 3. [Filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/index.html) + - [Beats platform reference](https://www.elastic.co/guide/en/beats/libbeat/current/index.html) + 7. Load Balancing + 1. [HaProxy](http://www.haproxy.org/) + 8. Databases + 1. [PostgreSQL](https://www.postgresql.org/docs/) diff --git a/docs/home/TROUBLESHOOTING.md b/docs/home/TROUBLESHOOTING.md new file mode 100644 index 0000000000..e1dbea3e30 --- /dev/null +++ b/docs/home/TROUBLESHOOTING.md @@ -0,0 +1,44 @@ +# Troubleshooting Guide + +## Additional Links + +[Kubernetes troubleshooting](https://kubernetes.io/docs/setup/independent/troubleshooting-kubeadm/) + +## Installation + +### Service Principal + +The first time you launch Epiphany and you're building on Azure you will be prompted to register your device. This process generates a token which is then used to create an Azure Service Principal. At times, Azure cannot fully propagate fast enough so you may see an error that resembles something like below: + +```text +====> Login using service principal... +Get Token request returned http error: 400 and server response: {"error":"unauthorized_client","error_description":"AADSTS70001: Application with identifier '1c38cb1b-7cbe-4bf5-8d17-bee51d5d6502' was not found in the directory 372ee9e0-9ce0-4033-a64a-c07073a91ecd +Trace ID: fe4358f1-7250-4ad9-b6ce-c7da80101700 +Correlation ID: 441a5caf-66d8-4252-a783-c0abcf60c40e +Timestamp: 2018-07-18 08:30:12Z","error_codes":[70001],"timestamp":"2018-07-18 08:30:12Z","trace_id":"fe4358f1-7250-4ad9-b6ce-c7da80101700","correlation_id":"441a5caf-66d8-4252-a783-c0abcf60c40e"} +``` + +Simply relaunch the command line and the Service Principal should be propagated. Repeat if necessary. This is not very common but has been seen. + +### Kubernetes + +At rare times Google has a connection issue with pulling down images. 
You may see something like below: + +```text +TASK [master : kubeadm config images pull] ********************************************************************************************** +fatal: [vm-epiphany-rhel-playground-master-001]: FAILED! => {"changed": true, "cmd": "kubeadm config images pull", "delta": "0:00:01.428562", "end": "2018-07-18 08:56:47.608629", "msg": "non-zero return code", "rc": 1, "start": "2018-07-18 08:56:46.180067", "stderr": "failed to pull image \"k8s.gcr.io/kube-apiserver-amd64:v1.11.1\": exit status 1", "stderr_lines": ["failed to pull image \"k8s.gcr.io/kube-apiserver-amd64:v1.11.1\": exit status 1"], "stdout": "", "stdout_lines": []} +``` + +You wait a little while and try again and it will usually go away. They resolve those types of issues quickly. If it does not go away then it could be the version of Kubernetes. For example, in the error above, v1.11.1 did not have proper images in the Google registry. Changing to v1.11.0 fixed it until Google fixed their issue. + +## Kafka + +When running the Ansible automation there is a verification script called `kafka_producer_consumer.py` which creates a topic, produces messages and consumes messages. If the script fails for whatever reason then Ansible verification will report it as an error. An example of an issue is as follows: + +```text +ERROR org.apache.kafka.common.errors.InvalidReplicationFactorException: Replication factor: 1 larger than available brokers: 0. +``` + +This issue is saying that a replication factor of 1 is being attempted but there are no brokers ('0'). This means that the Kafka broker(s) are not running any longer. Kafka will start and attempt to establish connections etc. and if unable it will shut down and log the message. So, when the verification script runs it will not be able to find a local broker (runs on each broker). + +Take a look at syslog/dmesg and run `sudo systemctl status kafka`. 
Most likely it relates to security (TLS/SSL) and/or network but can also be incorrect settings in the config file `/opt/kafka/config/server.properties`. Correct and rerun the automation. diff --git a/docs/project_layout.md b/docs/project_layout.md new file mode 100644 index 0000000000..31c7d55fb4 --- /dev/null +++ b/docs/project_layout.md @@ -0,0 +1,45 @@ +# Epiphany Project Layout + +## Folders + +### .gitlab + +The `.gitlab` folder is, of course, specific to GitLab. It provides several templates used by GitLab to aid in tracking issues and merge requests (a.k.a. pull requests). This folder should only be modified by the project owner since it only deals with workflow so you most likely will never have a need to work with it. + +### architecture + +The `architecture` folder is very important. It contains everything needed to understand what Epiphany is from an Architectural point of view. The Architectural documentation can be found [here](/architecture/docs/index.md) along with the specifics for the folders in /architecture. + +### assets + +The `assets` folder contains all of the assets used including logos and presentation slides. You should place all of your images in the `images` folder and any specific sub-folder that you feel will help keep things more organized. Also, the `slides` folder holds PowerPoint slides of Epiphany. `Epiphany.pptx` is a single presentation that contains chapters (sections) along with navigation so that you can use a single presentation to address different audiences. Some of the sections repeat themselves in different ways because of the design so keep that in mind. + +### bin + +The `bin` folder is used to hold executable files such as binaries, scripts etc. This folder should *only* hold items required by Epiphany as a dependency and that is not part of a full project. For example, the `template_engine` executable script handles all of the template features for Epiphany and it's small in size. 
If you need to include a project-like dependency then you would add the package information in the `dependencies` section of the primary Epiphany data file. This dependency would then be pulled down during the pre-build process from wherever you point it. So, small executable scripts or binaries that are not part of a larger package should be considered to go here. If in doubt then make it a dependency instead. + +### build + +The `build` folder is what it says - it holds the build. + +## Critical Folder + +### core + +The `core` folder holds the core of Epiphany. + +### data + +The `data` folder holds all of the yaml data files. This folder is what is most updated by application developers for their own product needs. Epiphany is fully data driven so making changes here will build out a new version of Epiphany and/or your product (maybe). + +### core-extensions + +The `core-extensions` folder holds additional (extensions) items that could be promoted to `core` but instead are placed here first. + +### examples + +The `examples` folder holds a number of examples of how to use Epiphany. + +### extras + +The `extras` folder contains additional (extra) goodies that may help in your workflow etc. This includes editor examples and more.