From 53ef192e9d93ae0f33f569e56886ffdd994f5bc1 Mon Sep 17 00:00:00 2001
From: Nikita Vaniasin
Date: Tue, 24 Oct 2023 08:09:14 +0200
Subject: [PATCH] sync-gh-pages: docs and Helm chart 1.2.34 (#1459)

---
 README.md                                     |  284 +-
 docs/CODEOWNERS                               |    2 +
 docs/README.md                                |   11 +
 docs/api/ArangoDeployment.V1.md               | 4080 +++++++++++++++++
 docs/api/ArangoMember.V1.md                   |   69 +
 docs/bare-metal.md                            |  524 +++
 docs/customer_questions.md                    |   11 +
 docs/design/README.md                         |   30 +
 docs/design/acceptance_test.md                |  533 +++
 docs/design/acceptance_test_platforms.md      |   13 +
 docs/design/additional_configuration.md       |   17 +
 docs/design/api.md                            |   30 +
 docs/design/arch_change.md                    |   66 +
 docs/design/backup.md                         |   59 +
 docs/design/configuring_tz.md                 |   18 +
 docs/design/constraints.md                    |   59 +
 docs/design/dashboard.md                      |   64 +
 docs/design/debugging.md                      |   31 +
 docs/design/exporter.md                       |   93 +
 .../features/deployment_spec_defaults.md      |   20 +
 docs/design/features/ephemeral_volumes.md     |   20 +
 .../features/failover_leader_service.md       |   23 +
 docs/design/features/rebalancer.md            |   10 +
 docs/design/features/rebalancer_v2.md         |   25 +
 .../features/rebuild_out_synced_shards.md     |   28 +
 docs/design/features/secured_containers.md    |   28 +
 docs/design/health.md                         |   23 +
 docs/design/lifecycle_hooks_and_finalizers.md |   37 +
 docs/design/logging.md                        |   43 +
 docs/design/maintenance.md                    |   14 +
 docs/design/metrics.md                        |  147 +
 docs/design/pod_evication_and_replacement.md  |  124 +
 docs/design/pod_name_versus_cluster_id.md     |   18 +
 docs/design/recovery.md                       |  270 ++
 docs/design/resource_and_labels.md            |  102 +
 docs/design/resource_management.md            |   22 +
 docs/design/rotating.md                       |   13 +
 docs/design/scaling.md                        |   28 +
 docs/design/status.md                         |   33 +
 docs/design/test_clusters.md                  |   16 +
 docs/design/testing.md                        |   40 +
 docs/design/topology_awareness.md             |  198 +
 docs/design/upgrading.md                      |   32 +
 docs/generated/actions.md                     |  188 +
 docs/generated/metrics/README.md              |   38 +
 ...db_operator_agency_cache_health_present.md |   12 +
 .../arangodb_operator_agency_cache_healthy.md |   12 +
 .../arangodb_operator_agency_cache_leaders.md |   13 +
 ...rator_agency_cache_member_commit_offset.md |   13 +
 ...db_operator_agency_cache_member_serving.md |   13 +
 .../arangodb_operator_agency_cache_present.md |   12 +
 .../arangodb_operator_agency_cache_serving.md |   12 +
 .../arangodb_operator_agency_errors.md        |   12 +
 .../arangodb_operator_agency_fetches.md       |   12 +
 .../metrics/arangodb_operator_agency_index.md |   12 +
 .../arangodb_operator_engine_assertions.md    |   18 +
 .../arangodb_operator_engine_ops_alerts.md    |   19 +
 ...angodb_operator_engine_panics_recovered.md |   11 +
 ...erator_kubernetes_client_request_errors.md |   12 +
 ...odb_operator_kubernetes_client_requests.md |   12 +
 ...godb_operator_kubernetes_events_created.md |   13 +
 ...members_unexpected_container_exit_codes.md |   17 +
 .../arangodb_operator_rebalancer_enabled.md   |   12 +
 ...ngodb_operator_rebalancer_moves_current.md |   12 +
 ...angodb_operator_rebalancer_moves_failed.md |   12 +
 ...odb_operator_rebalancer_moves_generated.md |   12 +
 ...odb_operator_rebalancer_moves_succeeded.md |   12 +
 ...tor_resources_arangodeployment_accepted.md |   12 +
 ...urces_arangodeployment_immutable_errors.md |   12 +
 ...r_resources_arangodeployment_propagated.md |   12 +
 ...ources_arangodeployment_status_restores.md |   12 +
 ...tor_resources_arangodeployment_uptodate.md |   12 +
 ...rces_arangodeployment_validation_errors.md |   12 +
 ...rces_arangodeploymentreplication_active.md |   12 +
 ...rces_arangodeploymentreplication_failed.md |   12 +
 docs/providers/README.md                      |    3 +
 docs/providers/eks/README.md                  |   26 +
 index.yaml                                    |   10 +-
 78 files changed, 7846 insertions(+), 108 deletions(-)
 create mode 100644 docs/CODEOWNERS
 create mode 100644 docs/README.md
 create mode 100644 docs/api/ArangoDeployment.V1.md
 create mode 100644 docs/api/ArangoMember.V1.md
 create mode 100644 docs/bare-metal.md
 create mode 100644 docs/customer_questions.md
 create mode 100644 docs/design/README.md
 create mode 100644 docs/design/acceptance_test.md
 create mode 100644 docs/design/acceptance_test_platforms.md
 create mode 100644 docs/design/additional_configuration.md
 create mode 100644 docs/design/api.md
 create mode 100644 docs/design/arch_change.md
 create mode 100644 docs/design/backup.md
 create mode 100644 docs/design/configuring_tz.md
 create mode 100644 docs/design/constraints.md
 create mode 100644 docs/design/dashboard.md
 create mode 100644 docs/design/debugging.md
 create mode 100644 docs/design/exporter.md
 create mode 100644 docs/design/features/deployment_spec_defaults.md
 create mode 100644 docs/design/features/ephemeral_volumes.md
 create mode 100644 docs/design/features/failover_leader_service.md
 create mode 100644 docs/design/features/rebalancer.md
 create mode 100644 docs/design/features/rebalancer_v2.md
 create mode 100644 docs/design/features/rebuild_out_synced_shards.md
 create mode 100644 docs/design/features/secured_containers.md
 create mode 100644 docs/design/health.md
 create mode 100644 docs/design/lifecycle_hooks_and_finalizers.md
 create mode 100644 docs/design/logging.md
 create mode 100644 docs/design/maintenance.md
 create mode 100644 docs/design/metrics.md
 create mode 100644 docs/design/pod_evication_and_replacement.md
 create mode 100644 docs/design/pod_name_versus_cluster_id.md
 create mode 100644 docs/design/recovery.md
 create mode 100644 docs/design/resource_and_labels.md
 create mode 100644 docs/design/resource_management.md
 create mode 100644 docs/design/rotating.md
 create mode 100644 docs/design/scaling.md
 create mode 100644 docs/design/status.md
 create mode 100644 docs/design/test_clusters.md
 create mode 100644 docs/design/testing.md
 create mode 100644 docs/design/topology_awareness.md
 create mode 100644 docs/design/upgrading.md
 create mode 100644 docs/generated/actions.md
 create mode 100644 docs/generated/metrics/README.md
 create mode 100644 docs/generated/metrics/arangodb_operator_agency_cache_health_present.md
 create mode 100644 docs/generated/metrics/arangodb_operator_agency_cache_healthy.md
 create mode 100644 docs/generated/metrics/arangodb_operator_agency_cache_leaders.md
 create mode 100644 docs/generated/metrics/arangodb_operator_agency_cache_member_commit_offset.md
 create mode 100644 docs/generated/metrics/arangodb_operator_agency_cache_member_serving.md
 create mode 100644 docs/generated/metrics/arangodb_operator_agency_cache_present.md
 create mode 100644 docs/generated/metrics/arangodb_operator_agency_cache_serving.md
 create mode 100644 docs/generated/metrics/arangodb_operator_agency_errors.md
 create mode 100644 docs/generated/metrics/arangodb_operator_agency_fetches.md
 create mode 100644 docs/generated/metrics/arangodb_operator_agency_index.md
 create mode 100644 docs/generated/metrics/arangodb_operator_engine_assertions.md
 create mode 100644 docs/generated/metrics/arangodb_operator_engine_ops_alerts.md
 create mode 100644 docs/generated/metrics/arangodb_operator_engine_panics_recovered.md
 create mode 100644 docs/generated/metrics/arangodb_operator_kubernetes_client_request_errors.md
 create mode 100644 docs/generated/metrics/arangodb_operator_kubernetes_client_requests.md
 create mode 100644 docs/generated/metrics/arangodb_operator_kubernetes_events_created.md
 create mode 100644 docs/generated/metrics/arangodb_operator_members_unexpected_container_exit_codes.md
 create mode 100644 docs/generated/metrics/arangodb_operator_rebalancer_enabled.md
 create mode 100644 docs/generated/metrics/arangodb_operator_rebalancer_moves_current.md
 create mode 100644 docs/generated/metrics/arangodb_operator_rebalancer_moves_failed.md
 create mode 100644 docs/generated/metrics/arangodb_operator_rebalancer_moves_generated.md
 create mode 100644 docs/generated/metrics/arangodb_operator_rebalancer_moves_succeeded.md
 create mode 100644 docs/generated/metrics/arangodb_operator_resources_arangodeployment_accepted.md
 create mode 100644 docs/generated/metrics/arangodb_operator_resources_arangodeployment_immutable_errors.md
 create mode 100644 docs/generated/metrics/arangodb_operator_resources_arangodeployment_propagated.md
 create mode 100644 docs/generated/metrics/arangodb_operator_resources_arangodeployment_status_restores.md
 create mode 100644 docs/generated/metrics/arangodb_operator_resources_arangodeployment_uptodate.md
 create mode 100644 docs/generated/metrics/arangodb_operator_resources_arangodeployment_validation_errors.md
 create mode 100644 docs/generated/metrics/arangodb_operator_resources_arangodeploymentreplication_active.md
 create mode 100644 docs/generated/metrics/arangodb_operator_resources_arangodeploymentreplication_failed.md
 create mode 100644 docs/providers/README.md
 create mode 100644 docs/providers/eks/README.md

diff --git a/README.md b/README.md
index 793662323..96dc2abbd 100644
--- a/README.md
+++ b/README.md
@@ -6,20 +6,22 @@ ArangoDB Kubernetes Operator helps to run ArangoDB deployments on Kubernetes
 clusters. To get started, follow the Installation instructions below and/or
-read the [tutorial](https://www.arangodb.com/docs/stable/tutorials-kubernetes.html).
+read the [tutorial](https://www.arangodb.com/docs/stable/deployment-kubernetes-usage.html).
 
 ## State
 
-The ArangoDB Kubernetes Operator is still in **development**.
+The ArangoDB Kubernetes Operator is Production ready.
 
-Running ArangoDB deployments (single, active-failover or cluster)
-is reasonably stable, and we're in the process of validating
-production readiness of various Kubernetes platforms.
+[Documentation](https://www.arangodb.com/docs/stable/deployment-kubernetes.html)
 
-The feature set of the ArangoDB Kubernetes Operator is close to what
-it is intended to be.
+### Limits
 
-[Documentation](./docs/README.md)
+
+| Limit              | Description                                                                   | Community | Enterprise |
+|:-------------------|:------------------------------------------------------------------------------|:----------|:-----------|
+| Cluster size limit | Maximum number of nodes (DBServers & Coordinators) supported in Cluster mode  | 64        | 1024       |
+
 
 ### Production readiness state
 
@@ -31,101 +33,94 @@ state and over time move to full "production readiness".
 
 The Operator supports the versions supported on providers and maintained by Kubernetes. Once a version
 is not supported anymore, it goes into the "Deprecating" state and is marked as deprecated in a Minor release.
 
-Kubernetes versions starting from 1.16 are supported and tested, charts and manifests can use API Versions which are not present in older versions.
+Kubernetes versions starting from 1.18 are supported and tested; charts and manifests can use API versions which are not present in older versions.
 
 The following table has the general readiness state; the table below covers individual newer features separately.
-| Platform            | Kubernetes Version | ArangoDB Version | State      | Remarks               | Provider Remarks                   |
-|---------------------|--------------------|------------------|------------|-----------------------|------------------------------------|
-| Google GKE          | 1.17               | >= 3.5.0         | Production | Don't use micro nodes |                                    |
-| Google GKE          | 1.18               | >= 3.5.0         | Production | Don't use micro nodes |                                    |
-| Google GKE          | 1.19               | >= 3.5.0         | Production | Don't use micro nodes |                                    |
-| Google GKE          | 1.20               | >= 3.5.0         | Production | Don't use micro nodes |                                    |
-| Azure AKS           | 1.18               | >= 3.5.0         | Production |                       |                                    |
-| Azure AKS           | 1.19               | >= 3.5.0         | Production |                       |                                    |
-| Azure AKS           | 1.20               | >= 3.5.0         | Production |                       |                                    |
-| Amazon EKS          | 1.16               | >= 3.5.0         | Production |                       | [Amazon EKS](./docs/providers/eks) |
-| Amazon EKS          | 1.17               | >= 3.5.0         | Production |                       | [Amazon EKS](./docs/providers/eks) |
-| Amazon EKS          | 1.18               | >= 3.5.0         | Production |                       | [Amazon EKS](./docs/providers/eks) |
-| Amazon EKS          | 1.19               | >= 3.5.0         | Production |                       | [Amazon EKS](./docs/providers/eks) |
-| Amazon EKS          | 1.20               | >= 3.5.0         | Production |                       | [Amazon EKS](./docs/providers/eks) |
-| IBM Cloud           | 1.17               | >= 3.5.0         | Deprecated |                       |                                    |
-| IBM Cloud           | 1.18               | >= 3.5.0         | Production |                       |                                    |
-| IBM Cloud           | 1.19               | >= 3.5.0         | Production |                       |                                    |
-| IBM Cloud           | 1.20               | >= 3.5.0         | Production |                       |                                    |
-| OpenShift           | 3.11               | >= 3.5.0         | Production |                       |                                    |
-| OpenShift           | 4.2                | >= 3.5.0         | Production |                       |                                    |
-| BareMetal (kubeadm) | 1.16               | >= 3.5.0         | Production |                       |                                    |
-| BareMetal (kubeadm) | 1.17               | >= 3.5.0         | Production |                       |                                    |
-| BareMetal (kubeadm) | 1.18               | >= 3.5.0         | Production |                       |                                    |
-| BareMetal (kubeadm) | 1.19               | >= 3.5.0         | Production |                       |                                    |
-| BareMetal (kubeadm) | 1.20               | >= 3.5.0         | Production |                       |                                    |
-| BareMetal (kubeadm) | 1.21               | >= 3.5.0         | Production |                       |                                    |
-| Minikube            | 1.14+              | >= 3.5.0         | Devel Only |                       |                                    |
-| Other               | 1.14+              | >= 3.5.0         | Devel Only |                       |                                    |
-
-Feature-wise production readiness table:
-
-| Feature | Operator Version | ArangoDB Version | ArangoDB Edition | State | Enabled | Flag | Remarks |
-|---------|------------------|------------------|------------------|-------|---------|------|---------|
-| Pod Disruption Budgets | 0.3.10 | Any | Community, Enterprise | Alpha | True | N/A | N/A |
-| Pod Disruption Budgets | 0.3.11 | Any | Community, Enterprise | Production | True | N/A | N/A |
-| Volume Resizing | 0.3.10 | Any | Community, Enterprise | Alpha | True | N/A | N/A |
-| Volume Resizing | 0.3.11 | Any | Community, Enterprise | Production | True | N/A | N/A |
-| Disabling of liveness probes | 0.3.10 | Any | Community, Enterprise | Alpha | True | N/A | N/A |
-| Disabling of liveness probes | 0.3.11 | Any | Community, Enterprise | Production | True | N/A | N/A |
-| Volume Claim Templates | 0.3.11 | Any | Community, Enterprise | Alpha | True | N/A | N/A |
-| Volume Claim Templates | 1.0.0 | Any | Community, Enterprise | Production | True | N/A | N/A |
-| Prometheus Metrics Exporter | 0.3.11 | Any | Community, Enterprise | Alpha | True | N/A | Prometheus required |
-| Prometheus Metrics Exporter | 1.0.0 | Any | Community, Enterprise | Production | True | N/A | Prometheus required |
-| Sidecar Containers | 0.3.11 | Any | Community, Enterprise | Alpha | True | N/A | N/A |
-| Sidecar Containers | 1.0.0 | Any | Community, Enterprise | Production | True | N/A | N/A |
-| Operator Single Mode | 1.0.4 | Any | Community, Enterprise | Production | False | --mode.single | Only 1 instance of Operator allowed in namespace when feature is enabled |
-| TLS SNI Support | 1.0.3 | >= 3.7.0 | Enterprise | Production | True | --deployment.feature.tls-sni | N/A |
-| TLS Runtime Rotation Support | 1.0.4 | > 3.7.0 | Enterprise | Alpha | False | --deployment.feature.tls-rotation | N/A |
-| TLS Runtime Rotation Support | 1.1.0 | > 3.7.0 | Enterprise | Production | True | --deployment.feature.tls-rotation | N/A |
-| JWT Rotation Support | 1.0.4 | > 3.7.0 | Enterprise | Alpha | False | --deployment.feature.jwt-rotation | N/A |
-| JWT Rotation Support | 1.1.0 | > 3.7.0 | Enterprise | Production | True | --deployment.feature.jwt-rotation | N/A |
-| Encryption Key Rotation Support | 1.0.4 | > 3.7.0 | Enterprise | Alpha | False | --deployment.feature.encryption-rotation | N/A |
-| Encryption Key Rotation Support | 1.1.0 | > 3.7.0 | Enterprise | Production | True | --deployment.feature.encryption-rotation | N/A |
-| Encryption Key Rotation Support | 1.2.0 | > 3.7.0 | Enterprise | NotSupported | False | --deployment.feature.encryption-rotation | N/A |
-| Version Check | 1.1.4 | >= 3.6.0 | Community, Enterprise | Alpha | False | --deployment.feature.upgrade-version-check | N/A |
-| Operator Maintenance Management Support | 1.0.7 | >= 3.6.0 | Community, Enterprise | Alpha | False | --deployment.feature.maintenance | N/A |
-| Operator Maintenance Management Support | 1.2.0 | >= 3.6.0 | Community, Enterprise | Production | True | --deployment.feature.maintenance | N/A |
-| Operator Internal Metrics Exporter | 1.1.9 | >= 3.6.0 | Community, Enterprise | Alpha | False | --deployment.feature.metrics-exporter | N/A |
-| Operator Internal Metrics Exporter | 1.2.0 | >= 3.6.0 | Community, Enterprise | Production | True | --deployment.feature.metrics-exporter | N/A |
-| Operator Internal Metrics Exporter | 1.2.3 | >= 3.6.0 | Community, Enterprise | Production | True | --deployment.feature.metrics-exporter | It is always enabled |
-| Operator Ephemeral Volumes | 1.2.2 | >= 3.7.0 | Community, Enterprise | Alpha | False | --deployment.feature.ephemeral-volumes | N/A |
-
-## Release notes for 0.3.16
-
-In this release we have reworked the Helm charts. One notable change is
-that we now create a new service account specifically for the operator.
-The actual deployment still runs by default under the `default` service
-account unless one changes that. Note that the service account under
-which the ArangoDB runs needs a small set of extra permissions. For
-the `default` service account we grant them when the operator is
-deployed. If you use another service account you have to grant these
-permissions yourself. See
-[here](docs/Manual/Deployment/Kubernetes/DeploymentResource.md#specgroupserviceaccountname-string)
-for details.
-
-## Installation of latest release using Kubectl
+
+| Platform            | Kubernetes Version | ArangoDB Version | State      | Remarks                                    | Provider Remarks                   |
+|:--------------------|:-------------------|:-----------------|:-----------|:-------------------------------------------|:-----------------------------------|
+| Google GKE          | 1.21-1.26          | >= 3.6.0         | Production | Don't use micro nodes                      |                                    |
+| Azure AKS           | 1.21-1.26          | >= 3.6.0         | Production |                                            |                                    |
+| Amazon EKS          | 1.21-1.26          | >= 3.6.0         | Production |                                            | [Amazon EKS](./docs/providers/eks) |
+| IBM Cloud           | 1.17               | >= 3.6.0         | Deprecated | Support will be dropped in Operator 1.5.0  |                                    |
+| IBM Cloud           | 1.18-1.21          | >= 3.6.0         | Production |                                            |                                    |
+| OpenShift           | 3.11               | >= 3.6.0         | Deprecated | Support will be dropped in Operator 1.5.0  |                                    |
+| OpenShift           | 4.2-4.13           | >= 3.6.0         | Production |                                            |                                    |
+| BareMetal (kubeadm) | <= 1.20            | >= 3.6.0         | Deprecated | Support will be dropped in Operator 1.5.0  |                                    |
+| BareMetal (kubeadm) | 1.21-1.27          | >= 3.6.0         | Production |                                            |                                    |
+| Minikube            | 1.21-1.27          | >= 3.6.0         | Devel Only |                                            |                                    |
+| Other               | 1.21-1.27          | >= 3.6.0         | Devel Only |                                            |                                    |
+
+
+#### Operator Features
+
+
+| Feature | Operator Version | Introduced | ArangoDB Version | ArangoDB Edition | State | Enabled | Flag | Remarks |
+|:--------|:-----------------|:-----------|:-----------------|:-----------------|:------|:--------|:-----|:--------|
+| Enforced ResignLeadership | 1.2.34 | 1.2.34 | >= 3.8.0 | Community, Enterprise | Production | True | --deployment.feature.enforced-resign-leadership | Enforce ResignLeadership and ensure that Leaders are moved from restarted DBServers |
+| Copy resources spec to init containers | 1.2.33 | 1.2.33 | >= 3.8.0 | Community, Enterprise | Production | True | --deployment.feature.init-containers-copy-resources | Copy the resources spec to built-in init containers if they are not specified |
+| [Rebalancer V2](docs/design/features/rebalancer_v2.md) | 1.2.31 | 1.2.31 | >= 3.10.0 | Community, Enterprise | Alpha | False | --deployment.feature.rebalancer-v2 | N/A |
+| [Secured containers](docs/design/features/secured_containers.md) | 1.2.31 | 1.2.31 | >= 3.8.0 | Community, Enterprise | Alpha | False | --deployment.feature.secured-containers | If set to True, the Operator will run containers in secure mode |
+| Version Check V2 | 1.2.31 | 1.2.31 | >= 3.8.0 | Community, Enterprise | Alpha | False | --deployment.feature.upgrade-version-check-V2 | N/A |
+| [Operator Ephemeral Volumes](docs/design/features/ephemeral_volumes.md) | 1.2.31 | 1.2.2 | >= 3.8.0 | Community, Enterprise | Beta | False | --deployment.feature.ephemeral-volumes | N/A |
+| [Force Rebuild Out Synced Shards](docs/design/features/rebuild_out_synced_shards.md) | 1.2.27 | 1.2.27 | >= 3.8.0 | Community, Enterprise | Production | False | --deployment.feature.force-rebuild-out-synced-shards | Use only if you are aware of the risks. |
+| [Spec Default Restore](docs/design/features/deployment_spec_defaults.md) | 1.2.25 | 1.2.21 | >= 3.8.0 | Community, Enterprise | Beta | True | --deployment.feature.deployment-spec-defaults-restore | If set to False, the Operator will not change the ArangoDeployment Spec |
+| Version Check | 1.2.23 | 1.1.4 | >= 3.8.0 | Community, Enterprise | Production | True | --deployment.feature.upgrade-version-check | N/A |
+| [Failover Leader service](docs/design/features/failover_leader_service.md) | 1.2.13 | 1.2.13 | >= 3.8.0 | Community, Enterprise | Production | False | --deployment.feature.failover-leadership | N/A |
+| Graceful Restart | 1.2.5 | 1.0.7 | >= 3.8.0 | Community, Enterprise | Production | True | --deployment.feature.graceful-shutdown | N/A |
+| Optional Graceful Restart | 1.2.0 | 1.2.5 | >= 3.8.0 | Community, Enterprise | Production | False | --deployment.feature.optional-graceful-shutdown | N/A |
+| Operator Internal Metrics Exporter | 1.2.0 | 1.2.0 | >= 3.8.0 | Community, Enterprise | Production | True | --deployment.feature.metrics-exporter | N/A |
+| Operator Maintenance Management Support | 1.2.0 | 1.0.7 | >= 3.8.0 | Community, Enterprise | Production | True | --deployment.feature.maintenance | N/A |
+| Encryption Key Rotation Support | 1.2.0 | 1.0.3 | >= 3.8.0 | Enterprise | NotSupported | False | --deployment.feature.encryption-rotation | N/A |
+| TLS Runtime Rotation Support | 1.1.0 | 1.0.4 | >= 3.8.0 | Enterprise | Production | True | --deployment.feature.tls-rotation | N/A |
+| JWT Rotation Support | 1.1.0 | 1.0.3 | >= 3.8.0 | Enterprise | Production | True | --deployment.feature.jwt-rotation | N/A |
+| Operator Single Mode | 1.0.4 | 1.0.4 | >= 3.8.0 | Community, Enterprise | Production | False | --mode.single | Only 1 instance of the Operator is allowed in a namespace when this feature is enabled |
+| TLS SNI Support | 1.0.3 | 1.0.3 | >= 3.8.0 | Enterprise | Production | True | --deployment.feature.tls-sni | N/A |
+| Disabling of liveness probes | 0.3.11 | 0.3.10 | >= 3.8.0 | Community, Enterprise | Production | True | N/A | N/A |
+| Pod Disruption Budgets | 0.3.11 | 0.3.10 | >= 3.8.0 | Community, Enterprise | Production | True | N/A | N/A |
+| Prometheus Metrics Exporter | 0.3.11 | 0.3.10 | >= 3.8.0 | Community, Enterprise | Production | True | N/A | Prometheus required |
+| Sidecar Containers | 0.3.11 | 0.3.10 | >= 3.8.0 | Community, Enterprise | Production | True | N/A | N/A |
+| Volume Claim Templates | 0.3.11 | 0.3.10 | >= 3.8.0 | Community, Enterprise | Production | True | N/A | N/A |
+| Volume Resizing | 0.3.11 | 0.3.10 | >= 3.8.0 | Community, Enterprise | Production | True | N/A | N/A |
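The flags in the table above are command-line arguments of the operator binary. As a hedged sketch of passing one through the Helm chart, assuming the chart exposes an `operator.args` value for extra arguments (this value name is an assumption; verify it against the values.yaml of the chart version you install):

```yaml
# values.yaml - sketch only: `operator.args` is assumed to forward extra
# arguments to the operator container; it is not confirmed by this patch.
operator:
  args:
    - --deployment.feature.ephemeral-volumes   # flag taken from the feature table above
```

This would then be installed with `helm install .../kube-arangodb-1.2.34.tgz -f values.yaml`.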
+
+
+#### Operator Enterprise Only Features
+
+To upgrade to the Enterprise Edition, you need to get in touch with the ArangoDB team. [Contact us](https://www.arangodb.com/contact/) for more details.
+
+
+| Feature | Operator Version | Introduced | ArangoDB Version | ArangoDB Edition | State | Enabled | Flag | Remarks |
+|:--------|:-----------------|:-----------|:-----------------|:-----------------|:------|:--------|:-----|:--------|
+| AgencyCache | 1.2.30 | 1.2.30 | >= 3.8.0 | Enterprise | Production | True | N/A | Enable the Agency Cache mechanism in the Operator (increases the limit of nodes) |
+| Member Maintenance Support | 1.2.25 | 1.2.16 | >= 3.8.0 | Enterprise | Production | True | N/A | Enable Member Maintenance during planned restarts |
+| [Rebalancer](docs/design/features/rebalancer.md) | 1.2.15 | 1.2.5 | >= 3.8.0 | Enterprise | Production | True | N/A | N/A |
+| [TopologyAwareness](docs/design/topology_awareness.md) | 1.2.4 | 1.2.4 | >= 3.8.0 | Enterprise | Production | True | N/A | N/A |
+
+
+## Operator Community Edition (CE)
+
+Image: `arangodb/kube-arangodb:1.2.34`
+
+### Installation of latest CE release using Kubectl
 
 ```bash
-kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.2.3/manifests/arango-crd.yaml
-kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.2.3/manifests/arango-deployment.yaml
+kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.2.34/manifests/arango-crd.yaml
+kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.2.34/manifests/arango-deployment.yaml
 # To use `ArangoLocalStorage`, also run
-kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.2.3/manifests/arango-storage.yaml
+kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.2.34/manifests/arango-storage.yaml
 # To use `ArangoDeploymentReplication`, also run
-kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.2.3/manifests/arango-deployment-replication.yaml
+kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.2.34/manifests/arango-deployment-replication.yaml
 ```
 
 This procedure can also be used for upgrades and will not harm any
 running ArangoDB deployments.
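With the operator and CRDs in place, a database is requested by applying an `ArangoDeployment` resource. A minimal hedged sketch: the group fields such as `count` are documented in docs/api/ArangoDeployment.V1.md added by this patch, while `mode` and the `dbservers` group name are assumptions about the usual CRD shape, and all values are illustrative:

```yaml
apiVersion: database.arangodb.com/v1
kind: ArangoDeployment
metadata:
  name: example-cluster
spec:
  mode: Cluster      # assumed field; see the ArangoDeployment CRD
  agents:
    count: 3         # .spec.agents.count, see the API reference below
  dbservers:
    count: 3
  coordinators:
    count: 3
```

It can then be applied with `kubectl apply -f example-cluster.yaml` in the namespace where the operator runs.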
 
-## Installation of latest release using kustomize
+### Installation of latest CE release using kustomize
 
 Installation using [kustomize](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/) looks like installation from yaml files,
 but the user is allowed to modify namespace or resource names without yaml modifications.
@@ -144,22 +139,20 @@ bases:
   - https://github.com/arangodb/kube-arangodb/manifests/kustomize/deployment/?ref=1.0.3
 ```
 
-## Installation of latest release using Helm
+### Installation of latest CE release using Helm
 
 Only use this procedure for a new install of the operator. See below for
 upgrades.
 
 ```bash
-# The following will install the custom resources required by the operators.
-helm install https://github.com/arangodb/kube-arangodb/releases/download/1.2.3/kube-arangodb-crd-1.2.3.tgz
 # The following will install the operator for `ArangoDeployment` &
 # `ArangoDeploymentReplication` resources.
-helm install https://github.com/arangodb/kube-arangodb/releases/download/1.2.3/kube-arangodb-1.2.3.tgz
+helm install https://github.com/arangodb/kube-arangodb/releases/download/1.2.34/kube-arangodb-1.2.34.tgz
 # To use `ArangoLocalStorage`, set field `operator.features.storage` to true
-helm install https://github.com/arangodb/kube-arangodb/releases/download/1.2.3/kube-arangodb-1.2.3.tgz --set "operator.features.storage=true"
+helm install https://github.com/arangodb/kube-arangodb/releases/download/1.2.34/kube-arangodb-1.2.34.tgz --set "operator.features.storage=true"
 ```
 
-## Upgrading the operator using Helm
+### Upgrading the operator using Helm
 
 To upgrade the operator to the latest version with Helm, you have to
 delete the previous deployment and then install the latest. **HOWEVER**:
@@ -175,7 +168,6 @@ list` output:
 
 ```
 % helm list
 NAME            REVISION UPDATED                  STATUS   CHART                        APP VERSION NAMESPACE
-steely-mule     1        Sun Mar 31 21:11:07 2019 DEPLOYED kube-arangodb-crd-0.3.9                  default
 vetoed-ladybird 1        Mon Apr  8 11:36:58 2019 DEPLOYED kube-arangodb-0.3.10-preview             default
 ```
 
@@ -191,9 +183,95 @@ with `helm install` as normal:
 
 ```bash
 # The following will install the operator for `ArangoDeployment` &
 # `ArangoDeploymentReplication` resources.
-helm install https://github.com/arangodb/kube-arangodb/releases/download/1.2.3/kube-arangodb-1.2.3.tgz
+helm install https://github.com/arangodb/kube-arangodb/releases/download/1.2.34/kube-arangodb-1.2.34.tgz
 # To use `ArangoLocalStorage`, set field `operator.features.storage` to true
-helm install https://github.com/arangodb/kube-arangodb/releases/download/1.2.3/kube-arangodb-1.2.3.tgz --set "operator.features.storage=true"
+helm install https://github.com/arangodb/kube-arangodb/releases/download/1.2.34/kube-arangodb-1.2.34.tgz --set "operator.features.storage=true"
 ```
+
+## Operator Enterprise Edition (EE)
+
+Image: `arangodb/kube-arangodb-enterprise:1.2.34`
+
+### Installation of latest EE release using Kubectl
+
+```bash
+kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.2.34/manifests/enterprise-crd.yaml
+kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.2.34/manifests/enterprise-deployment.yaml
+# To use `ArangoLocalStorage`, also run
+kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.2.34/manifests/enterprise-storage.yaml
+# To use `ArangoDeploymentReplication`, also run
+kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.2.34/manifests/enterprise-deployment-replication.yaml
+```
+
+This procedure can also be used for upgrades and will not harm any
+running ArangoDB deployments.
+
+### Installation of latest EE release using kustomize
+
+Installation using [kustomize](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/) looks like installation from yaml files,
+but the user is allowed to modify namespace or resource names without yaml modifications.
+
+It is recommended to use kustomization instead of handcrafting namespaces in yaml files - kustomization will replace not only resource namespaces,
+but also namespace references in resources like ClusterRoleBinding.
+
+Example kustomization file:
+```
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namespace: my-custom-namespace
+
+bases:
+  - https://github.com/arangodb/kube-arangodb/manifests/kustomize-enterprise/deployment/?ref=1.0.3
+```
+
+### Installation of latest EE release using Helm
+
+Only use this procedure for a new install of the operator. See below for
+upgrades.
+
+```bash
+# The following will install the operator for `ArangoDeployment` &
+# `ArangoDeploymentReplication` resources.
+helm install https://github.com/arangodb/kube-arangodb/releases/download/1.2.34/kube-arangodb-1.2.34.tgz --set "operator.image=arangodb/kube-arangodb-enterprise:1.2.34"
+# To use `ArangoLocalStorage`, set field `operator.features.storage` to true
+helm install https://github.com/arangodb/kube-arangodb/releases/download/1.2.34/kube-arangodb-1.2.34.tgz --set "operator.image=arangodb/kube-arangodb-enterprise:1.2.34" --set "operator.features.storage=true"
+```
+
+### Upgrading the operator using Helm
+
+To upgrade the operator to the latest version with Helm, you have to
+delete the previous deployment and then install the latest. **HOWEVER**:
+You *must not delete* the deployment of the custom resource definitions
+(CRDs), or your ArangoDB deployments will be deleted!
+
+Therefore, you have to use `helm list` to find the deployments for the
+operator (`kube-arangodb`) and of the storage operator
+(`kube-arangodb-storage`) and use `helm delete` to delete them using the
+automatically generated deployment names. Here is an example of a `helm
+list` output:
+
+```
+% helm list
+NAME            REVISION UPDATED                  STATUS   CHART                        APP VERSION NAMESPACE
+vetoed-ladybird 1        Mon Apr  8 11:36:58 2019 DEPLOYED kube-arangodb-0.3.10-preview             default
+```
+
+So here, you would have to do
+
+```bash
+helm delete vetoed-ladybird
+```
+
+Then you can install the new version with `helm install` as normal:
+
+```bash
+# The following will install the operator for `ArangoDeployment` &
+# `ArangoDeploymentReplication` resources.
+helm install https://github.com/arangodb/kube-arangodb/releases/download/1.2.34/kube-arangodb-1.2.34.tgz --set "operator.image=arangodb/kube-arangodb-enterprise:1.2.34"
+# To use `ArangoLocalStorage`, set field `operator.features.storage` to true
+helm install https://github.com/arangodb/kube-arangodb/releases/download/1.2.34/kube-arangodb-1.2.34.tgz --set "operator.image=arangodb/kube-arangodb-enterprise:1.2.34" --set "operator.features.storage=true"
 ```
 
 ## Building
@@ -206,11 +284,3 @@ kubectl apply -f manifests/arango-storage-dev.yaml
 # To use `ArangoDeploymentReplication`, also run
 kubectl apply -f manifests/arango-deployment-replication-dev.yaml
 ```
-
-## ArangoExporter
-
-[ArangoExporter](https://github.com/arangodb-helper/arangodb-exporter) project has been merged with ArangoOperator.
-Starting from ArangoDB 3.6 Servers expose metrics endpoint with prometheus compatible format. From this point Exporter
-is used only for TLS and/or Authentication termination to be compatible with all Prometheus installations.
-
-ArangoExporter documentation can be found [here](./docs/design/exporter.md)
\ No newline at end of file
diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS
new file mode 100644
index 000000000..09f301439
--- /dev/null
+++ b/docs/CODEOWNERS
@@ -0,0 +1,2 @@
+# This team will own the entire repository
+* @arangodb/team-golang
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 000000000..4bb0b7274
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,11 @@
+# ArangoDB Kubernetes Operator
+
+- [Tutorial](https://www.arangodb.com/docs/stable/tutorials-kubernetes.html)
+- [Documentation](https://www.arangodb.com/docs/stable/deployment-kubernetes.html)
+- [Design documents](./design/README.md)
+- [Providers](./providers/README.md)
+
+
+# ArangoDB Kubernetes Operator Generated Documentation
+- [ArangoDB Operator Metrics & Alerts](./generated/metrics/README.md)
+- [ArangoDB Actions](./generated/actions.md)
diff --git a/docs/api/ArangoDeployment.V1.md b/docs/api/ArangoDeployment.V1.md
new file mode 100644
index 000000000..8e2631c91
--- /dev/null
+++ b/docs/api/ArangoDeployment.V1.md
@@ -0,0 +1,4080 @@
+# API Reference for ArangoDeployment V1
+
+## Spec
+
+### .spec.agents.affinity: core.PodAffinity
+
+Affinity specifies additional affinity settings in ArangoDB Pod definitions
+
+Links:
+* [Documentation of core.PodAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podaffinity-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L134)
+
+### .spec.agents.allowMemberRecreation: bool
+
+AllowMemberRecreation allows recreating a member. The value is used only for Coordinators and DBServers, where it defaults to True; for all other groups it is set to false.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L171)
+
+### .spec.agents.annotations: map[string]string
+
+Annotations specifies the annotations added to Pods in this group.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L98)
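Each `.spec.agents.*` entry in this reference maps onto YAML nested under the `agents` key (the same shape applies to the other server groups); a short hedged sketch with illustrative values, using only fields documented here:

```yaml
spec:
  agents:
    count: 3               # .spec.agents.count (documented below)
    annotations:
      example/tier: db     # illustrative key/value, per .spec.agents.annotations
```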
+
+### .spec.agents.annotationsIgnoreList: []string
+
+AnnotationsIgnoreList lists regexp or plain definitions of annotations that should be ignored
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L100)
+
+### .spec.agents.annotationsMode: string
+
+AnnotationsMode defines the annotations mode which should be used while overriding annotations
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L102)
+
+### .spec.agents.antiAffinity: core.PodAntiAffinity
+
+AntiAffinity specifies additional antiAffinity settings in ArangoDB Pod definitions
+
+Links:
+* [Documentation of core.Pod.AntiAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podantiaffinity-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L130)
+
+### .spec.agents.args: []string
+
+Args holds additional commandline arguments
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L78)
+
+### .spec.agents.count: int
+
+Count holds the requested number of servers
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L72)
+
+### .spec.agents.entrypoint: string
+
+Entrypoint overrides the container executable
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L80)
+
+### .spec.agents.envs\[int\].name: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_env_var.go#L26)
+
+### .spec.agents.envs\[int\].value: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_env_var.go#L27)
+
+### .spec.agents.ephemeralVolumes.apps.size: resource.Quantity
+
+Size defines the size of the ephemeral volume
+
+Links:
+* [Documentation of resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#quantity-resource-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_ephemeral_volumes.go#L64)
+
+### .spec.agents.ephemeralVolumes.temp.size: resource.Quantity
+
+Size defines the size of the ephemeral volume
+
+Links:
+* [Documentation of resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#quantity-resource-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_ephemeral_volumes.go#L64)
+
+### .spec.agents.exporterPort: uint16
+
+ExporterPort defines the port used by the exporter
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L184)
+
+### .spec.agents.extendedRotationCheck: bool
+
+ExtendedRotationCheck extends the checks for rotation
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L157)
+
+### .spec.agents.externalPortEnabled: bool
+
+ExternalPortEnabled defines if the external port should be enabled. If set to false, ports need to be exposed via a sidecar. Only for ArangoD members
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L169)
+
+### .spec.agents.indexMethod: string
+
+IndexMethod defines the group indexing method
+
+Possible Values:
+* random (default) - Pick a random ID for the member. Enforced on the Community Operator.
+* ordered - Use a sequential number as the Member ID, starting from 0. Enterprise Operator required.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L177)
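A sketch of the env and indexing fields just described (the variable name is hypothetical and `ordered` requires the Enterprise operator, per the possible values above):

```yaml
spec:
  agents:
    envs:
      - name: MY_CUSTOM_ENV    # hypothetical variable name
        value: "some-value"
    indexMethod: ordered       # Enterprise Operator required
```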
+
+### .spec.agents.initContainers.containers: []core.Container
+
+Containers contains a list of containers
+
+Links:
+* [Documentation of core.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#container-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_init_containers.go#L91)
+
+### .spec.agents.initContainers.mode: string
+
+Mode defines the container replace mode
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_init_containers.go#L94)
+
+### .spec.agents.internalPort: int
+
+InternalPort defines the port used for internal communication; it can be accessed over localhost via a sidecar. Only for ArangoD members
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L165)
+
+### .spec.agents.internalPortProtocol: string
+
+InternalPortProtocol defines the protocol of the port used for internal communication; it can be accessed over localhost via a sidecar. Only for ArangoD members
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L167)
+
+### .spec.agents.labels: map[string]string
+
+Labels specifies the labels added to Pods in this group.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L104)
+
+### .spec.agents.labelsIgnoreList: []string
+
+LabelsIgnoreList lists regexp or plain definitions of labels that should be ignored
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L106)
+
+### .spec.agents.labelsMode: string
+
+LabelsMode defines the labels mode which should be used while overriding labels
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L108)
+
+### .spec.agents.maxCount: int
+
+MaxCount specifies an upper limit for count
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L76)
+
+### .spec.agents.minCount: int
+
+MinCount specifies a lower limit for count
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L74)
+
+### .spec.agents.nodeAffinity: core.NodeAffinity
+
+NodeAffinity specifies additional nodeAffinity settings in ArangoDB Pod definitions
+
+Links:
+* [Documentation of code.NodeAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#nodeaffinity-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L138)
+
+### .spec.agents.nodeSelector: map[string]string
+
+NodeSelector specifies a set of selectors for nodes
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L114)
+
+### .spec.agents.numactl.args: []string
+
+Args defines the list of arguments passed to the numactl process
+
+Default Value: []
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L38)
+
+### .spec.agents.numactl.enabled: bool
+
+Enabled defines if numactl should be enabled
+
+Default Value: false
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L30)
+
+### .spec.agents.numactl.path: string
+
+Path defines the numactl path within the container
+
+Default Value: /usr/bin/numactl
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L34)
+
+### .spec.agents.overrideDetectedNumberOfCores: bool
+
+OverrideDetectedNumberOfCores determines if the number of cores should be overridden based on the values in resources.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L92)
+
+### .spec.agents.overrideDetectedTotalMemory: bool
+
+OverrideDetectedTotalMemory determines if memory should be overridden based on the values in resources.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L90)
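A short sketch of the numactl knobs above (the `--interleave=all` argument is an illustrative numactl option, not a recommendation from this patch):

```yaml
spec:
  agents:
    numactl:
      enabled: true
      path: /usr/bin/numactl   # default value, per the reference above
      args:
        - --interleave=all     # illustrative argument passed to numactl
```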
+
+### .spec.agents.podModes.network: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec_pod_modes.go#L31)
+
+### .spec.agents.podModes.pid: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec_pod_modes.go#L32)
+
+### .spec.agents.port: uint16
+
+Port defines the port used by the member
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L182)
+
+### .spec.agents.priorityClassName: string
+
+PriorityClassName specifies a priority class name
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L118)
+
+### .spec.agents.probes.livenessProbeDisabled: bool
+
+LivenessProbeDisabled, if true, disables livenessProbes
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L193)
+
+### .spec.agents.probes.livenessProbeSpec.failureThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227)
+
+### .spec.agents.probes.livenessProbeSpec.initialDelaySeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223)
+
+### .spec.agents.probes.livenessProbeSpec.periodSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224)
+
+### .spec.agents.probes.livenessProbeSpec.successThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226)
+
+### .spec.agents.probes.livenessProbeSpec.timeoutSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225)
+
+### .spec.agents.probes.ReadinessProbeDisabled: bool
+
+OldReadinessProbeDisabled, if true, disables readinessProbes
+Deprecated: This field is deprecated, kept only for backward compatibility.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L200)
+
+### .spec.agents.probes.readinessProbeDisabled: bool
+
+ReadinessProbeDisabled overrides the probe-disabled flag in the preferred (lowercase) form, with backward compatibility
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L202)
+
+### .spec.agents.probes.readinessProbeSpec.failureThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227)
+
+### .spec.agents.probes.readinessProbeSpec.initialDelaySeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223)
+
+### .spec.agents.probes.readinessProbeSpec.periodSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224)
+
+### .spec.agents.probes.readinessProbeSpec.successThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226)
+
+### .spec.agents.probes.readinessProbeSpec.timeoutSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225)
+
+### .spec.agents.probes.startupProbeDisabled: bool
+
+StartupProbeDisabled, if true, disables startupProbes
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L207)
+
+### .spec.agents.probes.startupProbeSpec.failureThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227)
+
+### .spec.agents.probes.startupProbeSpec.initialDelaySeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223)
+
+### .spec.agents.probes.startupProbeSpec.periodSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224)
+
+### .spec.agents.probes.startupProbeSpec.successThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226)
+
+### .spec.agents.probes.startupProbeSpec.timeoutSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225)
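Putting the probe fields together, a hedged sketch with illustrative timings:

```yaml
spec:
  agents:
    probes:
      livenessProbeDisabled: false
      livenessProbeSpec:
        initialDelaySeconds: 15   # all values illustrative
        periodSeconds: 10
        timeoutSeconds: 5
        successThreshold: 1
        failureThreshold: 3
```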
+
+### .spec.agents.pvcResizeMode: string
+
+VolumeResizeMode specifies the resize mode for the PVC
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L124)
+
+### .spec.agents.resources: core.ResourceRequirements
+
+Resources holds resource requests & limits
+
+Links:
+* [Documentation of core.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#resourcerequirements-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L88)
+
+### .spec.agents.schedulerName: string
+
+SchedulerName defines the scheduler name used for the group
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L82)
+
+### .spec.agents.securityContext.addCapabilities: []string
+
+AddCapabilities adds new capabilities to containers
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L45)
+
+### .spec.agents.securityContext.allowPrivilegeEscalation: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L47)
+
+### .spec.agents.securityContext.dropAllCapabilities: bool
+
+DropAllCapabilities specifies if capabilities should be dropped for this pod's containers
+Deprecated: This field is added for backward compatibility. Will be removed in 1.1.0.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L43)
+
+### .spec.agents.securityContext.fsGroup: int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L55)
+
+### .spec.agents.securityContext.privileged: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L48)
+
+### .spec.agents.securityContext.readOnlyRootFilesystem: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L49)
+
+### .spec.agents.securityContext.runAsGroup: int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L52)
+
+### .spec.agents.securityContext.runAsNonRoot: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L50)
+
+### .spec.agents.securityContext.runAsUser: int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L51)
+
+### .spec.agents.securityContext.seccompProfile: core.SeccompProfile
+
+SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.
+
+Links:
+* [Documentation of core.SeccompProfile](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#seccompprofile-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L71)
+
+### .spec.agents.securityContext.seLinuxOptions: core.SELinuxOptions
+
+SELinuxOptions are the labels to be applied to the container
+
+Links:
+* [Documentation of core.SELinuxOptions](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#selinuxoptions-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L76)
+
+### .spec.agents.securityContext.supplementalGroups: []int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L54)
+
+### .spec.agents.securityContext.sysctls: map[string]intstr.IntOrString
+
+Sysctls holds a list of namespaced sysctls used for the pod. Pods with sysctls
+unsupported by the container runtime might fail to launch.
+The map value can be a String or an Int
+
+Links:
+* [Documentation](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/)
+
+Example:
+```yaml
+sysctls:
+  "kernel.shm_rmid_forced": "0"
+  "net.core.somaxconn": 1024
+  "kernel.msgmax": "65536"
+```
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L66)
+
+### .spec.agents.serviceAccountName: string
+
+ServiceAccountName specifies the name of the service account used for Pods in this group.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L112)
+
+### .spec.agents.shutdownDelay: int
+
+ShutdownDelay defines how long the operator should delay finalizer removal after shutdown
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L163)
+
+### .spec.agents.shutdownMethod: string
+
+ShutdownMethod describes the member shutdown procedure taken by the Operator
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L161)
+
+### .spec.agents.sidecarCoreNames: []string
+
+SidecarCoreNames is a list of sidecar containers which must run in the pod.
+Some names (e.g.: "server", "worker") are reserved, and they don't have any impact.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L141)
+
+### .spec.agents.sidecars: []core.Container
+
+Sidecars specifies a list of additional containers to be started
+
+Links:
+* [Documentation of core.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#container-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L145)
+
+### .spec.agents.storageClassName: string
+
+StorageClassName specifies the classname for storage of the servers.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L84)
+
+### .spec.agents.terminationGracePeriodSeconds: int64
+
+TerminationGracePeriodSeconds overrides the default TerminationGracePeriodSeconds for pods - via silent rotation
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L173)
+
+### .spec.agents.tolerations: []core.Toleration
+
+Tolerations specifies the tolerations added to Pods in this group.
+
+Links:
+* [Documentation of core.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#toleration-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L96)
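Since `.spec.agents.tolerations` takes plain core.Toleration objects, a minimal sketch (the taint key and value are illustrative):

```yaml
spec:
  agents:
    tolerations:
      - key: dedicated     # illustrative taint key
        operator: Equal
        value: arangodb
        effect: NoSchedule
```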
+
+### .spec.agents.volumeAllowShrink: bool
+
+Deprecated: VolumeAllowShrink allows shrinking the volume
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L126)
+
+### .spec.agents.volumeClaimTemplate: core.PersistentVolumeClaim
+
+VolumeClaimTemplate specifies a template for volume claims
+
+Links:
+* [Documentation of core.PersistentVolumeClaim](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaim-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L122)
+
+### .spec.agents.volumeMounts: []ServerGroupSpecVolumeMount
+
+VolumeMounts defines the list of volume mounts mounted into the server container
+
+Links:
+* [Documentation of ServerGroupSpecVolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#volumemount-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L153)
+
+### .spec.agents.volumes\[int\].configMap: core.ConfigMapVolumeSource
+
+ConfigMap which should be mounted into the pod
+
+Links:
+* [Documentation of core.ConfigMapVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#configmapvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L138)
+
+### .spec.agents.volumes\[int\].emptyDir: core.EmptyDirVolumeSource
+
+EmptyDir
+
+Links:
+* [Documentation of core.EmptyDirVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#emptydirvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L143)
+
+### .spec.agents.volumes\[int\].hostPath: core.HostPathVolumeSource
+
+HostPath
+
+Links:
+* [Documentation of core.HostPathVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#hostpathvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L148)
+
+### .spec.agents.volumes\[int\].name: string
+
+Name of the volume
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L128)
+
+### .spec.agents.volumes\[int\].persistentVolumeClaim: core.PersistentVolumeClaimVolumeSource
+
+PersistentVolumeClaim
+
+Links:
+* [Documentation of core.PersistentVolumeClaimVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaimvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L153)
+
+### .spec.agents.volumes\[int\].secret: core.SecretVolumeSource
+
+Secret which should be mounted into the pod
+
+Links:
+* [Documentation of core.SecretVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L133)
+
+### .spec.allowUnsafeUpgrade: bool
+
+AllowUnsafeUpgrade determines if an upgrade with a missing member or with shards not in sync is allowed
+
+[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L163)
+
+### .spec.annotations: map[string]string
+
+Annotations specifies the annotations added to all ArangoDeployment owned resources (pods, services, PVCs, PDBs).
+
+[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L126)
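The volume entries above pair with `volumeMounts` in the usual Kubernetes way; a hedged sketch mounting a Secret (all names illustrative):

```yaml
spec:
  agents:
    volumes:
      - name: extra-config
        secret:
          secretName: my-extra-config   # illustrative Secret name
    volumeMounts:
      - name: extra-config
        mountPath: /etc/extra           # illustrative mount path
```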
+
+### .spec.annotationsIgnoreList: []string
+
+AnnotationsIgnoreList lists regexp or plain definitions of annotations that should be ignored
+
+[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L129)
+
+### .spec.annotationsMode: string
+
+AnnotationsMode defines the annotations mode which should be used while overriding annotations.
+
+Possible Values:
+* disabled (default) - Disable annotations/labels override. Default if there are no annotations/labels set in the ArangoDeployment
+* append - Add new annotations/labels without affecting old ones
+* replace - Replace existing annotations/labels
+
+[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L135)
+
+### .spec.architecture: []string
+
+Architecture defines the list of supported architectures.
+The first element in the list is marked as the default architecture.
+
+Links:
+* [Architecture Change](/docs/design/arch_change.md)
+
+Default Value: ['amd64']
+
+[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L258)
+
+### .spec.auth.jwtSecretName: string
+
+[Code Reference](/pkg/apis/deployment/v1/authentication_spec.go#L31)
+
+### .spec.bootstrap.passwordSecretNames: map[string]string
+
+PasswordSecretNames contains a map of username to password-secret-name
+
+[Code Reference](/pkg/apis/deployment/v1/bootstrap.go#L53)
+
+### .spec.chaos.enabled: bool
+
+Enabled switches the chaos monkey for a deployment on or off.
+
+[Code Reference](/pkg/apis/deployment/v1/chaos_spec.go#L33)
+
+### .spec.chaos.interval: int64
+
+Interval is the time between events
+
+[Code Reference](/pkg/apis/deployment/v1/chaos_spec.go#L35)
+
+### .spec.chaos.kill-pod-probability: int
+
+KillPodProbability is the chance of a pod being killed during an event
+
+[Code Reference](/pkg/apis/deployment/v1/chaos_spec.go#L37)
+
+### .spec.ClusterDomain: string
+
+ClusterDomain defines the domain used in the Kubernetes cluster.
+Required only if the domain is not set to the default (cluster.local)
+
+Default Value: cluster.local
+
+[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L237)
+
+### .spec.communicationMethod: string
+
+CommunicationMethod defines the communication method used in the deployment
+
+Possible Values:
+* headless (default) - Define the old communication mechanism, based on a headless service.
+* dns - Define ClusterIP Service DNS based communication.
+* short-dns - Define ClusterIP Service DNS based communication. Use namespaced short DNS (used in migration)
+* headless-dns - Define Headless Service DNS based communication.
+* ip - Define ClusterIP Service IP based communication.
+
+[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L245)
+
+### .spec.coordinators.affinity: core.PodAffinity
+
+Affinity specifies additional affinity settings in ArangoDB Pod definitions
+
+Links:
+* [Documentation of core.PodAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podaffinity-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L134)
+
+### .spec.coordinators.allowMemberRecreation: bool
+
+AllowMemberRecreation allows recreating a member. The value is used only for Coordinators and DBServers, where it defaults to True; for all other groups it is set to false.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L171)
+
+### .spec.coordinators.annotations: map[string]string
+
+Annotations specifies the annotations added to Pods in this group.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L98)
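A few of the deployment-level fields above combined into one hedged sketch (the Secret name is illustrative):

```yaml
spec:
  architecture:
    - amd64                 # default, per .spec.architecture
  communicationMethod: dns  # one of the possible values listed above
  bootstrap:
    passwordSecretNames:
      root: root-password   # illustrative Secret holding the root user's password
```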
+
+### .spec.coordinators.annotationsIgnoreList: []string
+
+AnnotationsIgnoreList lists regexp or plain definitions of annotations that should be ignored
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L100)
+
+### .spec.coordinators.annotationsMode: string
+
+AnnotationsMode defines the annotations mode which should be used while overriding annotations
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L102)
+
+### .spec.coordinators.antiAffinity: core.PodAntiAffinity
+
+AntiAffinity specifies additional antiAffinity settings in ArangoDB Pod definitions
+
+Links:
+* [Documentation of core.Pod.AntiAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podantiaffinity-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L130)
+
+### .spec.coordinators.args: []string
+
+Args holds additional commandline arguments
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L78)
+
+### .spec.coordinators.count: int
+
+Count holds the requested number of servers
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L72)
+
+### .spec.coordinators.entrypoint: string
+
+Entrypoint overrides the container executable
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L80)
+
+### .spec.coordinators.envs\[int\].name: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_env_var.go#L26)
+
+### .spec.coordinators.envs\[int\].value: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_env_var.go#L27)
+
+### .spec.coordinators.ephemeralVolumes.apps.size: resource.Quantity
+
+Size defines the size of the ephemeral volume
+
+Links:
+* [Documentation of resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#quantity-resource-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_ephemeral_volumes.go#L64)
+
+### .spec.coordinators.ephemeralVolumes.temp.size: resource.Quantity
+
+Size defines the size of the ephemeral volume
+
+Links:
+* [Documentation of resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#quantity-resource-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_ephemeral_volumes.go#L64)
+
+### .spec.coordinators.exporterPort: uint16
+
+ExporterPort defines the port used by the exporter
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L184)
+
+### .spec.coordinators.extendedRotationCheck: bool
+
+ExtendedRotationCheck extends the checks for rotation
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L157)
+
+### .spec.coordinators.externalPortEnabled: bool
+
+ExternalPortEnabled defines if the external port should be enabled. If set to false, ports need to be exposed via a sidecar. Only for ArangoD members
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L169)
+
+### .spec.coordinators.indexMethod: string
+
+IndexMethod defines the group indexing method
+
+Possible Values:
+* random (default) - Pick a random ID for the member. Enforced on the Community Operator.
+* ordered - Use a sequential number as the Member ID, starting from 0. Enterprise Operator required.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L177)
+ +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L177) + +### .spec.coordinators.initContainers.containers: []core.Container + +Containers contains a list of containers + +Links: +* [Documentation of core.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#container-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_init_containers.go#L91) + +### .spec.coordinators.initContainers.mode: string + +Mode defines the container replace mode + +[Code Reference](/pkg/apis/deployment/v1/server_group_init_containers.go#L94) + +### .spec.coordinators.internalPort: int + +InternalPort defines the port used for internal communication; it can be accessed over localhost via a sidecar. Only for ArangoD members + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L165) + +### .spec.coordinators.internalPortProtocol: string + +InternalPortProtocol defines the protocol of the port used for internal communication; it can be accessed over localhost via a sidecar. Only for ArangoD members + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L167) + +### .spec.coordinators.labels: map[string]string + +Labels specifies the labels added to Pods in this group. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L104) + +### .spec.coordinators.labelsIgnoreList: []string + +LabelsIgnoreList lists regexp or plain definitions of labels which should be ignored + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L106) + +### .spec.coordinators.labelsMode: string + +LabelsMode defines the labels mode to be used while overriding labels + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L108) + +### .spec.coordinators.maxCount: int + +MaxCount specifies an upper limit for count + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L76) + +### .spec.coordinators.minCount: int + +MinCount specifies a lower limit for count + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L74) + +### .spec.coordinators.nodeAffinity: core.NodeAffinity + +NodeAffinity specifies additional nodeAffinity settings in ArangoDB Pod definitions + +Links: +* [Documentation of core.NodeAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#nodeaffinity-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L138) + +### .spec.coordinators.nodeSelector: map[string]string + +NodeSelector specifies a set of selectors for nodes + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L114) + +### .spec.coordinators.numactl.args: []string + +Args defines the list of arguments passed to the numactl process + +Default Value: [] + +[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L38) + +### .spec.coordinators.numactl.enabled: bool + +Enabled defines if numactl should be enabled + +Default Value: false + +[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L30) + +### .spec.coordinators.numactl.path: string + +Path defines the numactl path within the container + +Default Value: /usr/bin/numactl + +[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L34) + +### .spec.coordinators.overrideDetectedNumberOfCores: bool + +OverrideDetectedNumberOfCores determines if the number of cores should be overridden based on the values in resources. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L92) + +### .spec.coordinators.overrideDetectedTotalMemory: bool + +OverrideDetectedTotalMemory determines if memory should be overridden based on the values in resources.
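+ +Example (an illustrative sketch; the memory limit is a hypothetical value from which the override would be derived): +```yaml +spec: +  coordinators: +    overrideDetectedTotalMemory: true +    resources: +      limits: +        memory: 4Gi +```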
+ +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L90) + +### .spec.coordinators.podModes.network: string + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec_pod_modes.go#L31) + +### .spec.coordinators.podModes.pid: string + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec_pod_modes.go#L32) + +### .spec.coordinators.port: uint16 + +Port defines the port used by the member + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L182) + +### .spec.coordinators.priorityClassName: string + +PriorityClassName specifies a priority class name + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L118) + +### .spec.coordinators.probes.livenessProbeDisabled: bool + +LivenessProbeDisabled, if set to true, disables livenessProbes + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L193) + +### .spec.coordinators.probes.livenessProbeSpec.failureThreshold: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227) + +### .spec.coordinators.probes.livenessProbeSpec.initialDelaySeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223) + +### .spec.coordinators.probes.livenessProbeSpec.periodSeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224) + +### .spec.coordinators.probes.livenessProbeSpec.successThreshold: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226) + +### .spec.coordinators.probes.livenessProbeSpec.timeoutSeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225) + +### .spec.coordinators.probes.ReadinessProbeDisabled: bool + +OldReadinessProbeDisabled, if set to true, disables readinessProbes. +Deprecated: This field is deprecated and kept only for backward compatibility. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L200) + +### .spec.coordinators.probes.readinessProbeDisabled: bool + +ReadinessProbeDisabled overrides the probe-disabled flag in the proper (lowercase) manner, with backward compatibility + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L202) + +### .spec.coordinators.probes.readinessProbeSpec.failureThreshold: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227) + +### .spec.coordinators.probes.readinessProbeSpec.initialDelaySeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223) + +### .spec.coordinators.probes.readinessProbeSpec.periodSeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224) + +### .spec.coordinators.probes.readinessProbeSpec.successThreshold: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226) + +### .spec.coordinators.probes.readinessProbeSpec.timeoutSeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225) + +### .spec.coordinators.probes.startupProbeDisabled: bool + +StartupProbeDisabled, if set to true, disables startupProbes + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L207) + +### .spec.coordinators.probes.startupProbeSpec.failureThreshold: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227) + +### .spec.coordinators.probes.startupProbeSpec.initialDelaySeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223) + +### .spec.coordinators.probes.startupProbeSpec.periodSeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224) + +### .spec.coordinators.probes.startupProbeSpec.successThreshold: int32 + +[Code
Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226) + +### .spec.coordinators.probes.startupProbeSpec.timeoutSeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225) + +### .spec.coordinators.pvcResizeMode: string + +VolumeResizeMode specifies the resize mode for the PVC + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L124) + +### .spec.coordinators.resources: core.ResourceRequirements + +Resources holds resource requests & limits + +Links: +* [Documentation of core.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#resourcerequirements-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L88) + +### .spec.coordinators.schedulerName: string + +SchedulerName defines the scheduler name used for the group + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L82) + +### .spec.coordinators.securityContext.addCapabilities: []string + +AddCapabilities adds new capabilities to containers + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L45) + +### .spec.coordinators.securityContext.allowPrivilegeEscalation: bool + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L47) + +### .spec.coordinators.securityContext.dropAllCapabilities: bool + +DropAllCapabilities specifies if capabilities should be dropped for this pod's containers +Deprecated: This field is added for backward compatibility. Will be removed in 1.1.0. + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L43) + +### .spec.coordinators.securityContext.fsGroup: int64 + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L55) + +### .spec.coordinators.securityContext.privileged: bool + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L48) + +### .spec.coordinators.securityContext.readOnlyRootFilesystem: bool + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L49) + +### .spec.coordinators.securityContext.runAsGroup: int64 + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L52) + +### .spec.coordinators.securityContext.runAsNonRoot: bool + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L50) + +### .spec.coordinators.securityContext.runAsUser: int64 + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L51) + +### .spec.coordinators.securityContext.seccompProfile: core.SeccompProfile + +SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.
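+ +Example (a minimal sketch using the standard Kubernetes `RuntimeDefault` profile type): +```yaml +spec: +  coordinators: +    securityContext: +      seccompProfile: +        type: RuntimeDefault +```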
+ +Links: +* [Documentation of core.SeccompProfile](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#seccompprofile-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L71) + +### .spec.coordinators.securityContext.seLinuxOptions: core.SELinuxOptions + +SELinuxOptions are the labels to be applied to the container + +Links: +* [Documentation of core.SELinuxOptions](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#selinuxoptions-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L76) + +### .spec.coordinators.securityContext.supplementalGroups: []int64 + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L54) + +### .spec.coordinators.securityContext.sysctls: map[string]intstr.IntOrString + +Sysctls holds a list of namespaced sysctls used for the pod. Pods with sysctls +unsupported by the container runtime might fail to launch. +Map values can be String or Int + +Links: +* [Documentation](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/) + +Example: +```yaml +sysctls: + "kernel.shm_rmid_forced": "0" + "net.core.somaxconn": 1024 + "kernel.msgmax": "65536" +``` + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L66) + +### .spec.coordinators.serviceAccountName: string + +ServiceAccountName specifies the name of the service account used for Pods in this group. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L112) + +### .spec.coordinators.shutdownDelay: int + +ShutdownDelay defines how long the operator should delay finalizer removal after shutdown + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L163) + +### .spec.coordinators.shutdownMethod: string + +ShutdownMethod describes the member shutdown procedure taken by the Operator + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L161) + +### .spec.coordinators.sidecarCoreNames: []string + +SidecarCoreNames is a list of sidecar containers which must run in the pod. +Some names (e.g.: "server", "worker") are reserved, and they don't have any impact. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L141) + +### .spec.coordinators.sidecars: []core.Container + +Sidecars specifies a list of additional containers to be started + +Links: +* [Documentation of core.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#container-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L145) + +### .spec.coordinators.storageClassName: string + +StorageClassName specifies the classname for storage of the servers. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L84) + +### .spec.coordinators.terminationGracePeriodSeconds: int64 + +TerminationGracePeriodSeconds overrides the default TerminationGracePeriodSeconds for pods - via silent rotation + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L173) + +### .spec.coordinators.tolerations: []core.Toleration + +Tolerations specifies the tolerations added to Pods in this group.
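+ +Example (an illustrative sketch; the taint key and value are hypothetical): +```yaml +spec: +  coordinators: +    tolerations: +      - key: "example.com/dedicated" +        operator: "Equal" +        value: "arangodb" +        effect: "NoSchedule" +```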
+ +Links: +* [Documentation of core.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#toleration-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L96) + +### .spec.coordinators.volumeAllowShrink: bool + +Deprecated: VolumeAllowShrink allows shrinking of the volume + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L126) + +### .spec.coordinators.volumeClaimTemplate: core.PersistentVolumeClaim + +VolumeClaimTemplate specifies a template for volume claims + +Links: +* [Documentation of core.PersistentVolumeClaim](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaim-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L122) + +### .spec.coordinators.volumeMounts: []ServerGroupSpecVolumeMount + +VolumeMounts defines a list of volume mounts mounted into the server container + +Links: +* [Documentation of ServerGroupSpecVolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#volumemount-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L153) + +### .spec.coordinators.volumes\[int\].configMap: core.ConfigMapVolumeSource + +ConfigMap which should be mounted into the pod + +Links: +* [Documentation of core.ConfigMapVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#configmapvolumesource-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L138) + +### .spec.coordinators.volumes\[int\].emptyDir: core.EmptyDirVolumeSource + +EmptyDir + +Links: +* [Documentation of core.EmptyDirVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#emptydirvolumesource-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L143) + +### .spec.coordinators.volumes\[int\].hostPath: core.HostPathVolumeSource + +HostPath + +Links: +* [Documentation of core.HostPathVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#hostpathvolumesource-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L148) + +### .spec.coordinators.volumes\[int\].name: string + +Name of volume + +[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L128) + +### .spec.coordinators.volumes\[int\].persistentVolumeClaim: core.PersistentVolumeClaimVolumeSource + +PersistentVolumeClaim + +Links: +* [Documentation of core.PersistentVolumeClaimVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaimvolumesource-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L153) + +### .spec.coordinators.volumes\[int\].secret: core.SecretVolumeSource + +Secret which should be mounted into the pod + +Links: +* [Documentation of core.SecretVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretvolumesource-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L133) + +### .spec.database.maintenance: bool + +Maintenance manages maintenance mode on the cluster side.
Requires the maintenance feature to be enabled + +[Code Reference](/pkg/apis/deployment/v1/database_spec.go#L25) + +### .spec.dbservers.affinity: core.PodAffinity + +Affinity specifies additional affinity settings in ArangoDB Pod definitions + +Links: +* [Documentation of core.PodAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podaffinity-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L134) + +### .spec.dbservers.allowMemberRecreation: bool + +AllowMemberRecreation allows members to be recreated. The value is used only for Coordinators and DB-Servers (default true); for all other groups it is set to false. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L171) + +### .spec.dbservers.annotations: map[string]string + +Annotations specifies the annotations added to Pods in this group. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L98) + +### .spec.dbservers.annotationsIgnoreList: []string + +AnnotationsIgnoreList lists regexp or plain definitions of annotations which should be ignored + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L100) + +### .spec.dbservers.annotationsMode: string + +AnnotationsMode defines the annotations mode to be used while overriding annotations + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L102) + +### .spec.dbservers.antiAffinity: core.PodAntiAffinity + +AntiAffinity specifies additional antiAffinity settings in ArangoDB Pod definitions + +Links: +* [Documentation of core.PodAntiAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podantiaffinity-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L130) + +### .spec.dbservers.args: []string + +Args holds additional command-line arguments + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L78) + +### .spec.dbservers.count: int + +Count holds the requested number of servers + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L72) + +### .spec.dbservers.entrypoint: string + +Entrypoint overrides container executable + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L80) + +### .spec.dbservers.envs\[int\].name: string + +[Code Reference](/pkg/apis/deployment/v1/server_group_env_var.go#L26) + +### .spec.dbservers.envs\[int\].value: string + +[Code Reference](/pkg/apis/deployment/v1/server_group_env_var.go#L27) + +### .spec.dbservers.ephemeralVolumes.apps.size: resource.Quantity + +Size defines the size of the ephemeral volume + +Links: +* [Documentation of resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#quantity-resource-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_ephemeral_volumes.go#L64) + +### .spec.dbservers.ephemeralVolumes.temp.size: resource.Quantity + +Size defines the size of the ephemeral volume + +Links: +* [Documentation of resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#quantity-resource-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_ephemeral_volumes.go#L64) + +### .spec.dbservers.exporterPort: uint16 + +ExporterPort defines the port used by the exporter + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L184) + +### .spec.dbservers.extendedRotationCheck: bool + +ExtendedRotationCheck extends the checks for rotation + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L157) + +### .spec.dbservers.externalPortEnabled: bool + +ExternalPortEnabled defines if the external port should be enabled.
If set to false, ports need to be exposed via a sidecar. Only for ArangoD members + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L169) + +### .spec.dbservers.indexMethod: string + +IndexMethod defines the group indexing method + +Possible Values: +* random (default) - Pick random ID for member. Enforced on the Community Operator. +* ordered - Use sequential number as Member ID, starting from 0. Enterprise Operator required. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L177) + +### .spec.dbservers.initContainers.containers: []core.Container + +Containers contains a list of containers + +Links: +* [Documentation of core.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#container-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_init_containers.go#L91) + +### .spec.dbservers.initContainers.mode: string + +Mode defines the container replace mode + +[Code Reference](/pkg/apis/deployment/v1/server_group_init_containers.go#L94) + +### .spec.dbservers.internalPort: int + +InternalPort defines the port used for internal communication; it can be accessed over localhost via a sidecar. Only for ArangoD members + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L165) + +### .spec.dbservers.internalPortProtocol: string + +InternalPortProtocol defines the protocol of the port used for internal communication; it can be accessed over localhost via a sidecar. Only for ArangoD members + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L167) + +### .spec.dbservers.labels: map[string]string + +Labels specifies the labels added to Pods in this group. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L104) + +### .spec.dbservers.labelsIgnoreList: []string + +LabelsIgnoreList lists regexp or plain definitions of labels which should be ignored + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L106) + +### .spec.dbservers.labelsMode: string + +LabelsMode defines the labels mode to be used while overriding labels + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L108) + +### .spec.dbservers.maxCount: int + +MaxCount specifies an upper limit for count + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L76) + +### .spec.dbservers.minCount: int + +MinCount specifies a lower limit for count + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L74) + +### .spec.dbservers.nodeAffinity: core.NodeAffinity + +NodeAffinity specifies additional nodeAffinity settings in ArangoDB Pod definitions + +Links: +* [Documentation of core.NodeAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#nodeaffinity-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L138) + +### .spec.dbservers.nodeSelector: map[string]string + +NodeSelector specifies a set of selectors for nodes + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L114) + +### .spec.dbservers.numactl.args: []string + +Args defines the list of arguments passed to the numactl process + +Default Value: [] + +[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L38) + +### .spec.dbservers.numactl.enabled: bool + +Enabled defines if numactl should be enabled + +Default Value: false + +[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L30) + +### .spec.dbservers.numactl.path: string + +Path defines the numactl path within the container + +Default Value: /usr/bin/numactl + +[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L34) + +###
.spec.dbservers.overrideDetectedNumberOfCores: bool + +OverrideDetectedNumberOfCores determines if the number of cores should be overridden based on the values in resources. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L92) + +### .spec.dbservers.overrideDetectedTotalMemory: bool + +OverrideDetectedTotalMemory determines if memory should be overridden based on the values in resources. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L90) + +### .spec.dbservers.podModes.network: string + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec_pod_modes.go#L31) + +### .spec.dbservers.podModes.pid: string + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec_pod_modes.go#L32) + +### .spec.dbservers.port: uint16 + +Port defines the port used by the member + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L182) + +### .spec.dbservers.priorityClassName: string + +PriorityClassName specifies a priority class name + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L118) + +### .spec.dbservers.probes.livenessProbeDisabled: bool + +LivenessProbeDisabled, if set to true, disables livenessProbes + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L193) + +### .spec.dbservers.probes.livenessProbeSpec.failureThreshold: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227) + +### .spec.dbservers.probes.livenessProbeSpec.initialDelaySeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223) + +### .spec.dbservers.probes.livenessProbeSpec.periodSeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224) + +### .spec.dbservers.probes.livenessProbeSpec.successThreshold: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226) + +### .spec.dbservers.probes.livenessProbeSpec.timeoutSeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225) + +### .spec.dbservers.probes.ReadinessProbeDisabled: bool + +OldReadinessProbeDisabled, if set to true, disables readinessProbes. +Deprecated: This field is deprecated and kept only for backward compatibility.
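+ +Example (a sketch preferring the lowercase `readinessProbeDisabled` field described below over this deprecated one): +```yaml +spec: +  dbservers: +    probes: +      readinessProbeDisabled: true +```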
+ +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L200) + +### .spec.dbservers.probes.readinessProbeDisabled: bool + +ReadinessProbeDisabled overrides the probe-disabled flag in the proper (lowercase) manner, with backward compatibility + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L202) + +### .spec.dbservers.probes.readinessProbeSpec.failureThreshold: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227) + +### .spec.dbservers.probes.readinessProbeSpec.initialDelaySeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223) + +### .spec.dbservers.probes.readinessProbeSpec.periodSeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224) + +### .spec.dbservers.probes.readinessProbeSpec.successThreshold: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226) + +### .spec.dbservers.probes.readinessProbeSpec.timeoutSeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225) + +### .spec.dbservers.probes.startupProbeDisabled: bool + +StartupProbeDisabled, if set to true, disables startupProbes + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L207) + +### .spec.dbservers.probes.startupProbeSpec.failureThreshold: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227) + +### .spec.dbservers.probes.startupProbeSpec.initialDelaySeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223) + +### .spec.dbservers.probes.startupProbeSpec.periodSeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224) + +### .spec.dbservers.probes.startupProbeSpec.successThreshold: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226) + +### .spec.dbservers.probes.startupProbeSpec.timeoutSeconds: int32 + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225) + +### .spec.dbservers.pvcResizeMode: string + +VolumeResizeMode specifies the resize mode for the PVC + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L124) + +### .spec.dbservers.resources: core.ResourceRequirements + +Resources holds resource requests & limits + +Links: +* [Documentation of core.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#resourcerequirements-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L88) + +### .spec.dbservers.schedulerName: string + +SchedulerName defines the scheduler name used for the group + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L82) + +### .spec.dbservers.securityContext.addCapabilities: []string + +AddCapabilities adds new capabilities to containers + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L45) + +### .spec.dbservers.securityContext.allowPrivilegeEscalation: bool + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L47) + +### .spec.dbservers.securityContext.dropAllCapabilities: bool + +DropAllCapabilities specifies if capabilities should be dropped for this pod's containers +Deprecated: This field is added for backward compatibility. Will be removed in 1.1.0.
+ +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L43) + +### .spec.dbservers.securityContext.fsGroup: int64 + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L55) + +### .spec.dbservers.securityContext.privileged: bool + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L48) + +### .spec.dbservers.securityContext.readOnlyRootFilesystem: bool + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L49) + +### .spec.dbservers.securityContext.runAsGroup: int64 + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L52) + +### .spec.dbservers.securityContext.runAsNonRoot: bool + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L50) + +### .spec.dbservers.securityContext.runAsUser: int64 + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L51) + +### .spec.dbservers.securityContext.seccompProfile: core.SeccompProfile + +SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set. + +Links: +* [Documentation of core.SeccompProfile](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#seccompprofile-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L71) + +### .spec.dbservers.securityContext.seLinuxOptions: core.SELinuxOptions + +SELinuxOptions are the labels to be applied to the container + +Links: +* [Documentation of core.SELinuxOptions](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#selinuxoptions-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L76) + +### .spec.dbservers.securityContext.supplementalGroups: []int64 + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L54) + +### .spec.dbservers.securityContext.sysctls: map[string]intstr.IntOrString + +Sysctls holds a list of namespaced sysctls used for the pod. Pods with sysctls +unsupported by the container runtime might fail to launch. +Map values can be String or Int + +Links: +* [Documentation](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/) + +Example: +```yaml +sysctls: + "kernel.shm_rmid_forced": "0" + "net.core.somaxconn": 1024 + "kernel.msgmax": "65536" +``` + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L66) + +### .spec.dbservers.serviceAccountName: string + +ServiceAccountName specifies the name of the service account used for Pods in this group. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L112) + +### .spec.dbservers.shutdownDelay: int + +ShutdownDelay defines how long the operator should delay finalizer removal after shutdown + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L163) + +### .spec.dbservers.shutdownMethod: string + +ShutdownMethod describes the member shutdown procedure taken by the Operator + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L161) + +### .spec.dbservers.sidecarCoreNames: []string + +SidecarCoreNames is a list of sidecar containers which must run in the pod. +Some names (e.g.: "server", "worker") are reserved, and they don't have any impact.
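+ +Example (an illustrative sketch; the sidecar container name is hypothetical): +```yaml +spec: +  dbservers: +    sidecarCoreNames: +      - metrics-forwarder +```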
+ +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L141) + +### .spec.dbservers.sidecars: []core.Container + +Sidecars specifies a list of additional containers to be started + +Links: +* [Documentation of core.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#container-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L145) + +### .spec.dbservers.storageClassName: string + +StorageClassName specifies the classname for storage of the servers. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L84) + +### .spec.dbservers.terminationGracePeriodSeconds: int64 + +TerminationGracePeriodSeconds overrides the default TerminationGracePeriodSeconds for pods - via silent rotation + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L173) + +### .spec.dbservers.tolerations: []core.Toleration + +Tolerations specifies the tolerations added to Pods in this group. + +Links: +* [Documentation of core.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#toleration-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L96) + +### .spec.dbservers.volumeAllowShrink: bool + +Deprecated: VolumeAllowShrink allows shrinking of the volume + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L126) + +### .spec.dbservers.volumeClaimTemplate: core.PersistentVolumeClaim + +VolumeClaimTemplate specifies a template for volume claims + +Links: +* [Documentation of core.PersistentVolumeClaim](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaim-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L122) + +### .spec.dbservers.volumeMounts: []ServerGroupSpecVolumeMount + +VolumeMounts defines a list of volume mounts mounted into the server container + +Links: +* [Documentation of ServerGroupSpecVolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#volumemount-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L153) + +### .spec.dbservers.volumes\[int\].configMap: core.ConfigMapVolumeSource + +ConfigMap which should be mounted into the pod + +Links: +* [Documentation of core.ConfigMapVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#configmapvolumesource-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L138) + +### .spec.dbservers.volumes\[int\].emptyDir: core.EmptyDirVolumeSource + +EmptyDir + +Links: +* [Documentation of core.EmptyDirVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#emptydirvolumesource-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L143) + +### .spec.dbservers.volumes\[int\].hostPath: core.HostPathVolumeSource + +HostPath + +Links: +* [Documentation of core.HostPathVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#hostpathvolumesource-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L148) + +### .spec.dbservers.volumes\[int\].name: string + +Name of volume + +[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L128) + +### .spec.dbservers.volumes\[int\].persistentVolumeClaim: core.PersistentVolumeClaimVolumeSource + +PersistentVolumeClaim + +Links: +* [Documentation of core.PersistentVolumeClaimVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaimvolumesource-v1-core) + +[Code
Reference](/pkg/apis/deployment/v1/server_group_volume.go#L153) + +### .spec.dbservers.volumes\[int\].secret: core.SecretVolumeSource + +Secret which should be mounted into the pod + +Links: +* [Documentation of core.SecretVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretvolumesource-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L133) + +### .spec.disableIPv6: bool + +DisableIPv6 setting prevents the use of IPv6 addresses by ArangoDB servers. +This setting cannot be changed after the deployment has been created. + +Default Value: false + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L109) + +### .spec.downtimeAllowed: bool + +DowntimeAllowed setting is used to allow automatic reconciliation actions that yield some downtime of the ArangoDB deployment. +When this setting is set to false, no automatic action that may result in downtime is allowed. +If the need for such an action is detected, an event is added to the ArangoDeployment. +Once this setting is set to true, the automatic action is executed. +Operations that may result in downtime are: +- Rotating TLS CA certificate +Note: It is still possible that there is some downtime when the Kubernetes cluster is down, or in a bad state, irrespective of the value of this setting. + +Default Value: false + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L104) + +### .spec.environment: string + +Environment setting specifies the type of environment in which the deployment is created. + +Possible Values: +* Development (default) - This value optimizes the deployment for development use. It is possible to run a deployment on a small number of nodes (e.g. minikube). +* Production - This value optimizes the deployment for production use. It puts required affinity constraints on all pods to prevent Agents & DB-Servers from running on the same machine. + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L65) + +### .spec.externalAccess.advertisedEndpoint: string + +AdvertisedEndpoint is passed to the coordinators/single servers for advertising a specific endpoint + +[Code Reference](/pkg/apis/deployment/v1/external_access_spec.go#L55) + +### .spec.externalAccess.loadBalancerIP: string + +LoadBalancerIP defines an optional IP used to configure the load-balancer, in case of Auto or LoadBalancer type. + +[Code Reference](/pkg/apis/deployment/v1/external_access_spec.go#L45) + +### .spec.externalAccess.loadBalancerSourceRanges: []string + +LoadBalancerSourceRanges defines the LoadBalancerSourceRanges used for the LoadBalancer Service type. +If specified and supported by the platform, traffic through the cloud-provider +load-balancer will be restricted to the specified client IPs. This field will be ignored if the +cloud-provider does not support the feature. + +Links: +* [Cloud Provider Firewall](https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/) + +[Code Reference](/pkg/apis/deployment/v1/external_access_spec.go#L52) + +### .spec.externalAccess.managedServiceNames: []string + +ManagedServiceNames keeps the names of services which are not managed by KubeArangoDB. +It is only relevant when the type of service is `managed`. + +[Code Reference](/pkg/apis/deployment/v1/external_access_spec.go#L59) + +### .spec.externalAccess.nodePort: int + +NodePort defines an optional port used in case of Auto or NodePort type.
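+ +Example (an illustrative sketch combining this field with the `type` field below; the port value is hypothetical): +```yaml +spec: +  externalAccess: +    type: NodePort +    nodePort: 31200 +```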
+ +[Code Reference](/pkg/apis/deployment/v1/external_access_spec.go#L42) + +### .spec.externalAccess.type: string + +Type specifies the type of Service that will be created to provide access to the ArangoDB deployment from outside the Kubernetes cluster. + +Possible Values: +* Auto (default) - Create a Service of type LoadBalancer and fall back to a Service of type NodePort when the LoadBalancer is not assigned an IP address. +* None - Limit access to applications running inside the Kubernetes cluster. +* LoadBalancer - Create a Service of type LoadBalancer for the ArangoDB deployment. +* NodePort - Create a Service of type NodePort for the ArangoDB deployment. + +[Code Reference](/pkg/apis/deployment/v1/external_access_spec.go#L39) + +### .spec.features.foxx.queues: bool + +[Code Reference](/pkg/apis/deployment/v1/deployment_features.go#L24) + +### .spec.id.affinity: core.PodAffinity + +Affinity specifies additional affinity settings in ArangoDB Pod definitions + +Links: +* [Documentation of core.PodAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podaffinity-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_id_group_spec.go#L44) + +### .spec.id.antiAffinity: core.PodAntiAffinity + +AntiAffinity specifies additional antiAffinity settings in ArangoDB Pod definitions + +Links: +* [Documentation of core.PodAntiAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podantiaffinity-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_id_group_spec.go#L40) + +### .spec.id.entrypoint: string + +Entrypoint overrides container executable + +[Code Reference](/pkg/apis/deployment/v1/server_id_group_spec.go#L28) + +### .spec.id.nodeAffinity: core.NodeAffinity + +NodeAffinity specifies additional nodeAffinity settings in ArangoDB Pod definitions + +Links: +* [Documentation of core.NodeAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#nodeaffinity-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_id_group_spec.go#L48) + +### .spec.id.nodeSelector: map[string]string + +NodeSelector specifies a set of selectors for nodes + +[Code Reference](/pkg/apis/deployment/v1/server_id_group_spec.go#L34) + +### .spec.id.priorityClassName: string + +PriorityClassName specifies a priority class name + +[Code Reference](/pkg/apis/deployment/v1/server_id_group_spec.go#L36) + +### .spec.id.resources: core.ResourceRequirements + +Resources holds resource requests & limits + +Links: +* [Documentation of core.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#resourcerequirements-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_id_group_spec.go#L56) + +### .spec.id.securityContext.addCapabilities: []string + +AddCapabilities adds new capabilities to containers + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L45) + +### .spec.id.securityContext.allowPrivilegeEscalation: bool + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L47) + +### .spec.id.securityContext.dropAllCapabilities: bool + +DropAllCapabilities specifies if capabilities should be dropped for this pod's containers +Deprecated: This field is added for backward compatibility. Will be removed in 1.1.0.
+ +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L43) + +### .spec.id.securityContext.fsGroup: int64 + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L55) + +### .spec.id.securityContext.privileged: bool + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L48) + +### .spec.id.securityContext.readOnlyRootFilesystem: bool + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L49) + +### .spec.id.securityContext.runAsGroup: int64 + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L52) + +### .spec.id.securityContext.runAsNonRoot: bool + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L50) + +### .spec.id.securityContext.runAsUser: int64 + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L51) + +### .spec.id.securityContext.seccompProfile: core.SeccompProfile + +SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set. + +Links: +* [Documentation of core.SeccompProfile](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#seccompprofile-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L71) + +### .spec.id.securityContext.seLinuxOptions: core.SELinuxOptions + +SELinuxOptions are the labels to be applied to the container + +Links: +* [Documentation of core.SELinuxOptions](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#selinuxoptions-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L76) + +### .spec.id.securityContext.supplementalGroups: []int64 + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L54) + +### .spec.id.securityContext.sysctls: map[string]intstr.IntOrString + +Sysctls holds a list of namespaced sysctls used for the pod. Pods with sysctls +unsupported by the container runtime might fail to launch. +Map values can be String or Int + +Links: +* [Documentation](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/) + +Example: +```yaml +sysctls: + "kernel.shm_rmid_forced": "0" + "net.core.somaxconn": 1024 + "kernel.msgmax": "65536" +``` + +[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L66) + +### .spec.id.serviceAccountName: string + +ServiceAccountName specifies the name of the service account used for Pods in this group. + +[Code Reference](/pkg/apis/deployment/v1/server_id_group_spec.go#L50) + +### .spec.id.tolerations: []core.Toleration + +Tolerations specifies the tolerations added to Pods in this group. + +Links: +* [Documentation of core.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#toleration-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_id_group_spec.go#L32) + +### .spec.image: string + +Image specifies the docker image to use for all ArangoDB servers. +In a development environment this setting defaults to arangodb/arangodb:latest. +For production environments this is a required setting without a default value. +It is highly recommended to use an explicit version (not latest) for production environments. + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L78) + +### .spec.imageDiscoveryMode: string + +ImageDiscoveryMode specifies the image discovery mode.
+ +Possible Values: +* kubelet (default) - Use sha256 of the discovered image in the pods +* direct - Use image provided in the spec.image directly in the pods + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L94) + +### .spec.imagePullPolicy: core.PullPolicy + +ImagePullPolicy specifies the pull policy for the docker image to use for all ArangoDB servers. + +Links: +* [Documentation of core.PullPolicy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) + +Possible Values: +* Always (default) - Means that kubelet always attempts to pull the latest image. Container will fail if the pull fails. +* Never - Means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present. +* IfNotPresent - Means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L86) + +### .spec.imagePullSecrets: []string + +ImagePullSecrets specifies the list of image pull secrets for the docker image to use for all ArangoDB servers. + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L89) + +### .spec.labels: map[string]string + +Labels specifies the labels added to Pods in this group. + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L138) + +### .spec.labelsIgnoreList: []string + +LabelsIgnoreList lists regexp or plain definitions of labels which should be ignored + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L141) + +### .spec.labelsMode: string + +LabelsMode defines the labels mode to be used while overriding labels + +Possible Values: +* disabled (default) - Disable annotations/labels override. Default if there are no annotations/labels set in ArangoDeployment +* append - Add new annotations/labels without affecting old ones +* replace - Replace existing annotations/labels + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L147) + +### .spec.license.secretName: string + +[Code Reference](/pkg/apis/deployment/v1/license_spec.go#L30) + +### .spec.lifecycle.resources: core.ResourceRequirements + +Resources holds resource requests & limits + +Links: +* [Documentation of core.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#resourcerequirements-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/lifecycle_spec.go#L31) + +### .spec.memberPropagationMode: string + +MemberPropagationMode defines how changes to the pod spec should be propagated. +Changes to a pod’s configuration require a restart of that pod in almost all cases. +Pods are restarted eagerly by default, which can cause more restarts than desired, especially when updating arangod as well as the operator. +The propagation of the configuration changes can be deferred to the next restart, either triggered manually by the user or by another operation like an upgrade. +This reduces the number of restarts for upgrading both the server and the operator from two to one.
+ +Possible Values: +* always (default) - Restart the member as soon as a configuration change is discovered +* on-restart - Wait until the next restart to change the member configuration + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L220) + +### .spec.metrics.authentication.jwtTokenSecretName: string + +JWTTokenSecretName contains the name of the JWT kubernetes secret used for authentication + +[Code Reference](/pkg/apis/deployment/v1/deployment_metrics_spec.go#L34) + +### .spec.metrics.enabled: bool + +[Code Reference](/pkg/apis/deployment/v1/deployment_metrics_spec.go#L77) + +### .spec.metrics.image: string + +Deprecated + +[Code Reference](/pkg/apis/deployment/v1/deployment_metrics_spec.go#L79) + +### .spec.metrics.mode: string + +Deprecated + +[Code Reference](/pkg/apis/deployment/v1/deployment_metrics_spec.go#L86) + +### .spec.metrics.port: uint16 + +[Code Reference](/pkg/apis/deployment/v1/deployment_metrics_spec.go#L91) + +### .spec.metrics.resources: core.ResourceRequirements + +Resources holds resource requests & limits + +Links: +* [Documentation of core.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#resourcerequirements-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/deployment_metrics_spec.go#L84) + +### .spec.metrics.serviceMonitor.enabled: bool + +[Code Reference](/pkg/apis/deployment/v1/deployment_metrics_service_monitor_spec.go#L24) + +### .spec.metrics.serviceMonitor.labels: map[string]string + +[Code Reference](/pkg/apis/deployment/v1/deployment_metrics_service_monitor_spec.go#L25) + +### .spec.metrics.tls: bool + +[Code Reference](/pkg/apis/deployment/v1/deployment_metrics_spec.go#L87) + +### .spec.mode: string + +Mode specifies the type of ArangoDB deployment to create. + +Possible Values: +* Cluster (default) - Full cluster. Defaults to 3 Agents, 3 DB-Servers & 3 Coordinators. +* ActiveFailover - Active-failover single pair. Defaults to 3 Agents and 2 single servers. +* Single - Single server only (note this does not provide high availability or reliability). + +This field is **immutable**: Change of the ArangoDeployment Mode is not possible after creation. + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L60) + +### .spec.networkAttachedVolumes: bool + +NetworkAttachedVolumes +If set to `true`, a ResignLeadership operation will be triggered when a DB-Server pod is evicted (rather than a CleanOutServer operation). +Furthermore, the pod will simply be redeployed on a different node, rather than cleaned and retired and replaced by a new member. +You must only set this option to true if your persistent volumes are “movable” in the sense that they can be mounted from a different k8s node, like in the case of network attached volumes. +If your persistent volumes are tied to a specific pod, you must leave this option set to false.
+ +Default Value: true + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L123) + +### .spec.rebalancer.enabled: bool + +[Code Reference](/pkg/apis/deployment/v1/rebalancer_spec.go#L26) + +### .spec.rebalancer.optimizers.leader: bool + +[Code Reference](/pkg/apis/deployment/v1/rebalancer_spec.go#L74) + +### .spec.rebalancer.parallelMoves: int + +[Code Reference](/pkg/apis/deployment/v1/rebalancer_spec.go#L28) + +### .spec.rebalancer.readers.count: bool + +Deprecated: does not work in Rebalancer V2 +Count enables the Shard Count mechanism + +[Code Reference](/pkg/apis/deployment/v1/rebalancer_spec.go#L62) + +### .spec.recovery.autoRecover: bool + +[Code Reference](/pkg/apis/deployment/v1/recovery_spec.go#L26) + +### .spec.restoreEncryptionSecret: string + +RestoreEncryptionSecret specifies the optional name of a secret which contains the encryption key used for restore + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L160) + +### .spec.restoreFrom: string + +RestoreFrom setting specifies an `ArangoBackup` resource name the cluster should be restored from. +After a restore or failure to do so, the status of the deployment contains information about the restore operation in the restore key. +It will contain some of the following fields: +- `requestedFrom`: name of the ArangoBackup used to restore from. +- `message`: optional message explaining why the restore failed. +- `state`: state indicating if the restore was successful or not. Possible values: Restoring, Restored, RestoreFailed +If the restoreFrom key is removed from the spec, the restore key is deleted as well. +A new restore attempt is made if and only if either in the status restore is not set or if spec.restoreFrom and status.requestedFrom are different. + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L157) + +### .spec.rocksdb.encryption.keySecretName: string + +[Code Reference](/pkg/apis/deployment/v1/rocksdb_spec.go#L31) + +### .spec.single.affinity: core.PodAffinity + +Affinity specifies additional affinity settings in ArangoDB Pod definitions + +Links: +* [Documentation of core.PodAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podaffinity-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L134) + +### .spec.single.allowMemberRecreation: bool + +AllowMemberRecreation allows members to be recreated. The value is used only for Coordinators and DB-Servers (default true); for all other groups it is set to false. + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L171) + +### .spec.single.annotations: map[string]string + +Annotations specifies the annotations added to Pods in this group.
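+ +Example (an illustrative sketch combining this field with the `annotationsIgnoreList` entry below; keys are hypothetical): +```yaml +spec: +  single: +    annotations: +      example.com/stage: "test" +    annotationsIgnoreList: +      - ^kubectl\.kubernetes\.io/.* +```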
+ +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L98) + +### .spec.single.annotationsIgnoreList: []string + +AnnotationsIgnoreList lists regexp or plain definitions of annotations which should be ignored + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L100) + +### .spec.single.annotationsMode: string + +AnnotationsMode defines the annotations mode to be used while overriding annotations + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L102) + +### .spec.single.antiAffinity: core.PodAntiAffinity + +AntiAffinity specifies additional antiAffinity settings in ArangoDB Pod definitions + +Links: +* [Documentation of core.PodAntiAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podantiaffinity-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L130) + +### .spec.single.args: []string + +Args holds additional command-line arguments + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L78) + +### .spec.single.count: int + +Count holds the requested number of servers + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L72) + +### .spec.single.entrypoint: string + +Entrypoint overrides container executable + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L80) + +### .spec.single.envs\[int\].name: string + +[Code Reference](/pkg/apis/deployment/v1/server_group_env_var.go#L26) + +### .spec.single.envs\[int\].value: string + +[Code Reference](/pkg/apis/deployment/v1/server_group_env_var.go#L27) + +### .spec.single.ephemeralVolumes.apps.size: resource.Quantity + +Size defines the size of the ephemeral volume + +Links: +* [Documentation of resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#quantity-resource-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_ephemeral_volumes.go#L64) + +### .spec.single.ephemeralVolumes.temp.size: resource.Quantity + +Size defines the size of the ephemeral volume + +Links: +* [Documentation of resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#quantity-resource-core) + +[Code Reference](/pkg/apis/deployment/v1/server_group_ephemeral_volumes.go#L64) + +### .spec.single.exporterPort: uint16 + +ExporterPort defines the port used by the exporter + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L184) + +### .spec.single.extendedRotationCheck: bool + +ExtendedRotationCheck extends the checks for rotation + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L157) + +### .spec.single.externalPortEnabled: bool + +ExternalPortEnabled defines if the external port should be enabled. If set to false, ports need to be exposed via a sidecar. Only for ArangoD members + +[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L169) + +### .spec.single.indexMethod: string + +IndexMethod defines the group indexing method + +Possible Values: +* random (default) - Pick random ID for member. Enforced on the Community Operator. +* ordered - Use sequential number as Member ID, starting from 0. Enterprise Operator required.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L177)
+
+### .spec.single.initContainers.containers: []core.Container
+
+Containers contains a list of containers
+
+Links:
+* [Documentation of core.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#container-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_init_containers.go#L91)
+
+### .spec.single.initContainers.mode: string
+
+Mode keeps the container replace mode
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_init_containers.go#L94)
+
+### .spec.single.internalPort: int
+
+InternalPort defines the port used for internal communication; it can be accessed over localhost via a sidecar. Only for ArangoD members
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L165)
+
+### .spec.single.internalPortProtocol: string
+
+InternalPortProtocol defines the protocol of the port used for internal communication; it can be accessed over localhost via a sidecar. Only for ArangoD members
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L167)
+
+### .spec.single.labels: map[string]string
+
+Labels specifies the labels added to Pods in this group.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L104)
+
+### .spec.single.labelsIgnoreList: []string
+
+LabelsIgnoreList lists regexp or plain definitions of labels which should be ignored
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L106)
+
+### .spec.single.labelsMode: string
+
+LabelsMode defines the labels mode which should be used while overriding labels
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L108)
+
+### .spec.single.maxCount: int
+
+MaxCount specifies an upper limit for count
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L76)
+
+### .spec.single.minCount: int
+
+MinCount specifies a lower limit for count
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L74)
+
+### .spec.single.nodeAffinity: core.NodeAffinity
+
+NodeAffinity specifies additional nodeAffinity settings in ArangoDB Pod definitions
+
+Links:
+* [Documentation of core.NodeAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#nodeaffinity-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L138)
+
+### .spec.single.nodeSelector: map[string]string
+
+NodeSelector specifies a set of selectors for nodes
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L114)
+
+### .spec.single.numactl.args: []string
+
+Args defines the list of arguments for the numactl process
+
+Default Value: []
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L38)
+
+### .spec.single.numactl.enabled: bool
+
+Enabled defines if numactl should be enabled
+
+Default Value: false
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L30)
+
+### .spec.single.numactl.path: string
+
+Path defines the numactl path within the container
+
+Default Value: /usr/bin/numactl
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L34)
+
+### .spec.single.overrideDetectedNumberOfCores: bool
+
+OverrideDetectedNumberOfCores determines if the detected number of cores should be overridden based on the values in resources.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L92)
+
+### .spec.single.overrideDetectedTotalMemory: bool
+
+OverrideDetectedTotalMemory determines if the detected memory should be overridden based on the values in resources.
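+
+Example (an illustrative sketch; the memory limit is a placeholder value):
+
+```yaml
+spec:
+  single:
+    overrideDetectedTotalMemory: true
+    resources:
+      limits:
+        memory: "2Gi"
+```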
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L90)
+
+### .spec.single.podModes.network: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec_pod_modes.go#L31)
+
+### .spec.single.podModes.pid: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec_pod_modes.go#L32)
+
+### .spec.single.port: uint16
+
+Port defines the port used by the member
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L182)
+
+### .spec.single.priorityClassName: string
+
+PriorityClassName specifies a priority class name
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L118)
+
+### .spec.single.probes.livenessProbeDisabled: bool
+
+LivenessProbeDisabled if true, liveness probes are disabled
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L193)
+
+### .spec.single.probes.livenessProbeSpec.failureThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227)
+
+### .spec.single.probes.livenessProbeSpec.initialDelaySeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223)
+
+### .spec.single.probes.livenessProbeSpec.periodSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224)
+
+### .spec.single.probes.livenessProbeSpec.successThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226)
+
+### .spec.single.probes.livenessProbeSpec.timeoutSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225)
+
+### .spec.single.probes.ReadinessProbeDisabled: bool
+
+OldReadinessProbeDisabled if true, readiness probes are disabled
+Deprecated: This field is deprecated, kept only for backward compatibility.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L200)
+
+### .spec.single.probes.readinessProbeDisabled: bool
+
+ReadinessProbeDisabled overrides the probe-disabled flag in the proper (lowercase) form, with backward compatibility
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L202)
+
+### .spec.single.probes.readinessProbeSpec.failureThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227)
+
+### .spec.single.probes.readinessProbeSpec.initialDelaySeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223)
+
+### .spec.single.probes.readinessProbeSpec.periodSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224)
+
+### .spec.single.probes.readinessProbeSpec.successThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226)
+
+### .spec.single.probes.readinessProbeSpec.timeoutSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225)
+
+### .spec.single.probes.startupProbeDisabled: bool
+
+StartupProbeDisabled if true, startup probes are disabled
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L207)
+
+### .spec.single.probes.startupProbeSpec.failureThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227)
+
+### .spec.single.probes.startupProbeSpec.initialDelaySeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223)
+
+### .spec.single.probes.startupProbeSpec.periodSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224)
+
+### .spec.single.probes.startupProbeSpec.successThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226)
+
+### .spec.single.probes.startupProbeSpec.timeoutSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225)
+
+### .spec.single.pvcResizeMode: string
+
+VolumeResizeMode specifies the resize mode for PVCs
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L124)
+
+### .spec.single.resources: core.ResourceRequirements
+
+Resources holds resource requests & limits
+
+Links:
+* [Documentation of core.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#resourcerequirements-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L88)
+
+### .spec.single.schedulerName: string
+
+SchedulerName defines the scheduler name used for the group
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L82)
+
+### .spec.single.securityContext.addCapabilities: []string
+
+AddCapabilities adds new capabilities to containers
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L45)
+
+### .spec.single.securityContext.allowPrivilegeEscalation: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L47)
+
+### .spec.single.securityContext.dropAllCapabilities: bool
+
+DropAllCapabilities specifies if capabilities should be dropped for this pod's containers
+Deprecated: This field is added for backward compatibility. Will be removed in 1.1.0.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L43)
+
+### .spec.single.securityContext.fsGroup: int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L55)
+
+### .spec.single.securityContext.privileged: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L48)
+
+### .spec.single.securityContext.readOnlyRootFilesystem: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L49)
+
+### .spec.single.securityContext.runAsGroup: int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L52)
+
+### .spec.single.securityContext.runAsNonRoot: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L50)
+
+### .spec.single.securityContext.runAsUser: int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L51)
+
+### .spec.single.securityContext.seccompProfile: core.SeccompProfile
+
+SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.
+
+Links:
+* [Documentation of core.SeccompProfile](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#seccompprofile-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L71)
+
+### .spec.single.securityContext.seLinuxOptions: core.SELinuxOptions
+
+SELinuxOptions are the labels to be applied to the container
+
+Links:
+* [Documentation of core.SELinuxOptions](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#selinuxoptions-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L76)
+
+### .spec.single.securityContext.supplementalGroups: []int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L54)
+
+### .spec.single.securityContext.sysctls: map[string]intstr.IntOrString
+
+Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+sysctls (by the container runtime) might fail to launch.
+The map value can be a string or an int.
+
+Links:
+* [Documentation](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/)
+
+Example:
+```yaml
+sysctls:
+  "kernel.shm_rmid_forced": "0"
+  "net.core.somaxconn": 1024
+  "kernel.msgmax": "65536"
+```
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L66)
+
+### .spec.single.serviceAccountName: string
+
+ServiceAccountName specifies the name of the service account used for Pods in this group.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L112)
+
+### .spec.single.shutdownDelay: int
+
+ShutdownDelay defines how long the operator should delay finalizer removal after shutdown
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L163)
+
+### .spec.single.shutdownMethod: string
+
+ShutdownMethod describes the member shutdown procedure taken by the Operator
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L161)
+
+### .spec.single.sidecarCoreNames: []string
+
+SidecarCoreNames is a list of sidecar containers which must run in the pod.
+Some names (e.g.: "server", "worker") are reserved, and they don't have any impact.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L141)
+
+### .spec.single.sidecars: []core.Container
+
+Sidecars specifies a list of additional containers to be started
+
+Links:
+* [Documentation of core.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#container-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L145)
+
+### .spec.single.storageClassName: string
+
+StorageClassName specifies the storage class name used for the servers.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L84)
+
+### .spec.single.terminationGracePeriodSeconds: int64
+
+TerminationGracePeriodSeconds overrides the default TerminationGracePeriodSeconds for pods - via silent rotation
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L173)
+
+### .spec.single.tolerations: []core.Toleration
+
+Tolerations specifies the tolerations added to Pods in this group.
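+
+Example (an illustrative sketch; the taint key and toleration period are placeholders):
+
+```yaml
+spec:
+  single:
+    tolerations:
+      - key: "node.kubernetes.io/unreachable"
+        operator: "Exists"
+        effect: "NoExecute"
+        tolerationSeconds: 300
+```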
+
+Links:
+* [Documentation of core.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#toleration-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L96)
+
+### .spec.single.volumeAllowShrink: bool
+
+Deprecated: VolumeAllowShrink allows shrinking the volume
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L126)
+
+### .spec.single.volumeClaimTemplate: core.PersistentVolumeClaim
+
+VolumeClaimTemplate specifies a template for volume claims
+
+Links:
+* [Documentation of core.PersistentVolumeClaim](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaim-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L122)
+
+### .spec.single.volumeMounts: []ServerGroupSpecVolumeMount
+
+VolumeMounts defines a list of volume mounts mounted into the server container
+
+Links:
+* [Documentation of ServerGroupSpecVolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#volumemount-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L153)
+
+### .spec.single.volumes\[int\].configMap: core.ConfigMapVolumeSource
+
+ConfigMap which should be mounted into the pod
+
+Links:
+* [Documentation of core.ConfigMapVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#configmapvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L138)
+
+### .spec.single.volumes\[int\].emptyDir: core.EmptyDirVolumeSource
+
+EmptyDir
+
+Links:
+* [Documentation of core.EmptyDirVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#emptydirvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L143)
+
+### .spec.single.volumes\[int\].hostPath: core.HostPathVolumeSource
+
+HostPath
+
+Links:
+* [Documentation of core.HostPathVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#hostpathvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L148)
+
+### .spec.single.volumes\[int\].name: string
+
+Name of the volume
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L128)
+
+### .spec.single.volumes\[int\].persistentVolumeClaim: core.PersistentVolumeClaimVolumeSource
+
+PersistentVolumeClaim
+
+Links:
+* [Documentation of core.PersistentVolumeClaimVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaimvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L153)
+
+### .spec.single.volumes\[int\].secret: core.SecretVolumeSource
+
+Secret which should be mounted into the pod
+
+Links:
+* [Documentation of core.SecretVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L133)
+
+### .spec.storageEngine: string
+
+StorageEngine specifies the type of storage engine used for all servers in the cluster.
+This setting cannot be changed after the cluster has been created.
+
+Possible Values:
+* RocksDB (default) - To use the RocksDB storage engine.
+* MMFiles - To use the MMFiles storage engine. Deprecated.
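+
+Example (an illustrative sketch; `RocksDB` is already the default):
+
+```yaml
+spec:
+  storageEngine: RocksDB
+```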
+
+[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L72)
+
+### .spec.sync.auth.clientCASecretName: string
+
+[Code Reference](/pkg/apis/deployment/v1/sync_authentication_spec.go#L32)
+
+### .spec.sync.auth.jwtSecretName: string
+
+[Code Reference](/pkg/apis/deployment/v1/sync_authentication_spec.go#L31)
+
+### .spec.sync.enabled: bool
+
+[Code Reference](/pkg/apis/deployment/v1/sync_spec.go#L30)
+
+### .spec.sync.externalAccess.accessPackageSecretNames: []string
+
+[Code Reference](/pkg/apis/deployment/v1/sync_external_access_spec.go#L36)
+
+### .spec.sync.externalAccess.advertisedEndpoint: string
+
+AdvertisedEndpoint is passed to the coordinators/single servers for advertising a specific endpoint
+
+[Code Reference](/pkg/apis/deployment/v1/external_access_spec.go#L55)
+
+### .spec.sync.externalAccess.loadBalancerIP: string
+
+LoadBalancerIP defines an optional IP used to configure the load-balancer, in case of Auto or LoadBalancer type.
+
+[Code Reference](/pkg/apis/deployment/v1/external_access_spec.go#L45)
+
+### .spec.sync.externalAccess.loadBalancerSourceRanges: []string
+
+LoadBalancerSourceRanges defines the LoadBalancerSourceRanges used for a LoadBalancer Service type.
+If specified and supported by the platform, traffic through the cloud-provider
+load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+cloud-provider does not support the feature.
+
+Links:
+* [Cloud Provider Firewall](https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/)
+
+[Code Reference](/pkg/apis/deployment/v1/external_access_spec.go#L52)
+
+### .spec.sync.externalAccess.managedServiceNames: []string
+
+ManagedServiceNames keeps the names of services which are not managed by KubeArangoDB.
+It is only relevant when the type of service is `managed`.
+
+[Code Reference](/pkg/apis/deployment/v1/external_access_spec.go#L59)
+
+### .spec.sync.externalAccess.masterEndpoint: []string
+
+[Code Reference](/pkg/apis/deployment/v1/sync_external_access_spec.go#L35)
+
+### .spec.sync.externalAccess.nodePort: int
+
+NodePort defines an optional port used in case of Auto or NodePort type.
+
+[Code Reference](/pkg/apis/deployment/v1/external_access_spec.go#L42)
+
+### .spec.sync.externalAccess.type: string
+
+Type specifies the type of Service that will be created to provide access to the ArangoDB deployment from outside the Kubernetes cluster.
+
+Possible Values:
+* Auto (default) - Create a Service of type LoadBalancer and fall back to a Service of type NodePort when the LoadBalancer is not assigned an IP address.
+* None - Limit access to applications running inside the Kubernetes cluster.
+* LoadBalancer - Create a Service of type LoadBalancer for the ArangoDB deployment.
+* NodePort - Create a Service of type NodePort for the ArangoDB deployment.
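+
+Example (an illustrative sketch; the IP address is a placeholder):
+
+```yaml
+spec:
+  sync:
+    externalAccess:
+      type: LoadBalancer
+      loadBalancerIP: "203.0.113.10"
+```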
+
+[Code Reference](/pkg/apis/deployment/v1/external_access_spec.go#L39)
+
+### .spec.sync.image: string
+
+[Code Reference](/pkg/apis/deployment/v1/sync_spec.go#L36)
+
+### .spec.sync.monitoring.tokenSecretName: string
+
+[Code Reference](/pkg/apis/deployment/v1/sync_monitoring_spec.go#L31)
+
+### .spec.sync.tls.altNames: []string
+
+[Code Reference](/pkg/apis/deployment/v1/tls_spec.go#L58)
+
+### .spec.sync.tls.caSecretName: string
+
+[Code Reference](/pkg/apis/deployment/v1/tls_spec.go#L57)
+
+### .spec.sync.tls.mode: string
+
+[Code Reference](/pkg/apis/deployment/v1/tls_spec.go#L61)
+
+### .spec.sync.tls.sni.mapping.\: []string
+
+[Code Reference](/pkg/apis/deployment/v1/tls_sni_spec.go#L30)
+
+### .spec.sync.tls.ttl: string
+
+[Code Reference](/pkg/apis/deployment/v1/tls_spec.go#L59)
+
+### .spec.syncmasters.affinity: core.PodAffinity
+
+Affinity specifies additional affinity settings in ArangoDB Pod definitions
+
+Links:
+* [Documentation of core.PodAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podaffinity-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L134)
+
+### .spec.syncmasters.allowMemberRecreation: bool
+
+AllowMemberRecreation allows recreating members. The value is only used for Coordinators and DBServers (default true); for all other groups it is set to false.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L171)
+
+### .spec.syncmasters.annotations: map[string]string
+
+Annotations specifies the annotations added to Pods in this group.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L98)
+
+### .spec.syncmasters.annotationsIgnoreList: []string
+
+AnnotationsIgnoreList lists regexp or plain definitions of annotations which should be ignored
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L100)
+
+### .spec.syncmasters.annotationsMode: string
+
+AnnotationsMode defines the annotations mode which should be used while overriding annotations
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L102)
+
+### .spec.syncmasters.antiAffinity: core.PodAntiAffinity
+
+AntiAffinity specifies additional antiAffinity settings in ArangoDB Pod definitions
+
+Links:
+* [Documentation of core.PodAntiAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podantiaffinity-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L130)
+
+### .spec.syncmasters.args: []string
+
+Args holds additional command-line arguments
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L78)
+
+### .spec.syncmasters.count: int
+
+Count holds the requested number of servers
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L72)
+
+### .spec.syncmasters.entrypoint: string
+
+Entrypoint overrides the container executable
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L80)
+
+### .spec.syncmasters.envs\[int\].name: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_env_var.go#L26)
+
+### .spec.syncmasters.envs\[int\].value: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_env_var.go#L27)
+
+### .spec.syncmasters.ephemeralVolumes.apps.size: resource.Quantity
+
+Size defines the size of the ephemeral volume
+
+Links:
+* [Documentation of resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#quantity-resource-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_ephemeral_volumes.go#L64)
+
+### .spec.syncmasters.ephemeralVolumes.temp.size: resource.Quantity
+
+Size defines the size of the ephemeral volume
+
+Links:
+* [Documentation of resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#quantity-resource-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_ephemeral_volumes.go#L64)
+
+### .spec.syncmasters.exporterPort: uint16
+
+ExporterPort defines the port used by the exporter
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L184)
+
+### .spec.syncmasters.extendedRotationCheck: bool
+
+ExtendedRotationCheck extends the checks for rotation
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L157)
+
+### .spec.syncmasters.externalPortEnabled: bool
+
+ExternalPortEnabled defines if the external port should be enabled. If set to false, ports need to be exposed via a sidecar. Only for ArangoD members
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L169)
+
+### .spec.syncmasters.indexMethod: string
+
+IndexMethod defines the group indexing method
+
+Possible Values:
+* random (default) - Pick a random ID for the member. Enforced on the Community Operator.
+* ordered - Use a sequential number as the member ID, starting from 0. Requires the Enterprise Operator.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L177)
+
+### .spec.syncmasters.initContainers.containers: []core.Container
+
+Containers contains a list of containers
+
+Links:
+* [Documentation of core.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#container-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_init_containers.go#L91)
+
+### .spec.syncmasters.initContainers.mode: string
+
+Mode keeps the container replace mode
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_init_containers.go#L94)
+
+### .spec.syncmasters.internalPort: int
+
+InternalPort defines the port used for internal communication; it can be accessed over localhost via a sidecar. Only for ArangoD members
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L165)
+
+### .spec.syncmasters.internalPortProtocol: string
+
+InternalPortProtocol defines the protocol of the port used for internal communication; it can be accessed over localhost via a sidecar. Only for ArangoD members
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L167)
+
+### .spec.syncmasters.labels: map[string]string
+
+Labels specifies the labels added to Pods in this group.
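+
+Example (an illustrative sketch; the label key and value are placeholders):
+
+```yaml
+spec:
+  syncmasters:
+    labels:
+      environment: test
+```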
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L104)
+
+### .spec.syncmasters.labelsIgnoreList: []string
+
+LabelsIgnoreList lists regexp or plain definitions of labels which should be ignored
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L106)
+
+### .spec.syncmasters.labelsMode: string
+
+LabelsMode defines the labels mode which should be used while overriding labels
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L108)
+
+### .spec.syncmasters.maxCount: int
+
+MaxCount specifies an upper limit for count
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L76)
+
+### .spec.syncmasters.minCount: int
+
+MinCount specifies a lower limit for count
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L74)
+
+### .spec.syncmasters.nodeAffinity: core.NodeAffinity
+
+NodeAffinity specifies additional nodeAffinity settings in ArangoDB Pod definitions
+
+Links:
+* [Documentation of core.NodeAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#nodeaffinity-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L138)
+
+### .spec.syncmasters.nodeSelector: map[string]string
+
+NodeSelector specifies a set of selectors for nodes
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L114)
+
+### .spec.syncmasters.numactl.args: []string
+
+Args defines the list of arguments for the numactl process
+
+Default Value: []
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L38)
+
+### .spec.syncmasters.numactl.enabled: bool
+
+Enabled defines if numactl should be enabled
+
+Default Value: false
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L30)
+
+### .spec.syncmasters.numactl.path: string
+
+Path defines the numactl path within the container
+
+Default Value: /usr/bin/numactl
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L34)
+
+### .spec.syncmasters.overrideDetectedNumberOfCores: bool
+
+OverrideDetectedNumberOfCores determines if the detected number of cores should be overridden based on the values in resources.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L92)
+
+### .spec.syncmasters.overrideDetectedTotalMemory: bool
+
+OverrideDetectedTotalMemory determines if the detected memory should be overridden based on the values in resources.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L90)
+
+### .spec.syncmasters.podModes.network: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec_pod_modes.go#L31)
+
+### .spec.syncmasters.podModes.pid: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec_pod_modes.go#L32)
+
+### .spec.syncmasters.port: uint16
+
+Port defines the port used by the member
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L182)
+
+### .spec.syncmasters.priorityClassName: string
+
+PriorityClassName specifies a priority class name
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L118)
+
+### .spec.syncmasters.probes.livenessProbeDisabled: bool
+
+LivenessProbeDisabled if true, liveness probes are disabled
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L193)
+
+### .spec.syncmasters.probes.livenessProbeSpec.failureThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227)
+
+### .spec.syncmasters.probes.livenessProbeSpec.initialDelaySeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223)
+
+### .spec.syncmasters.probes.livenessProbeSpec.periodSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224)
+
+### .spec.syncmasters.probes.livenessProbeSpec.successThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226)
+
+### .spec.syncmasters.probes.livenessProbeSpec.timeoutSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225)
+
+### .spec.syncmasters.probes.ReadinessProbeDisabled: bool
+
+OldReadinessProbeDisabled if true, readiness probes are disabled
+Deprecated: This field is deprecated, kept only for backward compatibility.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L200)
+
+### .spec.syncmasters.probes.readinessProbeDisabled: bool
+
+ReadinessProbeDisabled overrides the probe-disabled flag in the proper (lowercase) form, with backward compatibility
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L202)
+
+### .spec.syncmasters.probes.readinessProbeSpec.failureThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227)
+
+### .spec.syncmasters.probes.readinessProbeSpec.initialDelaySeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223)
+
+### .spec.syncmasters.probes.readinessProbeSpec.periodSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224)
+
+### .spec.syncmasters.probes.readinessProbeSpec.successThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226)
+
+### .spec.syncmasters.probes.readinessProbeSpec.timeoutSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225)
+
+### .spec.syncmasters.probes.startupProbeDisabled: bool
+
+StartupProbeDisabled if true, startup probes are disabled
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L207)
+
+### .spec.syncmasters.probes.startupProbeSpec.failureThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227)
+
+### .spec.syncmasters.probes.startupProbeSpec.initialDelaySeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223)
+
+### .spec.syncmasters.probes.startupProbeSpec.periodSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224)
+
+### .spec.syncmasters.probes.startupProbeSpec.successThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226)
+
+### .spec.syncmasters.probes.startupProbeSpec.timeoutSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225)
+
+### .spec.syncmasters.pvcResizeMode: string
+
+VolumeResizeMode specifies the resize mode for PVCs
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L124)
+
+### .spec.syncmasters.resources: core.ResourceRequirements
+
+Resources holds resource requests & limits
+
+Links:
+* [Documentation of core.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#resourcerequirements-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L88)
+
+### .spec.syncmasters.schedulerName: string
+
+SchedulerName defines the scheduler name used for the group
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L82)
+
+### .spec.syncmasters.securityContext.addCapabilities: []string
+
+AddCapabilities adds new capabilities to containers
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L45)
+
+### .spec.syncmasters.securityContext.allowPrivilegeEscalation: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L47)
+
+### .spec.syncmasters.securityContext.dropAllCapabilities: bool
+
+DropAllCapabilities specifies if capabilities should be dropped for this pod's containers
+Deprecated: This field is added for backward compatibility. Will be removed in 1.1.0.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L43)
+
+### .spec.syncmasters.securityContext.fsGroup: int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L55)
+
+### .spec.syncmasters.securityContext.privileged: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L48)
+
+### .spec.syncmasters.securityContext.readOnlyRootFilesystem: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L49)
+
+### .spec.syncmasters.securityContext.runAsGroup: int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L52)
+
+### .spec.syncmasters.securityContext.runAsNonRoot: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L50)
+
+### .spec.syncmasters.securityContext.runAsUser: int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L51)
+
+### .spec.syncmasters.securityContext.seccompProfile: core.SeccompProfile
+
+SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.
+
+Links:
+* [Documentation of core.SeccompProfile](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#seccompprofile-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L71)
+
+### .spec.syncmasters.securityContext.seLinuxOptions: core.SELinuxOptions
+
+SELinuxOptions are the labels to be applied to the container
+
+Links:
+* [Documentation of core.SELinuxOptions](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#selinuxoptions-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L76)
+
+### .spec.syncmasters.securityContext.supplementalGroups: []int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L54)
+
+### .spec.syncmasters.securityContext.sysctls: map[string]intstr.IntOrString
+
+Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+sysctls (by the container runtime) might fail to launch.
+The map value can be a string or an int.
+
+Links:
+* [Documentation](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/)
+
+Example:
+```yaml
+sysctls:
+  "kernel.shm_rmid_forced": "0"
+  "net.core.somaxconn": 1024
+  "kernel.msgmax": "65536"
+```
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L66)
+
+### .spec.syncmasters.serviceAccountName: string
+
+ServiceAccountName specifies the name of the service account used for Pods in this group.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L112)
+
+### .spec.syncmasters.shutdownDelay: int
+
+ShutdownDelay defines how long the operator should delay finalizer removal after shutdown
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L163)
+
+### .spec.syncmasters.shutdownMethod: string
+
+ShutdownMethod describes the member shutdown procedure taken by the Operator
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L161)
+
+### .spec.syncmasters.sidecarCoreNames: []string
+
+SidecarCoreNames is a list of sidecar containers which must run in the pod.
+Some names (e.g.: "server", "worker") are reserved, and they don't have any impact.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L141)
+
+### .spec.syncmasters.sidecars: []core.Container
+
+Sidecars specifies a list of additional containers to be started
+
+Links:
+* [Documentation of core.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#container-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L145)
+
+### .spec.syncmasters.storageClassName: string
+
+StorageClassName specifies the storage class name used for the servers.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L84)
+
+### .spec.syncmasters.terminationGracePeriodSeconds: int64
+
+TerminationGracePeriodSeconds overrides the default TerminationGracePeriodSeconds for pods - via silent rotation
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L173)
+
+### .spec.syncmasters.tolerations: []core.Toleration
+
+Tolerations specifies the tolerations added to Pods in this group.
+
+Links:
+* [Documentation of core.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#toleration-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L96)
+
+### .spec.syncmasters.volumeAllowShrink: bool
+
+Deprecated: VolumeAllowShrink allows shrinking the volume
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L126)
+
+### .spec.syncmasters.volumeClaimTemplate: core.PersistentVolumeClaim
+
+VolumeClaimTemplate specifies a template for volume claims
+
+Links:
+* [Documentation of core.PersistentVolumeClaim](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaim-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L122)
+
+### .spec.syncmasters.volumeMounts: []ServerGroupSpecVolumeMount
+
+VolumeMounts defines a list of volume mounts mounted into the server container
+
+Links:
+* [Documentation of ServerGroupSpecVolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#volumemount-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L153)
+
+### .spec.syncmasters.volumes\[int\].configMap: core.ConfigMapVolumeSource
+
+ConfigMap which should be mounted into the pod
+
+Links:
+* [Documentation of core.ConfigMapVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#configmapvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L138)
+
+### .spec.syncmasters.volumes\[int\].emptyDir: core.EmptyDirVolumeSource
+
+EmptyDir
+
+Links:
+* [Documentation of core.EmptyDirVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#emptydirvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L143)
+
+### .spec.syncmasters.volumes\[int\].hostPath: core.HostPathVolumeSource
+
+HostPath
+
+Links:
+* [Documentation of core.HostPathVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#hostpathvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L148)
+
+### .spec.syncmasters.volumes\[int\].name: string
+
+Name of the volume
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L128)
+
+### .spec.syncmasters.volumes\[int\].persistentVolumeClaim: core.PersistentVolumeClaimVolumeSource
+
+PersistentVolumeClaim
+
+Links:
+* [Documentation of core.PersistentVolumeClaimVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaimvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L153)
+
+### .spec.syncmasters.volumes\[int\].secret: core.SecretVolumeSource
+
+Secret which should be mounted into the pod
+
+Links:
+* [Documentation of core.SecretVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L133)
+
+### .spec.syncworkers.affinity: core.PodAffinity
+
+Affinity specifies additional affinity settings in ArangoDB Pod definitions
+
+Links:
+* [Documentation of core.PodAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podaffinity-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L134)
+
+### .spec.syncworkers.allowMemberRecreation: bool
+
+AllowMemberRecreation allows recreating members. The value is only used for Coordinators and DBServers (default true); for all other groups it is set to false.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L171)
+
+### .spec.syncworkers.annotations: map[string]string
+
+Annotations specifies the annotations added to Pods in this group.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L98)
+
+### .spec.syncworkers.annotationsIgnoreList: []string
+
+AnnotationsIgnoreList lists regexp or plain definitions of annotations which should be ignored
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L100)
+
+### .spec.syncworkers.annotationsMode: string
+
+AnnotationsMode defines the annotations mode which should be used while overriding annotations
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L102)
+
+### .spec.syncworkers.antiAffinity: core.PodAntiAffinity
+
+AntiAffinity specifies additional antiAffinity settings in ArangoDB Pod definitions
+
+Links:
+* [Documentation of core.PodAntiAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podantiaffinity-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L130)
+
+### .spec.syncworkers.args: []string
+
+Args holds additional command-line arguments
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L78)
+
+### .spec.syncworkers.count: int
+
+Count holds the requested number of servers
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L72)
+
+### .spec.syncworkers.entrypoint: string
+
+Entrypoint overrides the container executable
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L80)
+
+### .spec.syncworkers.envs\[int\].name: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_env_var.go#L26)
+
+### .spec.syncworkers.envs\[int\].value: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_env_var.go#L27)
+
+### .spec.syncworkers.ephemeralVolumes.apps.size: resource.Quantity
+
+Size defines the size of the ephemeral volume
+
+Links:
+* [Documentation of resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#quantity-resource-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_ephemeral_volumes.go#L64)
+
+### .spec.syncworkers.ephemeralVolumes.temp.size: resource.Quantity
+
+Size defines the size of the ephemeral volume
+
+Links:
+* [Documentation of resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#quantity-resource-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_ephemeral_volumes.go#L64)
+
+### .spec.syncworkers.exporterPort: uint16
+
+ExporterPort defines the port used by the exporter
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L184)
+
+### .spec.syncworkers.extendedRotationCheck: bool
+
+ExtendedRotationCheck extends the checks for rotation
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L157)
+
+### .spec.syncworkers.externalPortEnabled: bool
+
+ExternalPortEnabled defines if the external port should be enabled. If set to false, ports need to be exposed via a sidecar. Only for ArangoD members
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L169)
+
+### .spec.syncworkers.indexMethod: string
+
+IndexMethod defines the group indexing method
+
+Possible Values:
+* random (default) - Pick a random ID for the member. Enforced on the Community Operator.
+* ordered - Use a sequential number as the member ID, starting from 0. Requires the Enterprise Operator.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L177)
+
+### .spec.syncworkers.initContainers.containers: []core.Container
+
+Containers contains a list of containers
+
+Links:
+* [Documentation of core.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#container-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_init_containers.go#L91)
+
+### .spec.syncworkers.initContainers.mode: string
+
+Mode keeps the container replace mode
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_init_containers.go#L94)
+
+### .spec.syncworkers.internalPort: int
+
+InternalPort defines the port used for internal communication; it can be accessed over localhost via a sidecar. Only for ArangoD members
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L165)
+
+### .spec.syncworkers.internalPortProtocol: string
+
+InternalPortProtocol defines the protocol of the port used for internal communication; it can be accessed over localhost via a sidecar. Only for ArangoD members
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L167)
+
+### .spec.syncworkers.labels: map[string]string
+
+Labels specifies the labels added to Pods in this group.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L104)
+
+### .spec.syncworkers.labelsIgnoreList: []string
+
+LabelsIgnoreList lists regexp or plain definitions of labels which should be ignored
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L106)
+
+### .spec.syncworkers.labelsMode: string
+
+LabelsMode defines the labels mode which should be used while overriding labels
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L108)
+
+### .spec.syncworkers.maxCount: int
+
+MaxCount specifies an upper limit for count
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L76)
+
+### .spec.syncworkers.minCount: int
+
+MinCount specifies a lower limit for count
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L74)
+
+### .spec.syncworkers.nodeAffinity: core.NodeAffinity
+
+NodeAffinity specifies additional nodeAffinity settings in ArangoDB Pod definitions
+
+Links:
+* [Documentation of core.NodeAffinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#nodeaffinity-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L138)
+
+### .spec.syncworkers.nodeSelector: map[string]string
+
+NodeSelector specifies a set of selectors for nodes
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L114)
+
+### .spec.syncworkers.numactl.args: []string
+
+Args defines the list of arguments for the numactl process
+
+Default Value: []
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L38)
+
+### .spec.syncworkers.numactl.enabled: bool
+
+Enabled defines if numactl should be enabled
+
+Default Value: false
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L30)
+
+### .spec.syncworkers.numactl.path: string
+
+Path defines the numactl path within the container
+
+Default Value: /usr/bin/numactl
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_numactl_spec.go#L34)
+
+### .spec.syncworkers.overrideDetectedNumberOfCores: bool
+
+OverrideDetectedNumberOfCores determines if the detected number of cores should be overridden based on the values in resources.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L92)
+
+### .spec.syncworkers.overrideDetectedTotalMemory: bool
+
+OverrideDetectedTotalMemory determines if the detected memory should be overridden based on the values in resources.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L90)
+
+### .spec.syncworkers.podModes.network: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec_pod_modes.go#L31)
+
+### .spec.syncworkers.podModes.pid: string
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec_pod_modes.go#L32)
+
+### .spec.syncworkers.port: uint16
+
+Port defines the port used by the member
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L182)
+
+### .spec.syncworkers.priorityClassName: string
+
+PriorityClassName specifies a priority class name
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L118)
+
+### .spec.syncworkers.probes.livenessProbeDisabled: bool
+
+LivenessProbeDisabled if true, liveness probes are disabled
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L193)
+
+### .spec.syncworkers.probes.livenessProbeSpec.failureThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227)
+
+### .spec.syncworkers.probes.livenessProbeSpec.initialDelaySeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223)
+
+### .spec.syncworkers.probes.livenessProbeSpec.periodSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224)
+
+### .spec.syncworkers.probes.livenessProbeSpec.successThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226)
+
+### .spec.syncworkers.probes.livenessProbeSpec.timeoutSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225)
+
+### .spec.syncworkers.probes.ReadinessProbeDisabled: bool
+
+OldReadinessProbeDisabled if true, readiness probes are disabled
+Deprecated: This field is deprecated, kept only for backward compatibility.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L200)
+
+### .spec.syncworkers.probes.readinessProbeDisabled: bool
+
+ReadinessProbeDisabled overrides the probe-disabled flag in the proper (lowercase) form, with backward compatibility
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L202)
+
+### .spec.syncworkers.probes.readinessProbeSpec.failureThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227)
+
+### .spec.syncworkers.probes.readinessProbeSpec.initialDelaySeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223)
+
+### .spec.syncworkers.probes.readinessProbeSpec.periodSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224)
+
+### .spec.syncworkers.probes.readinessProbeSpec.successThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226)
+
+### .spec.syncworkers.probes.readinessProbeSpec.timeoutSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225)
+
+### .spec.syncworkers.probes.startupProbeDisabled: bool
+
+StartupProbeDisabled if true, startup probes are disabled
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L207)
+
+### .spec.syncworkers.probes.startupProbeSpec.failureThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L227)
+
+### .spec.syncworkers.probes.startupProbeSpec.initialDelaySeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L223)
+
+### .spec.syncworkers.probes.startupProbeSpec.periodSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L224)
+
+### .spec.syncworkers.probes.startupProbeSpec.successThreshold: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L226)
+
+### .spec.syncworkers.probes.startupProbeSpec.timeoutSeconds: int32
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L225)
+
+### .spec.syncworkers.pvcResizeMode: string
+
+VolumeResizeMode specifies the resize mode for PVCs
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L124)
+
+### .spec.syncworkers.resources: core.ResourceRequirements
+
+Resources holds resource requests & limits
+
+Links:
+* [Documentation of core.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#resourcerequirements-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L88)
+
+### .spec.syncworkers.schedulerName: string
+
+SchedulerName defines the scheduler name used for the group
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L82)
+
+### .spec.syncworkers.securityContext.addCapabilities: []string
+
+AddCapabilities adds new capabilities to containers
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L45)
+
+### .spec.syncworkers.securityContext.allowPrivilegeEscalation: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L47)
+
+### .spec.syncworkers.securityContext.dropAllCapabilities: bool
+
+DropAllCapabilities specifies if capabilities should be dropped for this pod's containers
+Deprecated: This field is added for backward compatibility. Will be removed in 1.1.0.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L43)
+
+### .spec.syncworkers.securityContext.fsGroup: int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L55)
+
+### .spec.syncworkers.securityContext.privileged: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L48)
+
+### .spec.syncworkers.securityContext.readOnlyRootFilesystem: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L49)
+
+### .spec.syncworkers.securityContext.runAsGroup: int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L52)
+
+### .spec.syncworkers.securityContext.runAsNonRoot: bool
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L50)
+
+### .spec.syncworkers.securityContext.runAsUser: int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L51)
+
+### .spec.syncworkers.securityContext.seccompProfile: core.SeccompProfile
+
+SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.
+
+Links:
+* [Documentation of core.SeccompProfile](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#seccompprofile-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L71)
+
+### .spec.syncworkers.securityContext.seLinuxOptions: core.SELinuxOptions
+
+SELinuxOptions are the labels to be applied to the container
+
+Links:
+* [Documentation of core.SELinuxOptions](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#selinuxoptions-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L76)
+
+### .spec.syncworkers.securityContext.supplementalGroups: []int64
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L54)
+
+### .spec.syncworkers.securityContext.sysctls: map[string]intstr.IntOrString
+
+Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+sysctls (by the container runtime) might fail to launch.
+The map value can be a string or an int.
+
+Links:
+* [Documentation](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/)
+
+Example:
+```yaml
+sysctls:
+  "kernel.shm_rmid_forced": "0"
+  "net.core.somaxconn": 1024
+  "kernel.msgmax": "65536"
+```
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_security_context_spec.go#L66)
+
+### .spec.syncworkers.serviceAccountName: string
+
+ServiceAccountName specifies the name of the service account used for Pods in this group.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L112)
+
+### .spec.syncworkers.shutdownDelay: int
+
+ShutdownDelay defines how long the operator should delay finalizer removal after shutdown
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L163)
+
+### .spec.syncworkers.shutdownMethod: string
+
+ShutdownMethod describes the member shutdown procedure taken by the Operator
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L161)
+
+### .spec.syncworkers.sidecarCoreNames: []string
+
+SidecarCoreNames is a list of sidecar containers which must run in the pod.
+Some names (e.g.: "server", "worker") are reserved, and they don't have any impact.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L141)
+
+### .spec.syncworkers.sidecars: []core.Container
+
+Sidecars specifies a list of additional containers to be started
+
+Links:
+* [Documentation of core.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#container-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L145)
+
+### .spec.syncworkers.storageClassName: string
+
+StorageClassName specifies the storage class name used for the servers.
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L84)
+
+### .spec.syncworkers.terminationGracePeriodSeconds: int64
+
+TerminationGracePeriodSeconds overrides the default TerminationGracePeriodSeconds for pods - via silent rotation
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L173)
+
+### .spec.syncworkers.tolerations: []core.Toleration
+
+Tolerations specifies the tolerations added to Pods in this group.
+
+Links:
+* [Documentation of core.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#toleration-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L96)
+
+### .spec.syncworkers.volumeAllowShrink: bool
+
+Deprecated: VolumeAllowShrink allows shrinking the volume
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L126)
+
+### .spec.syncworkers.volumeClaimTemplate: core.PersistentVolumeClaim
+
+VolumeClaimTemplate specifies a template for volume claims
+
+Links:
+* [Documentation of core.PersistentVolumeClaim](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaim-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L122)
+
+### .spec.syncworkers.volumeMounts: []ServerGroupSpecVolumeMount
+
+VolumeMounts defines a list of volume mounts mounted into the server container
+
+Links:
+* [Documentation of ServerGroupSpecVolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#volumemount-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_spec.go#L153)
+
+### .spec.syncworkers.volumes\[int\].configMap: core.ConfigMapVolumeSource
+
+ConfigMap which should be mounted into the pod
+
+Links:
+* [Documentation of core.ConfigMapVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#configmapvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L138)
+
+### .spec.syncworkers.volumes\[int\].emptyDir: core.EmptyDirVolumeSource
+
+EmptyDir
+
+Links:
+* [Documentation of core.EmptyDirVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#emptydirvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L143)
+
+### .spec.syncworkers.volumes\[int\].hostPath: core.HostPathVolumeSource
+
+HostPath
+
+Links:
+* [Documentation of core.HostPathVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#hostpathvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L148)
+
+### .spec.syncworkers.volumes\[int\].name: string
+
+Name of the volume
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L128)
+
+### .spec.syncworkers.volumes\[int\].persistentVolumeClaim: core.PersistentVolumeClaimVolumeSource
+
+PersistentVolumeClaim
+
+Links:
+* [Documentation of core.PersistentVolumeClaimVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaimvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L153)
+
+### .spec.syncworkers.volumes\[int\].secret: core.SecretVolumeSource
+
+Secret which should be mounted into the pod
+
+Links:
+* [Documentation of core.SecretVolumeSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#secretvolumesource-v1-core)
+
+[Code Reference](/pkg/apis/deployment/v1/server_group_volume.go#L133)
+
+### .spec.timeouts.actions: map[string]meta.Duration
+
+Actions keeps a map of the action timeouts.
+ +Links: +* [List of supported action names](/docs/generated/actions.md) +* [Definition of meta.Duration](https://github.com/kubernetes/apimachinery/blob/v0.26.6/pkg/apis/meta/v1/duration.go) + +Example: +```yaml +actions: + AddMember: 30m +``` + +[Code Reference](/pkg/apis/deployment/v1/timeouts.go#L44) + +### .spec.timeouts.maintenanceGracePeriod: int64 + +MaintenanceGracePeriod action timeout + +[Code Reference](/pkg/apis/deployment/v1/timeouts.go#L36) + +### .spec.timezone: string + +Timezone, if specified, will set the timezone for the deployment. +Must be in a format accepted by "tzdata", e.g. `America/New_York` or `Europe/London` + +[Code Reference](/pkg/apis/deployment/v1/deployment_spec.go#L262) + +### .spec.tls.altNames: []string + +[Code Reference](/pkg/apis/deployment/v1/tls_spec.go#L58) + +### .spec.tls.caSecretName: string + +[Code Reference](/pkg/apis/deployment/v1/tls_spec.go#L57) + +### .spec.tls.mode: string + +[Code Reference](/pkg/apis/deployment/v1/tls_spec.go#L61) + +### .spec.tls.sni.mapping.\: []string + +[Code Reference](/pkg/apis/deployment/v1/tls_sni_spec.go#L30) + +### .spec.tls.ttl: string + +[Code Reference](/pkg/apis/deployment/v1/tls_spec.go#L59) + +### .spec.topology.enabled: bool + +[Code Reference](/pkg/apis/deployment/v1/topology_spec.go#L26) + +### .spec.topology.label: string + +[Code Reference](/pkg/apis/deployment/v1/topology_spec.go#L28) + +### .spec.topology.zones: int + +[Code Reference](/pkg/apis/deployment/v1/topology_spec.go#L27) + +### .spec.upgrade.autoUpgrade: bool + +Flag specifies if an upgrade should be auto-injected, even if it is not required (e.g. in case of a stuck upgrade) + +[Code Reference](/pkg/apis/deployment/v1/deployment_upgrade_spec.go#L25) + diff --git a/docs/api/ArangoMember.V1.md b/docs/api/ArangoMember.V1.md new file mode 100644 index 000000000..8828eacb1 --- /dev/null +++ b/docs/api/ArangoMember.V1.md @@ -0,0 +1,69 @@ +# API Reference for ArangoMember V1 + +## Spec + +### .spec.deletion_priority: int + +[Code Reference](/pkg/apis/deployment/v1/arango_member_spec.go#L44) + +### .spec.deploymentUID: string + +DeploymentUID defines the Deployment UID. + +[Code Reference](/pkg/apis/deployment/v1/arango_member_spec.go#L36) + +### .spec.group: int + +Group defines the Member Group. + +[Code Reference](/pkg/apis/deployment/v1/arango_member_spec.go#L31) + +### .spec.id: string + +[Code Reference](/pkg/apis/deployment/v1/arango_member_spec.go#L33) + +### .spec.overrides.resources: core.ResourceRequirements + +Resources holds resource requests & limits. Overrides template provided on the group level. + +Links: +* [Documentation of core.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#resourcerequirements-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/arango_member_spec_overrides.go#L38) + +### .spec.overrides.volumeClaimTemplate: core.PersistentVolumeClaim + +VolumeClaimTemplate specifies a template for volume claims. Overrides template provided on the group level. + +Links: +* [Documentation of core.PersistentVolumeClaim](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#persistentvolumeclaim-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/arango_member_spec_overrides.go#L33) + +### .spec.template.checksum: string + +Checksum keeps the Pod Spec Checksum (with ignored fields).
+ +[Code Reference](/pkg/apis/deployment/v1/arango_member_pod_template.go#L60) + +### .spec.template.endpoint: string + +Deprecated: Endpoint is not saved into the template + +[Code Reference](/pkg/apis/deployment/v1/arango_member_pod_template.go#L63) + +### .spec.template.podSpec: core.PodTemplateSpec + +PodSpec specifies the Pod Spec used for this Member. + +Links: +* [Documentation of core.PodTemplateSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#podtemplatespec-v1-core) + +[Code Reference](/pkg/apis/deployment/v1/arango_member_pod_template.go#L54) + +### .spec.template.podSpecChecksum: string + +PodSpecChecksum keeps the Pod Spec Checksum (without ignored fields). + +[Code Reference](/pkg/apis/deployment/v1/arango_member_pod_template.go#L57) + diff --git a/docs/bare-metal.md b/docs/bare-metal.md new file mode 100644 index 000000000..75a4ef590 --- /dev/null +++ b/docs/bare-metal.md @@ -0,0 +1,524 @@ +# ArangoDB on bare metal Kubernetes + +A word of warning upfront: Kubernetes is +awesome and powerful. As with awesome and powerful things, there are +countless ways of setting up a k8s cluster. With great flexibility +comes great complexity, and there are just as many ways of hitting barriers. + +This guide is a walk-through of a reasonable and flexible setup +to get to an ArangoDB cluster running on +a bare metal Kubernetes setup. + +## BEWARE: Do not use this setup for production! + +This guide does not involve setting up dedicated master nodes or high +availability for Kubernetes, but, for the sake of simplicity, uses a single untainted +master. This is the very definition of a test environment. + +If you are interested in running a highly available Kubernetes setup, please +refer to: [Creating Highly Available Clusters with kubeadm](https://kubernetes.io/docs/setup/independent/high-availability/) + +## Requirements + +Let there be 3 Linux boxes, `kube01 (192.168.10.61)`, `kube02 (192.168.10.62)` +and `kube03 (192.168.10.63)`, with `kubeadm` and `kubectl` installed, and off we go: + +* `kubeadm`, `kubectl` version `>=1.10` + +## Initialize the master node + +The master node is special in that it runs the API server and some other +vital infrastructure. + +``` +sudo kubeadm init --pod-network-cidr=10.244.0.0/16 +``` + +``` + [init] Using Kubernetes version: v1.13.2 + [preflight] Running pre-flight checks + [preflight] Pulling images required for setting up a Kubernetes cluster + [preflight] This might take a minute or two, depending on the speed of your internet connection + [preflight] You can also perform this action in beforehand using 'kubeadm config images pull' + [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" + [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" + [kubelet-start] Activating the kubelet service + [certs] Using certificateDir folder "/etc/kubernetes/pki" + [certs] Generating "ca" certificate and key + [certs] Generating "apiserver" certificate and key + [certs] apiserver serving cert is signed for DNS names [kube01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.10.61] + [certs] Generating "apiserver-kubelet-client" certificate and key + [certs] Generating "front-proxy-ca" certificate and key + [certs] Generating "front-proxy-client" certificate and key + [certs] Generating "etcd/ca" certificate and key + [certs] Generating
"apiserver-etcd-client" certificate and key + [certs] Generating "etcd/server" certificate and key + [certs] etcd/server serving cert is signed for DNS names [kube01 localhost] and IPs [192.168.10.61 127.0.0.1 ::1] + [certs] Generating "etcd/peer" certificate and key + [certs] etcd/peer serving cert is signed for DNS names [kube01 localhost] and IPs [192.168.10.61 127.0.0.1 ::1] + [certs] Generating "etcd/healthcheck-client" certificate and key + [certs] Generating "sa" key and public key + [kubeconfig] Using kubeconfig folder "/etc/kubernetes" + [kubeconfig] Writing "admin.conf" kubeconfig file + [kubeconfig] Writing "kubelet.conf" kubeconfig file + [kubeconfig] Writing "controller-manager.conf" kubeconfig file + [kubeconfig] Writing "scheduler.conf" kubeconfig file + [control-plane] Using manifest folder "/etc/kubernetes/manifests" + [control-plane] Creating static Pod manifest for "kube-apiserver" + [control-plane] Creating static Pod manifest for "kube-controller-manager" + [control-plane] Creating static Pod manifest for "kube-scheduler" + [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" + [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s + [apiclient] All control plane components are healthy after 23.512869 seconds + [uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace + [kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster + [patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "kube01" as an annotation + [mark-control-plane] Marking the node kube01 as control-plane by adding the label "node-role.kubernetes.io/master=''" + [mark-control-plane] Marking the node kube01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] + [bootstrap-token] Using token: blcr1y.49wloegyaugice8a + [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles + [bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials + [bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token + [bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster + [bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace + [addons] Applied essential addon: CoreDNS + [addons] Applied essential addon: kube-proxy + + Your Kubernetes master has initialized successfully! + + To start using your cluster, you need to run the following as a regular user: + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + + You should now deploy a pod network to the cluster. 
+ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + https://kubernetes.io/docs/concepts/cluster-administration/addons/ + + You can now join any number of machines by running the following on each node as root: + + kubeadm join 192.168.10.61:6443 --token blcr1y.49wloegyaugice8a --discovery-token-ca-cert-hash sha256:0505933664d28054a62298c68dc91e9b2b5cf01ecfa2228f3c8fa2412b7a78c8 +``` + +Go ahead and do as above instructed and see into getting kubectl to work on the master: + +``` +mkdir -p $HOME/.kube +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config +``` + +## Deploy a pod network + +For this guide, we go with **flannel**, as it is an easy way of setting up a +layer 3 network, which uses the Kubernetes API and just works anywhere, where a +network between the involved machines works: + +``` +kubectl apply -f \ + https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml +``` +``` + clusterrole.rbac.authorization.k8s.io/flannel created + clusterrolebinding.rbac.authorization.k8s.io/flannel created + serviceaccount/flannel created + configmap/kube-flannel-cfg created + daemonset.extensions/kube-flannel-ds-amd64 created + daemonset.extensions/kube-flannel-ds-arm64 created + daemonset.extensions/kube-flannel-ds-arm created + daemonset.extensions/kube-flannel-ds-ppc64le created + daemonset.extensions/kube-flannel-ds-s390x created +``` + +## Join remaining nodes + +Run the above join commands on the nodes `kube02` and `kube03`. Below is the +output on `kube02` for the setup for this guide: + +``` +sudo kubeadm join 192.168.10.61:6443 --token blcr1y.49wloegyaugice8a --discovery-token-ca-cert-hash sha256:0505933664d28054a62298c68dc91e9b2b5cf01ecfa2228f3c8fa2412b7a78c8 +``` +``` + [preflight] Running pre-flight checks + [discovery] Trying to connect to API Server "192.168.10.61:6443" + [discovery] Created cluster-info discovery client, requesting info from "https:// 192.168.10.61:6443" + [discovery] Requesting info from "https://192.168.10.61:6443" again to validate TLS against the pinned public key + [discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.10.61:6443" + [discovery] Successfully established connection with API Server "192.168.10.61:6443" + [join] Reading configuration from the cluster... + [join] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' + [kubelet] Downloading configuration for the kubelet from the "kubelet-config-1.13" ConfigMap in the kube-system namespace + [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" + [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" + [kubelet-start] Activating the kubelet service + [tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap... + [patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "kube02" as an annotation + +This node has joined the cluster: +* Certificate signing request was sent to apiserver and a response was received. +* The Kubelet was informed of the new secure connection details. + +Run 'kubectl get nodes' on the master to see this node join the cluster. 
+``` + +## Untaint master node + +``` +kubectl taint nodes --all node-role.kubernetes.io/master- +``` +``` + node/kube01 untainted + taint "node-role.kubernetes.io/master:" not found + taint "node-role.kubernetes.io/master:" not found +``` + +## Wait for nodes to get ready and perform sanity checks + +After a brief period, you should see that your nodes are good to go: + +``` +kubectl get nodes +``` +``` + NAME STATUS ROLES AGE VERSION + kube01 Ready master 38m v1.13.2 + kube02 Ready 13m v1.13.2 + kube03 Ready 63s v1.13.2 +``` + +Just a quick sanity check to see that your cluster is up and running: + +``` +kubectl get all --all-namespaces +``` +``` + NAMESPACE NAME READY STATUS RESTARTS AGE + kube-system pod/coredns-86c58d9df4-r9l5c 1/1 Running 2 41m + kube-system pod/coredns-86c58d9df4-swzpx 1/1 Running 2 41m + kube-system pod/etcd-kube01 1/1 Running 2 40m + kube-system pod/kube-apiserver-kube01 1/1 Running 2 40m + kube-system pod/kube-controller-manager-kube01 1/1 Running 2 40m + kube-system pod/kube-flannel-ds-amd64-hppt4 1/1 Running 3 16m + kube-system pod/kube-flannel-ds-amd64-kt6jh 1/1 Running 1 3m41s + kube-system pod/kube-flannel-ds-amd64-tg7gz 1/1 Running 2 20m + kube-system pod/kube-proxy-f2g2q 1/1 Running 2 41m + kube-system pod/kube-proxy-gt9hh 1/1 Running 0 3m41s + kube-system pod/kube-proxy-jwmq7 1/1 Running 2 16m + kube-system pod/kube-scheduler-kube01 1/1 Running 2 40m + + NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + default service/kubernetes ClusterIP 10.96.0.1 443/TCP 41m + kube-system service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 41m +``` + +## Deploy helm + +- Obtain the current [helm release](https://github.com/helm/helm/releases) for your architecture + +- Create the tiller user + + ``` + kubectl create serviceaccount --namespace kube-system tiller + ``` + ``` + serviceaccount/tiller created + ``` + +- Attach `tiller` to the proper role + + ``` + kubectl create clusterrolebinding tiller-cluster-rule \ --clusterrole=cluster-admin --serviceaccount=kube-system:tiller + ``` + ``` + clusterrolebinding.rbac.authorization.k8s.io/tiller-cluster-rule created + ``` + +- Initialise helm + + ``` + helm init --service-account tiller + ``` + ``` + $HELM_HOME has been configured at /home/xxx/.helm. + ... + Happy Helming! + + Tiller (the Helm server-side component) has been + installed into your Kubernetes Cluster. + ``` + +## Deploy ArangoDB operator charts + +- Deploy the ArangoDB custom resource definition chart + +``` +helm install https://github.com/arangodb/kube-arangodb/releases/download/0.3.7/kube-arangodb-crd.tgz +``` +``` + NAME: hoping-gorilla + LAST DEPLOYED: Mon Jan 14 06:10:27 2019 + NAMESPACE: default + STATUS: DEPLOYED + + RESOURCES: + ==> v1beta1/CustomResourceDefinition + NAME AGE + arangodeployments.database.arangodb.com 0s + arangodeploymentreplications.replication.database.arangodb.com 0s + + + NOTES: + + kube-arangodb-crd has been deployed successfully! + + Your release is named 'hoping-gorilla'. + + You can now continue install kube-arangodb chart.
+``` +- Deploy the ArangoDB operator chart + +``` +helm install https://github.com/arangodb/kube-arangodb/releases/download/0.3.7/kube-arangodb.tgz +``` +``` + NAME: illocutionary-whippet + LAST DEPLOYED: Mon Jan 14 06:11:58 2019 + NAMESPACE: default + STATUS: DEPLOYED + + RESOURCES: + ==> v1beta1/ClusterRole + NAME AGE + illocutionary-whippet-deployment-replications 0s + illocutionary-whippet-deployment-replication-operator 0s + illocutionary-whippet-deployments 0s + illocutionary-whippet-deployment-operator 0s + + ==> v1beta1/ClusterRoleBinding + NAME AGE + illocutionary-whippet-deployment-replication-operator-default 0s + illocutionary-whippet-deployment-operator-default 0s + + ==> v1beta1/RoleBinding + NAME AGE + illocutionary-whippet-deployment-replications 0s + illocutionary-whippet-deployments 0s + + ==> v1/Service + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + arango-deployment-replication-operator ClusterIP 10.107.2.133 8528/TCP 0s + arango-deployment-operator ClusterIP 10.104.189.81 8528/TCP 0s + + ==> v1beta1/Deployment + NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE + arango-deployment-replication-operator 2 2 2 0 0s + arango-deployment-operator 2 2 2 0 0s + + ==> v1/Pod(related) + NAME READY STATUS RESTARTS AGE + arango-deployment-replication-operator-5f679fbfd8-nk8kz 0/1 Pending 0 0s + arango-deployment-replication-operator-5f679fbfd8-pbxdl 0/1 ContainerCreating 0 0s + arango-deployment-operator-65f969fc84-gjgl9 0/1 Pending 0 0s + arango-deployment-operator-65f969fc84-wg4nf 0/1 ContainerCreating 0 0s + + +NOTES: + +kube-arangodb has been deployed successfully! + +Your release is named 'illocutionary-whippet'. + +You can now deploy ArangoDeployment & ArangoDeploymentReplication resources. + +See https://www.arangodb.com/docs/stable/tutorials-kubernetes.html +for how to get started. +``` +- Unlike cloud k8s offerings, no file volume infrastructure exists out of the box, + so we still need to deploy the storage operator chart: + +``` +helm install \ + https://github.com/arangodb/kube-arangodb/releases/download/0.3.7/kube-arangodb-storage.tgz +``` +``` + NAME: sad-newt + LAST DEPLOYED: Mon Jan 14 06:14:15 2019 + NAMESPACE: default + STATUS: DEPLOYED + + RESOURCES: + ==> v1/ServiceAccount + NAME SECRETS AGE + arango-storage-operator 1 1s + + ==> v1beta1/CustomResourceDefinition + NAME AGE + arangolocalstorages.storage.arangodb.com 1s + + ==> v1beta1/ClusterRole + NAME AGE + sad-newt-storages 1s + sad-newt-storage-operator 1s + + ==> v1beta1/ClusterRoleBinding + NAME AGE + sad-newt-storage-operator 1s + + ==> v1beta1/RoleBinding + NAME AGE + sad-newt-storages 1s + + ==> v1/Service + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + arango-storage-operator ClusterIP 10.104.172.100 8528/TCP 1s + + ==> v1beta1/Deployment + NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE + arango-storage-operator 2 2 2 0 1s + + ==> v1/Pod(related) + NAME READY STATUS RESTARTS AGE + arango-storage-operator-6bc64ccdfb-tzllq 0/1 ContainerCreating 0 0s + arango-storage-operator-6bc64ccdfb-zdlxk 0/1 Pending 0 0s + + + NOTES: + + kube-arangodb-storage has been deployed successfully! + + Your release is named 'sad-newt'. + + You can now deploy an ArangoLocalStorage resource. + + See https://www.arangodb.com/docs/stable/deployment-kubernetes-storage-resource.html + for further instructions.
+ +``` +## Deploy ArangoDB cluster + +- Deploy local storage + +``` +kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/master/examples/arango-local-storage.yaml +``` +``` + arangolocalstorage.storage.arangodb.com/arangodb-local-storage created +``` + +- Deploy a simple cluster + +``` +kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/master/examples/simple-cluster.yaml +``` +``` + arangodeployment.database.arangodb.com/example-simple-cluster created +``` + +## Access your cluster + +- Find your cluster's network address: + +``` +kubectl get services +``` +``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + arango-deployment-operator ClusterIP 10.104.189.81 8528/TCP 14m + arango-deployment-replication-operator ClusterIP 10.107.2.133 8528/TCP 14m + example-simple-cluster ClusterIP 10.109.170.64 8529/TCP 5m18s + example-simple-cluster-ea NodePort 10.98.198.7 8529:30551/TCP 4m8s + example-simple-cluster-int ClusterIP None 8529/TCP 5m19s + kubernetes ClusterIP 10.96.0.1 443/TCP 69m +``` + +- In this case, according to the access service, `example-simple-cluster-ea`, + the cluster's coordinators are reachable here: + +https://kube01:30551, https://kube02:30551 and https://kube03:30551 + +## LoadBalancing + +For this guide we like to use the `metallb` load balancer, which can be easily +installed as a simple layer 2 load balancer: + +- Install the `metallb` controller: + +``` +kubectl apply -f \ + https://raw.githubusercontent.com/google/metallb/v0.7.3/manifests/metallb.yaml +``` +``` + namespace/metallb-system created + serviceaccount/controller created + serviceaccount/speaker created + clusterrole.rbac.authorization.k8s.io/metallb-system:controller created + clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created + role.rbac.authorization.k8s.io/config-watcher created + clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created + clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created + rolebinding.rbac.authorization.k8s.io/config-watcher created + daemonset.apps/speaker created + deployment.apps/controller created +``` + +- Deploy the network range configuration. Assuming that the range of IP addresses + granted to `metallb` for load balancing is 192.168.10.224/28, + download the [example layer 2 configuration](https://raw.githubusercontent.com/google/metallb/v0.7.3/manifests/example-layer2-config.yaml). + +``` +wget https://raw.githubusercontent.com/google/metallb/v0.7.3/manifests/example-layer2-config.yaml +``` + +- Edit the `example-layer2-config.yaml` file to use the appropriate addresses. + Do this with great care, as YAML files are indentation-sensitive.
+ +``` +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: metallb-system + name: config +data: + config: | + address-pools: + - name: my-ip-space + protocol: layer2 + addresses: + - 192.168.10.224/28 +``` + +- Deploy the configuration map: + +``` +kubectl apply -f example-layer2-config.yaml +``` +``` + configmap/config created +``` + +- Restart ArangoDB's endpoint access service: + +``` +kubectl delete service example-simple-cluster-ea +``` +``` + service "example-simple-cluster-ea" deleted +``` + +- Watch how the service goes from `NodePort` to `LoadBalancer` compared to the output above: + +``` +kubectl get services +``` +``` NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + arango-deployment-operator ClusterIP 10.104.189.81 8528/TCP 34m + arango-deployment-replication-operator ClusterIP 10.107.2.133 8528/TCP 34m + example-simple-cluster ClusterIP 10.109.170.64 8529/TCP 24m + example-simple-cluster-ea LoadBalancer 10.97.217.222 192.168.10.224 8529:30292/TCP 22s + example-simple-cluster-int ClusterIP None 8529/TCP 24m + kubernetes ClusterIP 10.96.0.1 443/TCP 89m +``` + +- Now you are able to access all 3 coordinators through https://192.168.10.224:8529 diff --git a/docs/customer_questions.md b/docs/customer_questions.md new file mode 100644 index 000000000..e172b61c8 --- /dev/null +++ b/docs/customer_questions.md @@ -0,0 +1,11 @@ +# Customer questions + +- What is your experience with using Kubernetes? +- What is your experience with using ArangoDB on Kubernetes? +- What do you think of the operator concept for an ArangoDB Kubernetes offering? +- What is the minimum version of Kubernetes you're running / need? +- What kind of persistent volumes do you use / plan to use? +- What kind of load-balancer support do you use / need for ArangoDB in Kubernetes? +- Do you have a need to limit ArangoDB Pods to a sub-section of your Kubernetes cluster? +- Do you see a need to shut down a cluster and bring it back alive later (with its data!)? +- In which cloud/on premises environment are you going to use Kubernetes (AWS, GCE, on premise...)?
diff --git a/docs/design/README.md b/docs/design/README.md new file mode 100644 index 000000000..bdd89f717 --- /dev/null +++ b/docs/design/README.md @@ -0,0 +1,30 @@ +# ArangoDB operator design documents + +- [Architecture change](./arch_change.md) +- [Constraints](./constraints.md) +- [Health](./health.md) +- [Metrics](./metrics.md) +- [Kubernetes Pod name versus cluster ID](./pod_name_versus_cluster_id.md) +- [Resource & labels](./resource_and_labels.md) +- [Resource Management](./resource_management.md) +- [Scaling](./scaling.md) +- [Status](./status.md) +- [Upgrading](./upgrading.md) +- [Rotating Pods](./rotating.md) +- [Maintenance](./maintenance.md) +- [Additional configuration](./additional_configuration.md) +- [Topology awareness](./topology_awareness.md) +- [Configuring timezone](./configuring_tz.md) +- [Operator API](./api.md) +- [Logging](./logging.md) +- [Manual Recovery](./recovery.md) +- [Backup](./backup.md) + +## Features +- [Force rebuild out-synced Shards with broken Merkle Tree](./features/rebuild_out_synced_shards.md) +- [Failover Leader service](./features/failover_leader_service.md) +- [Restore defaults from last accepted state of deployment](./features/deployment_spec_defaults.md) + +## Debugging +- [Collecting debug info](./debugging.md) \ No newline at end of file diff --git a/docs/design/acceptance_test.md b/docs/design/acceptance_test.md new file mode 100644 index 000000000..5967035dc --- /dev/null +++ b/docs/design/acceptance_test.md @@ -0,0 +1,533 @@ +# Acceptance test for kube-arangodb operator on specific Kubernetes platform + +This acceptance test plan describes all test scenarios that must be executed +successfully in order to consider the kube-arangodb operator production ready +on a specific Kubernetes setup (from now on we'll call a Kubernetes setup a platform). + +## Platform parameters + +Before the test, record the following parameters for the platform the test is executed on. + +- Name of the platform +- Version of the platform +- Upstream Kubernetes version used by the platform (run `kubectl version`) +- Number of nodes used by the Kubernetes cluster (run `kubectl get node`) +- `StorageClasses` provided by the platform (run `kubectl get storageclass`) +- Does the platform use RBAC? (run `kubectl describe clusterrolebinding`) +- Does the platform support services of type `LoadBalancer`? + +If one of the above questions can have multiple answers (e.g. different Kubernetes versions) +then make the platform more specific. E.g. consider "GKE with Kubernetes 1.10.2" a platform +instead of "GKE" which can have version "1.8", "1.9" & "1.10.2". + +## Platform preparations + +Before the tests can be run, the platform has to be prepared. + +### Deploy the ArangoDB operators + +Deploy the following ArangoDB operators: + +- `ArangoDeployment` operator +- `ArangoDeploymentReplication` operator +- `ArangoLocalStorage` operator + +To do so, follow the [instructions in the documentation](https://www.arangodb.com/docs/stable/deployment-kubernetes-usage.html). + +### `PersistentVolume` provider + +If the platform does not provide a `PersistentVolume` provider, create one by running: + +```bash +kubectl apply -f examples/arango-local-storage.yaml +``` + +## Basis tests + +The basis tests are executed on every platform with various images. + +Run the following tests with the following images: + +- Community +- Enterprise + +For every test, one of these images can be chosen, as long as each image +is used in a test at least once.
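+For orientation, such a test manifest is just a small `ArangoDeployment`. A minimal
+sketch along the lines of `tests/acceptance/single.yaml` follows (this is an
+illustration only; the resource name and the exact contents of the file in the
+repository may differ):
+
+```yaml
+apiVersion: "database.arangodb.com/v1alpha"
+kind: "ArangoDeployment"
+metadata:
+  # hypothetical name, not necessarily the one used by the test files
+  name: "acceptance-single"
+spec:
+  # deployment mode under test; the other tests use ActiveFailover or Cluster
+  mode: Single
+  externalAccess:
+    # request an external access service so the `<name>-ea` checks can be performed
+    type: LoadBalancer
+```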
+ +### Test 1a: Create single server deployment + +Create an `ArangoDeployment` of mode `Single`. + +Hint: Use `tests/acceptance/single.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 1 `Pod` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +### Test 1b: Create active failover deployment + +Create an `ArangoDeployment` of mode `ActiveFailover`. + +Hint: Use `tests/acceptance/activefailover.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 5 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +### Test 1c: Create cluster deployment + +Create an `ArangoDeployment` of mode `Cluster`. + +Hint: Use `tests/acceptance/cluster.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 9 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +### Test 1d: Create cluster deployment with dc2dc + +This test requires the use of the enterprise image. + +Create an `ArangoDeployment` of mode `Cluster` with dc2dc enabled. + +Hint: Derive from `tests/acceptance/cluster-sync.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 15 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The deployment must yield a `Service` named `-sync` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +### Test 2a: Scale an active failover deployment + +Create an `ArangoDeployment` of mode `ActiveFailover`. + +- [ ] The deployment must start +- [ ] The deployment must yield 5 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Change the value of `spec.single.count` from 2 to 3. + +- [ ] A single server is added +- [ ] The deployment must yield 6 `Pods` + +Change the value of `spec.single.count` from 3 to 2. + +- [ ] A single server is removed +- [ ] The deployment must yield 5 `Pods` + +### Test 2b: Scale a cluster deployment + +Create an `ArangoDeployment` of mode `Cluster`. + +Hint: Use `tests/acceptance/cluster.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 9 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Change the value of `spec.dbservers.count` from 3 to 5. + +- [ ] Two dbservers are added +- [ ] The deployment must yield 11 `Pods` + +Change the value of `spec.coordinators.count` from 3 to 4. + +- [ ] A coordinator is added +- [ ] The deployment must yield 12 `Pods` + +Change the value of `spec.dbservers.count` from 5 to 2. + +- [ ] Three dbservers are removed (one by one) +- [ ] The deployment must yield 9 `Pods` + +Change the value of `spec.coordinators.count` from 4 to 1.
+ +- [ ] Three coordinators are removed (one by one) +- [ ] The deployment must yield 6 `Pods` + +### Test 3: Production environment + +Production environment tests are only relevant if there are enough nodes +available that `Pods` can be scheduled on. + +The number of available nodes must be >= the maximum server count in +any group. + +### Test 3a: Create single server deployment in production environment + +Create an `ArangoDeployment` of mode `Single` with an environment of `Production`. + +Hint: Derive from `tests/acceptance/single.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 1 `Pod` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +### Test 3b: Create active failover deployment in production environment + +Create an `ArangoDeployment` of mode `ActiveFailover` with an environment of `Production`. + +Hint: Derive from `tests/acceptance/activefailover.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 5 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +### Test 3c: Create cluster deployment in production environment + +Create an `ArangoDeployment` of mode `Cluster` with an environment of `Production`. + +Hint: Derive from `tests/acceptance/cluster.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 9 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +### Test 3d: Create cluster deployment in production environment and scale it + +Create an `ArangoDeployment` of mode `Cluster` with an environment of `Production`. + +Hint: Derive from `tests/acceptance/cluster.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 9 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Change the value of `spec.dbservers.count` from 3 to 4. + +- [ ] A dbserver is added +- [ ] The deployment must yield 10 `Pods` + +Change the value of `spec.coordinators.count` from 3 to 4. + +- [ ] A coordinator is added +- [ ] The deployment must yield 11 `Pods` + +Change the value of `spec.dbservers.count` from 4 to 2. + +- [ ] Two dbservers are removed (one by one) +- [ ] The deployment must yield 9 `Pods` + +Change the value of `spec.coordinators.count` from 4 to 2. + +- [ ] Two coordinators are removed (one by one) +- [ ] The deployment must yield 7 `Pods` + +### Test 4a: Create cluster deployment with `ArangoLocalStorage` provided volumes + +Ensure an `ArangoLocalStorage` is deployed. + +Hint: Use `tests/acceptance/local-storage.yaml`. + +Create an `ArangoDeployment` of mode `Cluster` with a `StorageClass` that is +mapped to an `ArangoLocalStorage` provider. + +Hint: Derive from `tests/acceptance/cluster.yaml`.
+ +- [ ] The deployment must start +- [ ] The deployment must yield 9 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +### Test 4b: Create cluster deployment with a platform-provided `StorageClass` + +This test only applies to platforms that provide their own `StorageClasses`. + +Create an `ArangoDeployment` of mode `Cluster` with a `StorageClass` that is +provided by the platform. + +Hint: Derive from `tests/acceptance/cluster.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 9 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +### Test 5a: Test `Pod` resilience on single servers + +Create an `ArangoDeployment` of mode `Single`. + +Hint: Use `tests/acceptance/single.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 1 `Pod` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Delete the `Pod` of the deployment that contains the single server. + +- [ ] The `Pod` must be restarted +- [ ] After the `Pod` has restarted, the server must have the same data and be responsive again + +### Test 5b: Test `Pod` resilience on active failover + +Create an `ArangoDeployment` of mode `ActiveFailover`. + +Hint: Use `tests/acceptance/activefailover.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 5 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Delete a `Pod` of the deployment that contains an agent. + +- [ ] While the `Pod` is gone & restarted, the cluster must still respond to requests (R/W) +- [ ] The `Pod` must be restarted + +Delete a `Pod` of the deployment that contains a single server. + +- [ ] While the `Pod` is gone & restarted, the cluster must still respond to requests (R/W) +- [ ] The `Pod` must be restarted + +### Test 5c: Test `Pod` resilience on clusters + +Create an `ArangoDeployment` of mode `Cluster`. + +Hint: Use `tests/acceptance/cluster.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 9 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Delete a `Pod` of the deployment that contains an agent. + +- [ ] While the `Pod` is gone & restarted, the cluster must still respond to requests (R/W) +- [ ] The `Pod` must be restarted + +Delete a `Pod` of the deployment that contains a dbserver. + +- [ ] While the `Pod` is gone & restarted, the cluster must still respond to requests (R/W), except + for requests to collections with a replication factor of 1. +- [ ] The `Pod` must be restarted + +Delete a `Pod` of the deployment that contains a coordinator. + +- [ ] While the `Pod` is gone & restarted, the cluster must still respond to requests (R/W), except + requests targeting the restarting coordinator.
+- [ ] The `Pod` must be restarted + +### Test 6a: Test `Node` reboot on single servers + +Create an `ArangoDeployment` of mode `Single`. + +Hint: Use `tests/acceptance/single.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 1 `Pod` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Reboot the `Node` of the deployment that contains the single server. + +- [ ] The `Pod` running on the `Node` must be restarted +- [ ] After the `Pod` has restarted, the server must have the same data and be responsive again + +### Test 6b: Test `Node` reboot on active failover + +Create an `ArangoDeployment` of mode `ActiveFailover` with an environment of `Production`. + +Hint: Use `tests/acceptance/activefailover.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 5 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Reboot a `Node`. + +- [ ] While the `Node` is restarting, the cluster must still respond to requests (R/W) +- [ ] All `Pods` on the `Node` must be restarted + +### Test 6c: Test `Node` reboot on clusters + +Create an `ArangoDeployment` of mode `Cluster` with an environment of `Production`. + +Hint: Use `tests/acceptance/cluster.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 9 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Reboot a `Node`. + +- [ ] While the `Node` is restarting, the cluster must still respond to requests (R/W) +- [ ] All `Pods` on the `Node` must be restarted + +### Test 6d: Test `Node` removal on single servers + +This test is only valid when a `StorageClass` is used that provides network-attached `PersistentVolumes`. + +Create an `ArangoDeployment` of mode `Single`. + +Hint: Use `tests/acceptance/single.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 1 `Pod` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Remove the `Node` containing the deployment from the Kubernetes cluster. + +- [ ] The `Pod` running on the `Node` must be restarted on another `Node` +- [ ] After the `Pod` has restarted, the server must have the same data and be responsive again + +### Test 6e: Test `Node` removal on active failover + +Create an `ArangoDeployment` of mode `ActiveFailover` with an environment of `Production`. + +Hint: Use `tests/acceptance/activefailover.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 5 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Remove a `Node` containing the `Pods` of the deployment from the Kubernetes cluster.
+ +- [ ] While the `Pods` are being restarted on new `Nodes`, the cluster must still respond to requests (R/W) +- [ ] The `Pods` running on the `Node` must be restarted on another `Node` +- [ ] After the `Pods` have restarted, the server must have the same data and be responsive again + +### Test 6f: Test `Node` removal on clusters + +This test is only valid when: + +- A `StorageClass` is used that provides network-attached `PersistentVolumes` +- or all collections have a replication factor of 2 or higher + +Create an `ArangoDeployment` of mode `Cluster` with an environment of `Production`. + +Hint: Use `tests/acceptance/cluster.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 9 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Remove a `Node` containing the `Pods` of the deployment from the Kubernetes cluster. + +- [ ] While the `Pods` are being restarted on new `Nodes`, the cluster must still respond to requests (R/W) +- [ ] The `Pods` running on the `Node` must be restarted on another `Node` +- [ ] After the `Pods` have restarted, the server must have the same data and be responsive again + +### Test 6g: Test `Node` removal on clusters with replication factor 1 + +This test is only valid when: + +- A `StorageClass` is used that provides `Node` local `PersistentVolumes` +- and at least some collections have a replication factor of 1 + +Create an `ArangoDeployment` of mode `Cluster` with an environment of `Production`. + +Hint: Use `tests/acceptance/cluster.yaml`. + +- [ ] The deployment must start +- [ ] The deployment must yield 9 `Pods` +- [ ] The deployment must yield a `Service` named `` +- [ ] The deployment must yield a `Service` named `-ea` +- [ ] The `Service` named `-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Remove a `Node`, containing the dbserver `Pod` that holds a collection with replication factor 1, +from the Kubernetes cluster. + +- [ ] While the `Pods` are being restarted on new `Nodes`, the cluster must still respond to requests (R/W), + except requests involving collections with a replication factor of 1 +- [ ] The `Pod` running the dbserver with a collection that has a replication factor of 1 must NOT be restarted on another `Node` + +Remove the collections with the replication factor of 1. + +- [ ] The remaining `Pods` running on the `Node` must be restarted on another `Node` +- [ ] After the `Pods` have restarted, the server must have the same data, except for the removed collections, and be responsive again + +### Test 7a: Test DC2DC on 2 clusters, running in the same Kubernetes cluster + +This test requires the use of the enterprise image. + +Create 2 `ArangoDeployments` of mode `Cluster` with dc2dc enabled. + +Hint: Derive from `tests/acceptance/cluster-sync.yaml`, name the deployments `cluster1` and `cluster2`. + +Make sure to include a name (`cluster1-to-2`) for an external access package.
+ +```yaml +apiVersion: "database.arangodb.com/v1alpha" +kind: "ArangoDeployment" +metadata: + name: "cluster1" +spec: + mode: Cluster + image: ewoutp/arangodb:3.3.14 + sync: + enabled: true + externalAccess: + accessPackageSecretNames: ["cluster1-to-2"] +``` + +- [ ] The deployments must start +- [ ] The deployments must yield 15 `Pods` +- [ ] The deployments must yield a `Service` named `cluster[1|2]` +- [ ] The deployments must yield a `Service` named `cluster[1|2]-ea` +- [ ] The deployments must yield a `Service` named `cluster[1|2]-sync` +- [ ] The `Services` named `cluster[1|2]-ea` must be accessible from outside (LoadBalancer or NodePort) and show WebUI + +Create an `ArangoDeploymentReplication` from `tests/acceptance/cluster12-replication.yaml`. + +It will take some time until the synchronization (from `cluster1` to `cluster2`) is configured. + +- [ ] The status of the `cluster12-replication` resource shows .... +- [ ] The webUI of `cluster1` shows that you can create a new collection there. +- [ ] The webUI of `cluster2` shows that you cannot create a new collection there. + +Create a collection named `testcol` with a replication factor of 2 and 3 shards (using the webUI of `cluster1`). + +- [ ] The webUI of `cluster2` shows collection `testcol` with the given replication factor and number of shards. + +Create multiple documents in the collection named `testcol` (using the webUI of `cluster1`). + +- [ ] The documents are visible in the webUI of `cluster2`. + +Modify multiple documents in the collection named `testcol` (using the webUI of `cluster1`). + +- [ ] The modified documents are visible in the webUI of `cluster2`. + +Remove one or more documents from the collection named `testcol` (using the webUI of `cluster1`). + +- [ ] The documents are no longer visible in the webUI of `cluster2`. + +Create a new database called `db2` (using the webUI of `cluster1`). + +- [ ] The webUI of `cluster2` shows database `db2`. diff --git a/docs/design/acceptance_test_platforms.md b/docs/design/acceptance_test_platforms.md new file mode 100644 index 000000000..61f31807d --- /dev/null +++ b/docs/design/acceptance_test_platforms.md @@ -0,0 +1,13 @@ +# Acceptance test platforms + +The [kube-arangodb acceptance tests](./acceptance_test.md) must be +executed on the following platforms: + +- Google GKE, with Kubernetes version 1.10 +- Amazon EKS, with Kubernetes version 1.10 +- Amazon & Kops, with Kubernetes version 1.10 +- Azure AKS, with Kubernetes version 1.10 +- Openshift, based on Kubernetes version 1.10 +- Bare metal with kubeadm 1.10 +- Minikube with Kubernetes version 1.10 +- Kubernetes on Docker for Mac, with Kubernetes version 1.10 diff --git a/docs/design/additional_configuration.md b/docs/design/additional_configuration.md new file mode 100644 index 000000000..f4f9599fc --- /dev/null +++ b/docs/design/additional_configuration.md @@ -0,0 +1,17 @@ +# Additional configuration + +It is possible to additionally fine-tune operator behavior by +providing arguments via the `operator.args` chart template value.
+ +For example, you can specify the burst size for k8s API requests or how long the operator +should wait for ArangoDeployment termination after receiving an interruption signal: +``` +operator: + args: ["--kubernetes.burst=40", "--shutdown.timeout=2m"] +``` + +The full list of available arguments can be retrieved using: +``` +export OPERATOR_IMAGE=arangodb/kube-arangodb:1.2.9 +kubectl run arango-operator-help --image=$OPERATOR_IMAGE -i --rm --restart=Never -- --help +``` diff --git a/docs/design/api.md b/docs/design/api.md new file mode 100644 index 000000000..87e1147bb --- /dev/null +++ b/docs/design/api.md @@ -0,0 +1,30 @@ +# Operator API + +A running operator exposes HTTP and gRPC API listeners to allow retrieving and setting some configuration values programmatically. +Both listeners require a secured connection to be established. It is possible to provide a TLS certificate via a k8s secret +using the command line option `--api.tls-secret-name`. If no secret name is provided, the operator will use a self-signed certificate. + +Some HTTP endpoints require authorization; all gRPC endpoints require authorization. +Authorization can be accomplished by providing a JWT token in the 'Authorization' header, e.g. `Authorization: Bearer ` +The JWT token can be fetched from a k8s secret (by default `arangodb-operator-api-jwt`). The token is generated automatically +on operator startup using the signing key specified in the `arangodb-operator-api-jwt-key` secret. If it is empty or does not exist, +the signing key will be auto-generated and saved into the secret. You can specify another signing key using the `--api.jwt-key-secret-name` CLI option. + +## HTTP + +The HTTP API runs at the endpoint specified by the operator command line option `--api.http-port` (8628 by default). + +The HTTP API exposes endpoints used to get operator health and readiness status, operator version, and prometheus-compatible metrics. + +For now only the `/metrics` and `/log/level` endpoints require authorization. + + +## gRPC + +The gRPC API runs at the endpoint specified by the operator command line option `--api.grpc-port` (8728 by default). + +The gRPC API is exposed to allow programmatic access to some operator features and status. + +gRPC protobuf definitions and the go-client can be found in the `github.com/kube-arangodb/pkg/api/server` package. + +All gRPC requests require per-RPC metadata set to contain a valid Authorization header. diff --git a/docs/design/arch_change.md b/docs/design/arch_change.md new file mode 100644 index 000000000..10e268d70 --- /dev/null +++ b/docs/design/arch_change.md @@ -0,0 +1,66 @@ +# Architecture change + +Currently `AMD64` is the default architecture in the operator. +To enable `ARM64` support in the operator, add the following config to the kube-arangodb chart: + +```bash +helm upgrade --install kube-arangodb \ + https://github.com/arangodb/kube-arangodb/releases/download/$VER/kube-arangodb-$VER.tgz \ + --set "operator.architectures={amd64,arm64}" +``` + +## ARM64 ArangoDeployment + +`AMD64` is the default architecture in the ArangoDeployment. +`ARM64` is available since ArangoDB 3.10.0.
+To create an `ARM64` ArangoDeployment, you need to add the `arm64` architecture to the deployment: + +```yaml +apiVersion: database.arangodb.com/v1 +kind: ArangoDeployment +metadata: + name: cluster +spec: + image: arangodb:3.10 + mode: Cluster + architecture: + - arm64 +``` + +## Member Architecture change (Enterprise only) + +To migrate members from AMD64 to ARM64 you need to add the `arm64` architecture to the existing deployment as the first item on the list, e.g.: +```yaml +apiVersion: database.arangodb.com/v1 +kind: ArangoDeployment +metadata: + name: cluster +spec: + image: arangodb:3.10 + mode: Cluster + architecture: + - arm64 + - amd64 +``` + +From now on, all new members will be created on ARM64 nodes. +All recreated members will also be created on ARM64 nodes. + +To change the architecture of a specific member, use the following annotation: +```bash +kubectl annotate pod {MY_POD} deployment.arangodb.com/arch=arm64 +``` + +It will add the `ArchitectureMismatch` condition to the member status, e.g.: +```yaml + - lastTransitionTime: "2022-09-15T07:38:05Z" + lastUpdateTime: "2022-09-15T07:38:05Z" + reason: Member has a different architecture than the deployment + status: "True" + type: ArchitectureMismatch +``` + +To apply the requested arch change, the member needs to be rotated, so an additional step is required: +```bash +kubectl annotate pod {MY_POD} deployment.arangodb.com/rotate=true +``` diff --git a/docs/design/backup.md b/docs/design/backup.md new file mode 100644 index 000000000..c45b51dde --- /dev/null +++ b/docs/design/backup.md @@ -0,0 +1,59 @@ +# ArangoBackup + +## Lifetime + +The Lifetime of an ArangoBackup lets us define how long an ArangoBackup is available in the system. +E.g. if we want to keep the ArangoBackup for 1 day, we can set the Lifetime to 1 day. After 1 day the ArangoBackup will be deleted automatically. + +```yaml +apiVersion: "backup.arangodb.com/v1alpha" +kind: "ArangoBackup" +metadata: + name: backup-with-one-day-lifetime +spec: + deployment: + name: deployment + lifetime: 1d +``` + +## Upload + +You can upload the backup to remote storage. +Here is an example of uploading the backup to AWS S3. + +```yaml +apiVersion: "backup.arangodb.com/v1alpha" +kind: "ArangoBackup" +metadata: + name: backup-and-upload +spec: + deployment: + name: deployment + upload: + repositoryURL: "s3:BUCKET_NAME" + credentialsSecretName: upload-credentials +``` + +To make this work, you need to create an `upload-credentials` Secret with the credentials for the remote storage: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: upload-credentials +type: Opaque +stringData: + token: | + { + "s3": { + "type": "s3", + "provider": "AWS", + "env_auth": "false", + "region": "eu-central-1", + "access_key_id": "ACCESS_KEY_ID", + "secret_access_key": "SECRET_ACCESS_KEY", + "acl": "private", + "no_check_bucket": "true" + } + } +``` diff --git a/docs/design/configuring_tz.md b/docs/design/configuring_tz.md new file mode 100644 index 000000000..3babda494 --- /dev/null +++ b/docs/design/configuring_tz.md @@ -0,0 +1,18 @@ +# Configuring timezone + +To set the timezone for cluster components, mount the required timezone into the container +by adjusting `spec.` of the ArangoDeployment resource: +```yaml +dbservers: + volumeMounts: + - mountPath: /etc/localtime + name: timezone + volumes: + - hostPath: + path: /usr/share/zoneinfo/Europe/Warsaw + type: File + name: timezone +``` + +If `/usr/share/zoneinfo` is not present on your host, you probably have to install the `tzdata` package.
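+To verify that the timezone took effect, you can compare the clock inside a server container
+with the expected zone; a quick check could look like this (the pod name is hypothetical):
+
+```bash
+# print the container-local time; with the mount above it should be Europe/Warsaw (CET/CEST)
+kubectl exec -ti cluster-prmr-xxxxx -- date
+```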
+ diff --git a/docs/design/constraints.md b/docs/design/constraints.md new file mode 100644 index 000000000..1ca3aae25 --- /dev/null +++ b/docs/design/constraints.md @@ -0,0 +1,59 @@ +# Constraints + +The ArangoDB operator tries to honor various constraints to support high availability of +the ArangoDB cluster. + +## Run agents and dbservers on separate machines + +It is essential for resilience and high availability that no two agents +are running on the same node and no two dbservers are running +on the same node. + +To ensure this, the agent and dbserver Pods are configured with pod-anti-affinity. + +```yaml +kind: Pod +spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - arangodb + - key: arangodb_cluster_name + operator: In + values: + - + - key: role + operator: In + values: + - agent (or dbserver) + topologyKey: kubernetes.io/hostname +``` + +The settings used for pod affinity are based on the `spec.environment` setting. + +For a `development` environment we use `preferredDuringSchedulingIgnoredDuringExecution` +so deployments are still possible on tiny clusters like `minikube`. + +For `production` environments we enforce (anti-)affinity using +`requiredDuringSchedulingIgnoredDuringExecution`. + +## Run coordinators on separate machines + +It is preferred to run coordinators on separate machines. + +To achieve this, the coordinator Pods are configured with pod-anti-affinity +using the `preferredDuringSchedulingIgnoredDuringExecution` setting. + +## Run syncworkers on the same machine as dbservers + +It is preferred to run syncworkers on the same machine as +dbservers. + +To achieve this, the syncworker Pods are configured with pod-affinity +using the `preferredDuringSchedulingIgnoredDuringExecution` setting. diff --git a/docs/design/dashboard.md b/docs/design/dashboard.md new file mode 100644 index 000000000..01da1eaab --- /dev/null +++ b/docs/design/dashboard.md @@ -0,0 +1,64 @@ +# Deployment Operator Dashboard + +To inspect the state of an `ArangoDeployment` you can use `kubectl get ...` to inspect +the `status` of the resource itself, but to get the entire "picture" you also +must inspect the status of the `Pods` created for the deployment, the `PersistentVolumeClaims`, +the `PersistentVolumes`, the `Services` and some `Secrets`. + +The goal of the operator dashboard is to simplify this inspection process. + +The deployment operator dashboard provides: + +- A status overview of all `ArangoDeployments` it controls +- A status overview of all resources created by the operator (for an `ArangoDeployment`) +- Run the arangoinspector on deployments +- Instructions for upgrading deployments to newer versions + +It does not provide: + +- Direct access to the deployed database +- Anything that can already be done in the web-UI of the database or naturally belongs there. + +The dashboard is a single-page web application that is served by the operator itself. + +## Design decisions + +### Leader only + +Since only the operator instance that won the leader election has the latest state of all +deployments, only that instance will serve dashboard requests. + +For this purpose, a `Service` is created when deploying the operator. +This service uses a `role=leader` selector to ensure that only the right instance +will be included in its list of endpoints.
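+For illustration, such a `Service` could look roughly like the sketch below. The
+`role: leader` label is the mechanism described above; the service name, the `app`
+label and the port name are assumptions rather than the chart's exact manifest
+(the operator's services do listen on port 8528):
+
+```yaml
+kind: Service
+apiVersion: v1
+metadata:
+  name: arango-deployment-operator
+spec:
+  selector:
+    # illustrative app label; the chart may use different labels
+    app: arango-deployment-operator
+    # only the leading operator instance carries this label
+    role: leader
+  ports:
+    - name: server
+      port: 8528
+      targetPort: 8528
+```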
+
+### Exposing the dashboard
+
+By default, the `Service` that selects the leading operator instance is not exposed outside the Kubernetes cluster.
+Users must use `kubectl expose service ...` to add additional `Services` of type `LoadBalancer`
+or `NodePort` to expose the dashboard if, and how, they want to.
+
+### Readonly behavior
+
+The dashboard only provides readonly functions.
+When modifications to an `ArangoDeployment` are needed (e.g. when upgrading to a new version), the dashboard
+will provide instructions for doing so using `kubectl` commands.
+
+In doing so, the requirements for authentication & access control of the dashboard itself remain limited,
+while all possible authentication & access control features of Kubernetes are still available to ensure
+a secure deployment.
+
+### Authentication
+
+The dashboard requires a username+password to gain access, unless it is started with an option to disable authentication.
+This username+password pair is stored in a standard basic authentication `Secret` in the Kubernetes cluster.
+
+### Frontend technology
+
+The frontend part of the dashboard will be built with React.
+This aligns with future developments in the context of the web-UI of the database itself.
+
+### Backend technology
+
+The backend of the dashboard contains an HTTPS server that serves the dashboard webpage (including all required web resources)
+and all API methods it needs.
diff --git a/docs/design/debugging.md b/docs/design/debugging.md
new file mode 100644
index 000000000..0da57cd6d
--- /dev/null
+++ b/docs/design/debugging.md
@@ -0,0 +1,31 @@
+# Collecting debug data
+
+## Agency dump
+
+To collect only the agency dump, run:
+
+```shell
+kubectl exec -ti {POD_kube-arangodb-operator} -- /usr/bin/arangodb_operator admin agency dump > agency_dump.json
+```
+
+## Deployment debug package
+
+To collect a debug package, which contains things like:
+- deployment pod logs
+- operator pod logs
+- kubernetes events
+- deployment yaml files
+- agency dump
+
+ensure you have debug mode enabled in the operator deployment:
+```bash
+helm upgrade --install kube-arangodb \
+  https://github.com/arangodb/kube-arangodb/releases/download/$VER/kube-arangodb-$VER.tgz \
+  --set "rbac.extensions.debug=true"
+```
+
+Then run:
+```shell
+kubectl exec -ti {POD_kube-arangodb-operator} -- /usr/bin/arangodb_operator debugPackage --namespace {namespace} -o - > db.tar.gz
+```
diff --git a/docs/design/exporter.md b/docs/design/exporter.md
new file mode 100644
index 000000000..36ad84789
--- /dev/null
+++ b/docs/design/exporter.md
@@ -0,0 +1,93 @@
+# ArangoDB Exporter for Prometheus
+
+This exporter exposes the statistics provided by a specific ArangoDB instance
+in a format compatible with Prometheus.
+
+## Usage
+
+To use the ArangoDB Exporter, run the following:
+
+```bash
+arangodb_operator exporter \
+  --arangodb.endpoint=http://<arangodb-host>:8529 \
+  --arangodb.jwtsecret=<path-of-jwt-secret-file> \
+  --ssl.keyfile=<path-of-keyfile>
+```
+
+This results in an ArangoDB Exporter exposing all statistics of
+the ArangoDB server (running at `http://<arangodb-host>:8529`)
+at `http://<exporter-host>:9101/metrics`.
+
+## Exporter mode
+
+Exposes ArangoDB metrics for ArangoDB >= 3.6.0.
+
+In the default mode, the metrics provided by ArangoDB's `_admin/metrics` endpoint (<= 3.7) or `_admin/metrics/v2` (3.8+) are exposed on the Exporter port.
+
+## Configuring Prometheus
+
+There are several ways to configure Prometheus to fetch metrics from the ArangoDB Exporter.
+
+Below you will find a sample Prometheus configuration file that can be used to fetch
+metrics from an ArangoDB Exporter listening on localhost port 9101 (without TLS).
+
+```yaml
+global:
+  scrape_interval: 15s
+scrape_configs:
+- job_name: arangodb
+  static_configs:
+  - targets: ['localhost:9101']
+```
+
+For more info on configuring Prometheus go to [its configuration documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration).
+
+If you're using the [Prometheus Operator](https://github.com/coreos/prometheus-operator)
+in Kubernetes, you need to create an additional `Service` and a `ServiceMonitor` resource
+like this:
+
+```yaml
+kind: Service
+apiVersion: v1
+metadata:
+  name: arangodb-exporters-service
+  labels:
+    app: arangodb-exporter
+spec:
+  selector:
+    app: arangodb-exporter
+  ports:
+  - name: metrics
+    port: 9101
+
+---
+
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: arangodb-exporter
+  namespace: monitoring
+  labels:
+    team: frontend
+    prometheus: kube-prometheus
+spec:
+  namespaceSelector:
+    matchNames:
+    - default
+  selector:
+    matchLabels:
+      app: arangodb-exporter
+  endpoints:
+  - port: metrics
+    scheme: https
+    tlsConfig:
+      insecureSkipVerify: true
+```
+
+Note 1: the typical deployment of the Prometheus operator is done in
+a namespace called `monitoring`. Make sure the `namespace`
+of the `ServiceMonitor` matches that.
+
+Note 2: the `Prometheus` custom resource has a field called `serviceMonitorSelector`.
+Make sure the `matchLabels` selector in there matches the labels of
+your `ServiceMonitor`.
\ No newline at end of file
diff --git a/docs/design/features/deployment_spec_defaults.md b/docs/design/features/deployment_spec_defaults.md
new file mode 100644
index 000000000..c93c28e36
--- /dev/null
+++ b/docs/design/features/deployment_spec_defaults.md
@@ -0,0 +1,20 @@
+# Restore defaults from last accepted state of deployment
+
+## Overview
+
+ArangoDeployment has a lot of fields that have default values.
+If `--deployment.feature.deployment-spec-defaults-restore` is enabled (which it is by default),
+the operator will restore the default values from the last accepted state of the deployment.
+
+E.g. if a user removes the `spec.dbservers` field from the deployment,
+the operator will restore the default value of this field.
+
+## How to use
+
+To disable this feature use the `--deployment.feature.deployment-spec-defaults-restore=false` arg, which needs to be passed to the operator:
+
+```shell
+helm upgrade --install kube-arangodb \
+  https://github.com/arangodb/kube-arangodb/releases/download/$VER/kube-arangodb-$VER.tgz \
+  --set "operator.args={--deployment.feature.deployment-spec-defaults-restore=false}"
+```
diff --git a/docs/design/features/ephemeral_volumes.md b/docs/design/features/ephemeral_volumes.md
new file mode 100644
index 000000000..a941659fe
--- /dev/null
+++ b/docs/design/features/ephemeral_volumes.md
@@ -0,0 +1,20 @@
+# Operator Ephemeral Volumes
+
+## Overview
+
+The operator adds 2 `EmptyDir` mounts to the ArangoDB Pods:
+
+- `ephemeral-apps`, which is mounted under `/ephemeral/app` and passed to the ArangoDB process via the `--javascript.app-path` arg
+- `ephemeral-tmp`, which is mounted under `/ephemeral/tmp` and passed to the ArangoDB process via the `--temp.path` arg
+
+This makes it possible to enable a ReadOnly FileSystem via the PodSecurityContext configuration.
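+
+Conceptually, the effect on the generated Pod spec is roughly the following (a sketch, not the exact manifest produced by the operator):
+```yaml
+spec:
+  containers:
+    - name: server
+      args:
+        - --javascript.app-path=/ephemeral/app
+        - --temp.path=/ephemeral/tmp
+      volumeMounts:
+        - name: ephemeral-apps
+          mountPath: /ephemeral/app
+        - name: ephemeral-tmp
+          mountPath: /ephemeral/tmp
+  volumes:
+    - name: ephemeral-apps
+      emptyDir: {}
+    - name: ephemeral-tmp
+      emptyDir: {}
+```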
+
+## How to use
+
+To enable this feature use the `--deployment.feature.ephemeral-volumes` arg, which needs to be passed to the operator:
+
+```shell
+helm upgrade --install kube-arangodb \
+  https://github.com/arangodb/kube-arangodb/releases/download/$VER/kube-arangodb-$VER.tgz \
+  --set "operator.args={--deployment.feature.ephemeral-volumes}"
+```
\ No newline at end of file
diff --git a/docs/design/features/failover_leader_service.md b/docs/design/features/failover_leader_service.md
new file mode 100644
index 000000000..98f43affd
--- /dev/null
+++ b/docs/design/features/failover_leader_service.md
@@ -0,0 +1,23 @@
+# Failover Leader service
+
+## Overview
+
+This feature is designed to solve the problem with the Leader service in Active Failover mode.
+It attaches the `deployment.arangodb.com/leader=true` label to the Leader member of the cluster.
+If a member is a Follower, this label is removed.
+This label is used by the cluster Services to route the traffic to the Leader member.
+
+In case of a double-Leader situation (which will be fixed in future versions of ArangoDB),
+the operator removes the `deployment.arangodb.com/leader=true` label from all members,
+which causes a cluster outage.
+
+## How to use
+
+This feature is disabled by default.
+To enable it use the `--deployment.feature.failover-leadership` arg, which needs to be passed to the operator:
+
+```shell
+helm upgrade --install kube-arangodb \
+  https://github.com/arangodb/kube-arangodb/releases/download/$VER/kube-arangodb-$VER.tgz \
+  --set "operator.args={--deployment.feature.failover-leadership}"
+```
diff --git a/docs/design/features/rebalancer.md b/docs/design/features/rebalancer.md
new file mode 100644
index 000000000..4c1cdfca5
--- /dev/null
+++ b/docs/design/features/rebalancer.md
@@ -0,0 +1,10 @@
+# ArangoDB Rebalancer Support
+
+## How to use
+
+To enable the Rebalancer in an ArangoDeployment:
+```yaml
+spec:
+  rebalancer:
+    enabled: true
+```
\ No newline at end of file
diff --git a/docs/design/features/rebalancer_v2.md b/docs/design/features/rebalancer_v2.md
new file mode 100644
index 000000000..b29fdda73
--- /dev/null
+++ b/docs/design/features/rebalancer_v2.md
@@ -0,0 +1,25 @@
+# ArangoDB Rebalancer V2 Support
+
+## Overview
+
+ArangoDB, as of 3.10.0, provides Cluster Rebalancer functionality via an [API](https://www.arangodb.com/docs/stable/http/cluster.html#rebalance).
+
+The operator uses this functionality to check the shard movement plan and enforce it on the Cluster.
+
+
+## How to use
+
+To enable this feature use the `--deployment.feature.rebalancer-v2` arg, which needs to be passed to the operator:
+
+```shell
+helm upgrade --install kube-arangodb \
+  https://github.com/arangodb/kube-arangodb/releases/download/$VER/kube-arangodb-$VER.tgz \
+  --set "operator.args={--deployment.feature.rebalancer-v2}"
+```
+
+To enable the Rebalancer in an ArangoDeployment:
+```yaml
+spec:
+  rebalancer:
+    enabled: true
+```
\ No newline at end of file
diff --git a/docs/design/features/rebuild_out_synced_shards.md b/docs/design/features/rebuild_out_synced_shards.md
new file mode 100644
index 000000000..a78006f09
--- /dev/null
+++ b/docs/design/features/rebuild_out_synced_shards.md
@@ -0,0 +1,28 @@
+# Force rebuild out-synced Shards with broken Merkle Tree
+
+## Overview
+
+During a DBServer restart (so also during an upgrade) we can face an issue with out-synced shards.
+
+This is a known problem which occurs when the Leader and a Follower disagree on the number of documents in a shard:
+they will not be able to get in sync and will retry forever.
+
+This feature is designed to solve this problem by forcing a rebuild of out-synced shards with a broken Merkle Tree,
+using an internal DBServer API.
+
+This fix is addressed to ArangoDB versions lower than 3.10.6 and 3.9.11.
+
+## How to use
+
+This feature is disabled by default.
+- To enable it use the `--deployment.feature.force-rebuild-out-synced-shards` arg, which needs to be passed to the operator.
+- Optionally, we can override the default timeouts by attaching the following args to the operator:
+  - `--timeout.shard-rebuild {duration}` - timeout after which a particular out-synced shard is considered failed and a rebuild is triggered (default 60m0s)
+  - `--timeout.shard-rebuild-retry {duration}` - timeout after which the shard-rebuild retry flow is triggered (default 60m0s)
+
+Here is an example `helm` command which enables this feature and sets the shard-rebuild timeout to 10 minutes:
+```shell
+helm upgrade --install kube-arangodb \
+  https://github.com/arangodb/kube-arangodb/releases/download/$VER/kube-arangodb-$VER.tgz \
+  --set "operator.args={--deployment.feature.force-rebuild-out-synced-shards,--timeout.shard-rebuild=10m}"
+```
diff --git a/docs/design/features/secured_containers.md b/docs/design/features/secured_containers.md
new file mode 100644
index 000000000..9de169207
--- /dev/null
+++ b/docs/design/features/secured_containers.md
@@ -0,0 +1,28 @@
+# Secured Containers
+
+## Overview
+
+This feature changes the default settings of:
+
+* PodSecurityContext
+  * `FSGroup` is set to `3000`
+* SecurityContext (Container)
+  * `RunAsUser` is set to `1000`
+  * `RunAsGroup` is set to `2000`
+  * `RunAsNonRoot` is set to `true`
+  * `ReadOnlyRootFilesystem` is set to `true`
+  * `Capabilities.Drop` is set to `["ALL"]`
+
+## Dependencies
+
+- [Operator Ephemeral Volumes](./ephemeral_volumes.md) should be Enabled and Supported.
+
+## How to use
+
+To enable this feature use the `--deployment.feature.secured-containers` arg, which needs to be passed to the operator:
+
+```shell
+helm upgrade --install kube-arangodb \
+  https://github.com/arangodb/kube-arangodb/releases/download/$VER/kube-arangodb-$VER.tgz \
+  --set "operator.args={--deployment.feature.secured-containers}"
+```
\ No newline at end of file
diff --git a/docs/design/health.md b/docs/design/health.md
new file mode 100644
index 000000000..ef5aa3051
--- /dev/null
+++ b/docs/design/health.md
@@ -0,0 +1,23 @@
+# Health checks
+
+## Liveness Probe
+
+Liveness checks are done by Kubernetes to detect `Pods` that are still running,
+but not responsive.
+
+For agents & dbservers a liveness probe is added for `/_api/version`.
+
+For syncmasters a liveness probe is added for `/_api/version` with
+a token in an `Authorization` header. If a monitoring token is specified,
+this token is used, otherwise the syncmaster JWT token is used.
+
+For syncworkers a liveness probe is added for `/_api/version` with
+a monitoring token in an `Authorization` header.
+If no monitoring token is specified, no liveness probe is added for syncworkers.
+
+## Readiness Probe
+
+Readiness probes are done by Kubernetes to exclude `Pods` from `Services` until
+they are fully ready to handle requests.
+
+For coordinators a readiness probe is added for `/_api/version`.
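+
+For illustration, such a probe corresponds roughly to the following Pod spec fragment (a sketch; the actual scheme, authentication, and timing values depend on the deployment):
+```yaml
+readinessProbe:
+  httpGet:
+    path: /_api/version
+    port: 8529
+    scheme: HTTPS
+```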
diff --git a/docs/design/lifecycle_hooks_and_finalizers.md b/docs/design/lifecycle_hooks_and_finalizers.md
new file mode 100644
index 000000000..d30b4723d
--- /dev/null
+++ b/docs/design/lifecycle_hooks_and_finalizers.md
@@ -0,0 +1,37 @@
+# Lifecycle hooks & Finalizers
+
+The ArangoDB operator expects full control of the `Pods` and `PersistentVolumeClaims` it creates.
+Therefore it takes measures to prevent the removal of those resources
+until it is safe to do so.
+
+To achieve this, the server containers in the `Pods` have
+a `preStop` hook configured and finalizers are added to the `Pods`
+and `PersistentVolumeClaims`.
+
+The `preStop` hook executes a binary that waits until all finalizers of
+the current pod have been removed.
+Until this `preStop` hook terminates, Kubernetes will not send a `TERM` signal
+to the processes inside the container, which ensures that the server keeps running
+until it is safe to stop it.
+
+The operator performs all actions needed when a deletion of a `Pod` or
+`PersistentVolumeClaim` has been triggered.
+E.g. for a dbserver it cleans out the server if the `Pod` and `PersistentVolumeClaim` are being deleted.
+
+## Lifecycle init-container
+
+Because the binary that is called in the `preStop` hook is not part of a standard
+ArangoDB docker image, it has to be brought into the filesystem of a `Pod`.
+This is done by an init-container that copies the binary to an `emptyDir` volume that
+is shared between the init-container and the server container.
+
+## Finalizers
+
+The ArangoDB operator adds the following finalizers to `Pods`:
+
+- `dbserver.database.arangodb.com/drain`: Added to DBServers, removed only when the dbserver can be restarted or is completely drained
+- `agent.database.arangodb.com/agency-serving`: Added to Agents, removed only when enough agents are left to keep the agency serving
+
+The ArangoDB operator adds the following finalizers to `PersistentVolumeClaims`:
+
+- `pvc.database.arangodb.com/member-exists`: removed only when its member no longer exists or can be safely rebuilt
diff --git a/docs/design/logging.md b/docs/design/logging.md
new file mode 100644
index 000000000..fb9f64774
--- /dev/null
+++ b/docs/design/logging.md
@@ -0,0 +1,43 @@
+# Logging configuration
+
+## Operator logging
+
+### Log level
+
+To adjust the logging level of the operator, you can use `operator.args` in the chart template values
+as described in [Additional configuration](./additional_configuration.md).
+
+For example, to set the log level to `INFO` in general and to `DEBUG` for the `requests` package, you can use the following value:
+```yaml
+operator:
+  args: ["--log.level=INFO", "--log.level=requests=DEBUG"]
+```
+
+### Log format
+
+By default, the operator logs in `pretty` format.
+
+To switch the logging format to JSON, you can use `operator.args` in the chart template values:
+```yaml
+operator:
+  args: ["--log.format=json"]
+```
+
+## ArangoDeployment logging
+
+By default, ArangoDeployment logs in `pretty` format.
+
+To switch the logging format to JSON, pass the `--log.use-json-format` argument to the ArangoDB server in the deployment:
+```yaml
+apiVersion: database.arangodb.com/v1
+kind: ArangoDeployment
+metadata:
+  name: single
+spec:
+  mode: Single
+  single:
+    args:
+      - --log.use-json-format
+      - --log.level=INFO
+      - --log.level=backup=TRACE
+```
diff --git a/docs/design/maintenance.md b/docs/design/maintenance.md
new file mode 100644
index 000000000..afbf9f6af
--- /dev/null
+++ b/docs/design/maintenance.md
@@ -0,0 +1,14 @@
+# Maintenance
+
+## ArangoDeployment
+
+Maintenance on an ArangoDeployment can be enabled using an annotation.
+
+Key: `deployment.arangodb.com/maintenance`
+Value: `true`
+
+To enable maintenance mode for an ArangoDeployment, the following kubectl command can be used:
+`kubectl annotate arangodeployment deployment deployment.arangodb.com/maintenance=true`
+
+To disable maintenance mode for an ArangoDeployment, the following kubectl command can be used:
+`kubectl annotate --overwrite arangodeployment deployment deployment.arangodb.com/maintenance-`
\ No newline at end of file
diff --git a/docs/design/metrics.md b/docs/design/metrics.md
new file mode 100644
index 000000000..964665444
--- /dev/null
+++ b/docs/design/metrics.md
@@ -0,0 +1,147 @@
+# Metrics
+
+The operator provides metrics on its operations in a format supported by [Prometheus](https://prometheus.io/).
+
+The metrics are exposed through HTTPS on port `8528` under the path `/metrics`.
+
+For a full list of available metrics, see [here](./../generated/metrics/README.md).
+
+#### Contents
+- [Integration with standard Prometheus installation (no TLS)](#Integration-with-standard-Prometheus-installation-no-TLS)
+- [Integration with standard Prometheus installation (TLS)](#Integration-with-standard-Prometheus-installation-TLS)
+- [Integration with Prometheus Operator](#Integration-with-Prometheus-Operator)
+- [Exposing ArangoDB metrics](#ArangoDB-metrics)
+
+
+## Integration with standard Prometheus installation (no TLS)
+
+After creating the operator deployment, you must configure Prometheus using a configuration file that instructs it
+which targets to scrape.
+To do so, add a new scrape job to your prometheus.yaml config:
+```yaml
+scrape_configs:
+  - job_name: 'arangodb-operator'
+
+    scrape_interval: 10s # scrape every 10 seconds.
+
+    scheme: 'https'
+    tls_config:
+      insecure_skip_verify: true
+
+    static_configs:
+      - targets:
+        - "<operator-host>:8528"
+```
+
+## Integration with standard Prometheus installation (TLS)
+
+By default, the operator uses a self-signed certificate for its server API.
+To use your own certificate, you need to create a k8s secret containing the certificate and provide the secret name to the operator.
+
+Create the k8s secret (in the same namespace where the operator is running):
+```shell
+kubectl create secret tls my-own-certificate --cert ./cert.crt --key ./cert.key
+```
+Then edit the operator deployment definition (`kubectl edit deployments.apps`) to use your secret for its server API:
+```
+spec:
+  # ...
+  containers:
+    # ...
+    args:
+      - --server.tls-secret-name=my-own-certificate
+      # ...
+```
+Wait for the operator pods to restart.
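+
+For example, you can wait for the rollout to complete with (the deployment name is illustrative):
+```shell
+kubectl rollout status deployment/arango-deployment-operator
+```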
+
+Now update the Prometheus config to use your certificate for the operator scrape job:
+```yaml
+tls_config:
+  # if you are using self-signed certificate, just specify CA certificate:
+  ca_file: /etc/prometheus/rootCA.crt
+
+  # otherwise, specify the generated client certificate and key:
+  cert_file: /etc/prometheus/cert.crt
+  key_file: /etc/prometheus/cert.key
+```
+
+## Integration with Prometheus Operator
+
+Assuming that you have the [Prometheus Operator](https://prometheus-operator.dev/) installed in your cluster (`monitoring` namespace),
+and kube-arangodb installed in the `default` namespace, you can easily configure the integration with the ArangoDB operator.
+
+The easiest way to do that is to create a new ServiceMonitor:
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: arango-deployment-operator
+  namespace: monitoring
+  labels:
+    prometheus: kube-prometheus
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: kube-arangodb
+  namespaceSelector:
+    matchNames:
+      - default
+  endpoints:
+    - port: server
+      scheme: https
+      tlsConfig:
+        insecureSkipVerify: true
+```
+
+You can also find an example Grafana dashboard in the `examples/metrics` folder of this repo.
+
+
+
+## ArangoDB metrics
+
+The operator can run sidecar containers for ArangoDB deployments of type `Cluster` which expose metrics in Prometheus format.
+Edit your `ArangoDeployment` resource, setting `spec.metrics.enabled` to true to enable ArangoDB metrics:
+```yaml
+spec:
+  metrics:
+    enabled: true
+```
+
+The operator will run a sidecar container for every cluster component.
+In addition to the sidecar containers the operator will deploy a `Service` to access the exporter ports (from within the k8s cluster),
+and a resource of type `ServiceMonitor`, provided the corresponding custom resource definition is deployed in the k8s cluster.
+If you are running Prometheus in the same k8s cluster with the Prometheus operator, this will be the case.
+The ServiceMonitor will have the following labels set:
+```yaml
+app: arangodb
+arango_deployment: YOUR_DEPLOYMENT_NAME
+context: metrics
+metrics: prometheus
+```
+This makes it possible to configure your Prometheus deployment to automatically start monitoring on the available Prometheus feeds.
+To this end, you must configure the `serviceMonitorSelector` in the specs of your Prometheus deployment to match these labels. For example:
+```yaml
+serviceMonitorSelector:
+  matchLabels:
+    metrics: prometheus
+```
+would automatically select all pods of all ArangoDB cluster deployments which have metrics enabled.
+
+By default, the sidecar metrics exporters use TLS for all connections. You can disable TLS by specifying:
+```yaml
+spec:
+  metrics:
+    enabled: true
+    tls: false
+```
+
+You can fine-tune the monitored metrics by specifying `ArangoDeployment` annotations. Example:
+```yaml
+spec:
+  annotations:
+    prometheus.io/scrape: 'true'
+    prometheus.io/port: '9101'
+    prometheus.io/scrape_interval: '5s'
+```
+
+See the [list of exposed ArangoDB metrics](https://www.arangodb.com/docs/stable/http/administration-and-monitoring-metrics.html#list-of-exposed-metrics).
diff --git a/docs/design/pod_evication_and_replacement.md b/docs/design/pod_evication_and_replacement.md
new file mode 100644
index 000000000..8a5fa5e94
--- /dev/null
+++ b/docs/design/pod_evication_and_replacement.md
@@ -0,0 +1,124 @@
+# Pod Eviction & Replacement
+
+This chapter specifies the rules around evicting pods from nodes and
+restarting or replacing them.
+
+## Eviction
+
+Eviction is the process of removing a pod that is running on a node from that node.
+
+This is typically the result of a drain action (`kubectl drain`) or
+of a taint being added to a node (either automatically by Kubernetes or manually by an operator).
+
+## Replacement
+
+Replacement is the process of replacing a pod with another pod that takes over the responsibilities
+of the original pod.
+
+The replacement pod has a new ID and new (read: empty) persistent data.
+
+Note that replacing a pod is different from restarting a pod. A pod is restarted when it has been reported
+to have terminated.
+
+## NoExecute Tolerations
+
+NoExecute tolerations are used to control the behavior of Kubernetes (with respect to a Pod) when the node
+that the pod is running on is no longer reachable or becomes not-ready.
+
+See the applicable [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) for more info.
+
+## Rules
+
+The rules for eviction & replacement are specified per type of pod.
+
+### Image ID Pods
+
+The Image ID pods are started to fetch the ArangoDB version of a specific
+ArangoDB image and the docker sha256 of that image.
+They have no persistent state.
+
+- Image ID pods can always be evicted from any node
+- Image ID pods can always be restarted on a different node.
+  There is no need to replace an image ID pod, nor will it cause problems when
+  2 image ID pods run at the same time.
+- `node.kubernetes.io/unreachable:NoExecute` toleration time is set very low (5sec)
+- `node.kubernetes.io/not-ready:NoExecute` toleration time is set very low (5sec)
+
+### Coordinator Pods
+
+Coordinator pods run an ArangoDB coordinator as part of an ArangoDB cluster.
+They have no persistent state, but do have a unique ID.
+
+- Coordinator pods can always be evicted from any node
+- Coordinator pods can always be replaced with another coordinator pod with a different ID on a different node
+- `node.kubernetes.io/unreachable:NoExecute` toleration time is set low (15sec)
+- `node.kubernetes.io/not-ready:NoExecute` toleration time is set low (15sec)
+
+### DBServer Pods
+
+DBServer pods run an ArangoDB dbserver as part of an ArangoDB cluster.
+It has persistent state potentially tied to the node it runs on and it has a unique ID.
+
+- DBServer pods can be evicted from any node as soon as:
+  - It has been completely drained AND
+  - It is no longer the shard master for any shard
+- DBServer pods can be replaced with another dbserver pod with a different ID on a different node when:
+  - It is not the shard master for any shard OR
+  - For every shard it is the master for, there is an in-sync follower
+- `node.kubernetes.io/unreachable:NoExecute` toleration time is set high to "wait it out a while" (5min)
+- `node.kubernetes.io/not-ready:NoExecute` toleration time is set high to "wait it out a while" (5min)
+
+### Agent Pods
+
+Agent pods run an ArangoDB agent as part of an ArangoDB agency.
+It has persistent state potentially tied to the node it runs on and it has a unique ID.
+
+- Agent pods can be evicted from any node as soon as:
+  - It is no longer the agency leader AND
+  - There is an agency leader that is responding AND
+  - There is at least one agency follower that is responding
+- Agent pods can be replaced with another agent pod with the same ID but wiped persistent state on a different node when:
+  - The old pod is known to be deleted (e.g. explicit eviction)
+- `node.kubernetes.io/unreachable:NoExecute` toleration time is set high to "wait it out a while" (5min)
+- `node.kubernetes.io/not-ready:NoExecute` toleration time is set high to "wait it out a while" (5min)
+
+### Single Server Pods
+
+Single server pods run an ArangoDB server as part of an ArangoDB single server deployment.
+It has persistent state potentially tied to the node.
+
+- Single server pods cannot be evicted from any node.
+- Single server pods cannot be replaced with another pod.
+- `node.kubernetes.io/unreachable:NoExecute` toleration time is not set, to "wait it out forever"
+- `node.kubernetes.io/not-ready:NoExecute` toleration time is not set, to "wait it out forever"
+
+### Single Pods in Active Failover Deployment
+
+Single pods run an ArangoDB single server as part of an ArangoDB active failover deployment.
+It has persistent state potentially tied to the node it runs on and it has a unique ID.
+
+- Single pods can be evicted from any node as soon as:
+  - It is a follower of an active-failover deployment (Q: can we trigger this failover to another server?)
+- Single pods can always be replaced with another single pod with a different ID on a different node.
+- `node.kubernetes.io/unreachable:NoExecute` toleration time is set high to "wait it out a while" (5min)
+- `node.kubernetes.io/not-ready:NoExecute` toleration time is set high to "wait it out a while" (5min)
+
+### SyncMaster Pods
+
+SyncMaster pods run an ArangoSync master as part of an ArangoDB DC2DC cluster.
+They have no persistent state, but do have a unique address.
+
+- SyncMaster pods can always be evicted from any node
+- SyncMaster pods can always be replaced with another syncmaster pod on a different node
+- `node.kubernetes.io/unreachable:NoExecute` toleration time is set low (15sec)
+- `node.kubernetes.io/not-ready:NoExecute` toleration time is set low (15sec)
+
+### SyncWorker Pods
+
+SyncWorker pods run an ArangoSync worker as part of an ArangoDB DC2DC cluster.
+They have no persistent state, but do have in-memory state and a unique address.
+
+- SyncWorker pods can always be evicted from any node
+- SyncWorker pods can always be replaced with another syncworker pod on a different node
+- `node.kubernetes.io/unreachable:NoExecute` toleration time is set a bit higher to try to avoid resynchronization (1min)
+- `node.kubernetes.io/not-ready:NoExecute` toleration time is set a bit higher to try to avoid resynchronization (1min)
diff --git a/docs/design/pod_name_versus_cluster_id.md b/docs/design/pod_name_versus_cluster_id.md
new file mode 100644
index 000000000..52b2fb8e8
--- /dev/null
+++ b/docs/design/pod_name_versus_cluster_id.md
@@ -0,0 +1,18 @@
+# Kubernetes Pod name versus cluster ID
+
+All resources being created will get a name that contains
+the user-provided cluster name and a unique part.
+
+The unique part will be different for every pod that
+is being created.
+E.g. when upgrading to a new version, we generate a new
+unique pod name.
+
+The servers in the ArangoDB cluster will be assigned
+a persistent, unique ID.
+When a Pod changes (e.g. because of an upgrade) the
+Pod name changes, but the cluster ID remains the same.
+
+As a result, the status part of the custom resource
+must list the current Pod name and cluster ID for
+every server.
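+
+For illustration, such a status entry could look like this (a sketch using the field names from the status design document; the values are made up):
+```yaml
+status:
+  members:
+    dbservers:
+      - podName: cluster-dbserver-abc123
+        clusterID: PRMR-9xztmg4t
+        state: Ready
+```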
diff --git a/docs/design/recovery.md b/docs/design/recovery.md
new file mode 100644
index 000000000..7b1a14c86
--- /dev/null
+++ b/docs/design/recovery.md
@@ -0,0 +1,270 @@
+# Manual Recovery
+
+## Overview
+Let's consider a situation where we had an ArangoDeployment in Cluster mode (3 DBServers, 3 Coordinators, 3 Agents)
+with Local storage attached (only one K8s Node in the K8s cluster).
+
+For some reason the ArangoDeployment was deleted (e.g. the ETCD storage has been wiped out) and we want to recover it.
+Fortunately, we have a backup of the data on the disk.
+
+To recover the ArangoDeployment we need to:
+1. Create a PV and PVC for each member with persistent storage (agents, dbservers, single)
+2. Create a new ArangoDeployment with the same member IDs
+
+## Local storage data
+
+We have the members' (Agents & DBServers) data in the following directories:
+```bash
+> ls -1 /var/data/
+f9rs2htwc9e0bzme
+fepwdnnbf0keylgx
+gqnkahucthoaityt
+vka6ic19qcl1y3ec
+rhlf8vixbsbewefo
+rlzl467vfgsdpofu
+```
+
+To find out the name of the members to which data should be attached,
+we need to check the `UUID` file content in each directory:
+```bash
+> cat /var/data/*/UUID
+AGNT-pntg5yc8
+AGNT-kfyuj8ow
+AGNT-bv5rofcz
+PRMR-9xztmg4t
+PRMR-l1pp19yl
+PRMR-31akmzrp
+```
+
+## Initial ArangoDeployment
+
+Here is an example of the initial ArangoDeployment before deletion:
+```yaml
+cat <`.
+  - Labels:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+    - `role: single`
+- `PersistentVolumeClaim` for data stored in the single server, named `<deployment-name>_pvc`.
+  - Labels:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+    - `role: single`
+- `Service` for accessing the single server, named `<deployment-name>`.
+  The service will provide access to the single server from within the Kubernetes cluster.
+  - Labels:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+    - `role: single`
+
+## Full cluster
+
+For a full cluster deployment, the following Kubernetes resources are created:
+
+- `Pods` running ArangoDB agents, named `<deployment-name>_agent_<id>`.
+  - Labels:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+    - `role: agent`
+
+- `PersistentVolumeClaims` for data stored in the agents, named `<deployment-name>_agent_pvc_<id>`.
+  - Labels:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+    - `role: agent`
+
+- `Pods` running ArangoDB coordinators, named `<deployment-name>_coordinator_<id>`.
+  - Labels:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+    - `role: coordinator`
+  - Note: Coordinators are configured to use an `emptyDir` volume since
+    they do not need persistent storage.
+
+- `Pods` running ArangoDB dbservers, named `<deployment-name>_dbserver_<id>`.
+  - Labels:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+    - `role: dbserver`
+
+- `PersistentVolumeClaims` for data stored in the dbservers, named `<deployment-name>_dbserver_pvc_<id>`.
+  - Labels:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+    - `role: dbserver`
+
+- Headless `Service` for accessing all servers, named `<deployment-name>_servers`.
+  The service will provide access to all servers from within the k8s cluster.
+  - Labels:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+  - Selector:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+
+- `Service` for accessing all coordinators, named `<deployment-name>`.
+  The service will provide access to all coordinators from within the k8s cluster.
+  - Labels:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+    - `role: coordinator`
+  - Selector:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+    - `role: coordinator`
+
+## Full cluster with DC2DC
+
+For a full cluster with datacenter replication deployment,
+the same resources are created as for a Full cluster, with the following
+additions:
+
+- `Pods` running ArangoSync workers, named `<deployment-name>_syncworker_<id>`.
+  - Labels:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+    - `role: syncworker`
+
+- `Pods` running ArangoSync masters, named `<deployment-name>_syncmaster_<id>`.
+  - Labels:
+    - `app=arangodb`
+    - `arangodb_deployment: <deployment-name>`
+    - `role: syncmaster`
+
+- `Service` for accessing the sync masters, named `<deployment-name>_sync`.
+  The service will provide access to all syncmasters from within the Kubernetes cluster.
diff --git a/docs/design/resource_management.md b/docs/design/resource_management.md
new file mode 100644
index 000000000..324466a29
--- /dev/null
+++ b/docs/design/resource_management.md
@@ -0,0 +1,22 @@
+# Resource Management
+
+## overrideDetectedTotalMemory
+
+The `spec.<group>.overrideDetectedTotalMemory` flag is an option that allows users to override the total memory available to the ArangoDB member
+by automatically injecting the `ARANGODB_OVERRIDE_DETECTED_TOTAL_MEMORY` ENV variable into the container with the value of `spec.<group>.resources.limits.memory`.
+
+Sample:
+
+```yaml
+apiVersion: database.arangodb.com/v1
+kind: ArangoDeployment
+metadata:
+  name: cluster
+spec:
+  mode: Cluster
+  dbservers:
+    overrideDetectedTotalMemory: true
+    resources:
+      limits:
+        memory: 1Gi
+```
diff --git a/docs/design/rotating.md b/docs/design/rotating.md
new file mode 100644
index 000000000..bffc003c6
--- /dev/null
+++ b/docs/design/rotating.md
@@ -0,0 +1,13 @@
+# Rotation
+
+## ArangoDeployment
+
+Rotation of ArangoDeployment Pods can be triggered by Pod deletion or by an annotation (the safe way).
+
+When the annotation is used, Pods are rotated one by one, which keeps the cluster alive.
+
+Key: `deployment.arangodb.com/rotate`
+Value: `true`
+
+To rotate an ArangoDeployment Pod, the following kubectl command can be used:
+`kubectl annotate pod arango-pod deployment.arangodb.com/rotate=true`
diff --git a/docs/design/scaling.md b/docs/design/scaling.md
new file mode 100644
index 000000000..68c479f07
--- /dev/null
+++ b/docs/design/scaling.md
@@ -0,0 +1,28 @@
+# Scaling
+
+The internal process followed by the ArangoDB operator
+when scaling up is as follows:
+
+- Set CR state to `Scaling`
+- Create an additional server Pod
+- Wait until the server is ready before continuing
+- Set CR state to `Ready`
+
+The internal process followed by the ArangoDB operator
+when scaling down a dbserver is as follows:
+
+- Set CR state to `Scaling`
+- Drain the dbserver (TODO fill in procedure)
+- Shutdown the dbserver such that it removes itself from the agency
+- Remove the dbserver Pod
+- Set CR state to `Ready`
+
+The internal process followed by the ArangoDB operator
+when scaling down a coordinator is as follows:
+
+- Set CR state to `Scaling`
+- Shutdown the coordinator such that it removes itself from the agency
+- Remove the coordinator Pod
+- Set CR state to `Ready`
+
+Note: Scaling is always done 1 server at a time.
diff --git a/docs/design/status.md b/docs/design/status.md
new file mode 100644
index 000000000..01b4f2e81
--- /dev/null
+++ b/docs/design/status.md
@@ -0,0 +1,33 @@
+# Status
+
+The status field of the `CustomResource` must contain all persistent state needed to
+create & maintain the cluster.
+
+## `status.state: string`
+
+This field contains the current status of the cluster.
+Possible values are:
+
+- `Creating` when the cluster is first being created.
+- `Ready` when all pods of the cluster are in running state.
+- `Scaling` when pods are being added to an existing cluster or removed from an existing cluster.
+- `Upgrading` when the cluster is in the process of being upgraded to another version.
+
+## `status.members.<group>.[x].state: string`
+
+This field contains the pod state of server x of this group.
+Possible values are:
+
+- `Creating` when the pod is about to be created.
+- `Ready` when the pod has been created.
+- `Draining` when a dbserver pod is being drained.
+- `ShuttingDown` when a server is in the process of shutting down.
+
+## `status.members.<group>.[x].podName: string`
+
+This field contains the name of the current pod that runs server x of this group.
+
+## `status.members.<group>.[x].clusterID: string`
+
+This field contains the unique cluster ID of server x of this group.
+The field is only valid for the groups `single`, `agents`, `dbservers` & `coordinators`.
diff --git a/docs/design/test_clusters.md b/docs/design/test_clusters.md
new file mode 100644
index 000000000..73d1c461d
--- /dev/null
+++ b/docs/design/test_clusters.md
@@ -0,0 +1,16 @@
+# Test clusters
+
+The ArangoDB operator is tested on various types of Kubernetes clusters.
+
+To prepare a cluster for running the ArangoDB operator tests,
+do the following:
+
+- Create a `kubectl` config file for accessing the cluster.
+- Use that config file.
+- Run `./scripts/kube_configure_test_cluster.sh`. This creates a `ConfigMap`
+  named `arango-operator-test` in the `kube-system` namespace containing the
+  following environment variables:
+
+```bash
+REQUIRE_LOCAL_STORAGE=1
+```
diff --git a/docs/design/testing.md b/docs/design/testing.md
new file mode 100644
index 000000000..25e4680ab
--- /dev/null
+++ b/docs/design/testing.md
@@ -0,0 +1,40 @@
+# Testing
+
+## Scenarios
+
+The following test scenarios must be covered by automated tests:
+
+- Creating 1 deployment (all modes, all environments, all storage engines)
+- Creating multiple deployments (all modes, all environments, all storage engines),
+  controlling each individually
+- Creating deployment with/without authentication
+- Creating deployment with/without TLS
+
+- Updating deployment with respect to:
+  - Number of servers (scaling, up/down)
+  - Image version (upgrading, downgrading within same minor version range (e.g. 3.2.x))
+  - Immutable fields (should be reset automatically)
+
+- Resilience:
+  - Delete individual pods
+  - Delete individual PVCs
+  - Delete individual Services
+  - Delete Node
+  - Restart Node
+  - API server unavailable
+
+- Persistent Volumes:
+  - hint: RBAC file might need to be changed
+  - hint: get info via - client-go.CoreV1()
+  - Number of volumes should stay in reasonable bounds
+  - For some cases it might be possible to check that the amount before and after the test stays the same
+  - A Cluster start should need 6 Volumes (DBServer + Agents)
+  - The release of a volume-claim should result in a release of the volume
+
+## Test environments
+
+- Kubernetes clusters
+  - Single node
+  - Multi node
+  - Access control mode (RBAC, ...)
+  - Persistent volumes ...
diff --git a/docs/design/topology_awareness.md b/docs/design/topology_awareness.md
new file mode 100644
index 000000000..83402213a
--- /dev/null
+++ b/docs/design/topology_awareness.md
@@ -0,0 +1,198 @@
+# Topology awareness
+
+## Table of contents
+1. [Overview](#1)
+2. [Requirements](#2)
+3. [Enable/Disable topology](#3)
+4. [Check topology](#4)
+
+## Overview
+
+Topology awareness is responsible for the even distribution of groups of pods across nodes in the cluster.
+The distribution is done by zone, so that if one of the zones fails there are still working pods
+in other zones. For the time being, there are 3 groups of pods that can be distributed evenly
+(coordinators, agents, DB servers). For each of these groups, the Kube-ArangoDB operator
+tries to distribute the pods across different zones in a cluster, so there cannot
+be a situation where many pods of the same group exist in one zone while there are no
+pods in other zones, which would lead to many issues if the zone with many pods failed.
+When the Kube-ArangoDB operator is going to add a new pod but all zones already contain a pod of this group,
+it chooses the zone with the fewest pods of this group.
+
+#### Example
+Let's say we have two zones (uswest-1, uswest-2) and we would like to distribute an ArangoDB cluster
+with 3 coordinators, 3 agents, and 3 DB servers. The first coordinator, agent, and DB server would go to a random zone (e.g. uswest-1).
+The second coordinator must be assigned to the `uswest-2` zone, because the zone `uswest-1` already contains one coordinator.
+The same happens for the second agent and the second DB server. The third coordinator can be placed randomly,
+because each of the zones contains exactly one coordinator, so after this operation one of the zones will have 2 coordinators
+and the other zone will have 1 coordinator. The same applies to agents and DB servers.
+
+According to the above example we can see that:
+- coordinators should not be assigned to the same zone with other coordinators, unless ALL zones contain coordinators.
+- agents should not be placed in the same zone with other agents, unless ALL zones contain agents.
+- DB servers should not be placed in the same zone with other DB servers, unless ALL zones contain DB servers.
+
+## Requirements
+
+- It does not work in the `Single` mode of a deployment.
+  The `spec.mode` of the Kubernetes resource ArangoDeployment cannot be set to `Single`.
+- Kube-ArangoDB must be at least version 1.2.10, Enterprise Edition.
+
+## How to enable/disable topology awareness for the ArangoDeployment
+
+Enable topology:
+```yaml
+spec:
+  topology:
+    enabled: true
+    label: string # A node's label which will be considered as distribution affinity. By default: 'topology.kubernetes.io/zone'
+    zones: int # How many zones will be used to assign pods there. It must be higher than 0.
+```
+
+Disable topology:
+```yaml
+spec:
+  topology:
+    enabled: false
+```
+or remove the `spec.topology` object.
+
+## How to check which ArangoDB members are assigned to the topology
+
+#### Topology aware
+
+Each member should be topology aware, and this can be checked in the list of conditions at `status.members.[agents|coordinators|dbservers].conditions`.
+Example:
+```yaml
+status:
+  ...
+  members:
+    agents:
+      - conditions:
+          reason: Topology awareness enabled
+          status: True
+          type: TopologyAware
+```
+
+If the `status` of the condition type `TopologyAware` is set to `false`, the ArangoDB member has to be replaced.
+To do so, set the pod's annotation `deployment.arangodb.com/replace` to `true`, starting with all
+coordinators which are not assigned to any zone. This situation usually happens when
+topology was enabled on an existing ArangoDeployment resource.
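+
+For example, to mark a member's Pod for replacement:
+```bash
+kubectl annotate pod {MY_POD} deployment.arangodb.com/replace=true
+```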
+
+#### Member topology
+Each member's status should have a topology, and it can be checked at `status.members.[agents|coordinators|dbservers].topology` and at `status.topology`.
+Example:
+```yaml
+status:
+  ...
+  members:
+    agents:
+      - id: AGNT-2shphs7a
+        topology:
+          id: 35a61527-9d2b-49df-8a31-e62417fcd7e6
+          label: eu-central-1c
+          rack: 0
+  ...
+  topology:
+    id: 35a61527-9d2b-49df-8a31-e62417fcd7e6
+    label: topology.kubernetes.io/zone
+    size: 3
+    zones:
+      - id: 0
+        labels:
+          - eu-central-1c
+        members:
+          agnt:
+            - AGNT-2shphs7a
+        ...
+      - ...
+  ...
+```
+which means that `AGNT-2shphs7a` is assigned to `eu-central-1c`.
+
+#### Pod's labels
+
+A pod which belongs to the member should have two new labels.
+Example:
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    deployment.arangodb.com/topology: 35a61527-9d2b-49df-8a31-e62417fcd7e6
+    deployment.arangodb.com/zone: "0"
+```
+
+#### Pod anti-affinity
+
+A pod which belongs to the member should have new pod anti-affinity rules.
+Example:
+```yaml
+spec:
+  affinity:
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchExpressions:
+              - key: deployment.arangodb.com/topology
+                operator: In
+                values:
+                  - 35a61527-9d2b-49df-8a31-e62417fcd7e6
+              - ...
+              - key: deployment.arangodb.com/zone
+                operator: In
+                values:
+                  - "1"
+                  - "2"
+                  - ...
+          topologyKey: topology.kubernetes.io/zone
+        - ...
+```
+which means that the pod cannot be assigned to zones `1` and `2`.
+
+#### Node affinity
+
+A pod which belongs to the member can have node affinity rules. If a pod does not have them, it will have pod affinities instead.
+Example:
+```yaml
+spec:
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+          - matchExpressions:
+              - key: topology.kubernetes.io/zone
+                operator: In
+                values:
+                  - eu-central-1c
+                  - ...
+          - matchExpressions:
+              - key: topology.kubernetes.io/zone
+                operator: NotIn
+                values:
+                  - eu-central-1a
+                  - eu-central-1b
+                  - ...
+```
+
+#### Pod affinity
+
+A pod which belongs to the member can have pod affinity rules. If a pod does not have them, it will have node affinity instead.
+Example:
+```yaml
+spec:
+  affinity:
+    podAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchExpressions:
+              - key: deployment.arangodb.com/topology
+                operator: In
+                values:
+                  - 35a61527-9d2b-49df-8a31-e62417fcd7e6
+              - key: deployment.arangodb.com/zone
+                operator: In
+                values:
+                  - "1"
+                  - ...
+          topologyKey: topology.kubernetes.io/zone
+```
diff --git a/docs/design/upgrading.md b/docs/design/upgrading.md
new file mode 100644
index 000000000..e30937f4c
--- /dev/null
+++ b/docs/design/upgrading.md
@@ -0,0 +1,32 @@
+# Upgrade procedure
+
+## Upgrading ArangoDB single to another version
+
+The process for upgrading an existing ArangoDB single server
+to another version is as follows:
+
+- Set CR state to `Upgrading`
+- Remove the server Pod (keep persistent volume)
+- Create a new server Pod with the new version
+- Wait until the server is ready before continuing
+- Set CR state to `Ready`
+
+## Upgrading ArangoDB cluster to another version
+
+The process for upgrading an existing ArangoDB cluster
+to another version is as follows:
+
+- Set CR state to `Upgrading`
+- For each agent:
+  - Remove the agent Pod (keep persistent volume)
+  - Create a new agent Pod with the new version
+  - Wait until the agent is ready before continuing
+- For each dbserver:
+  - Remove the dbserver Pod (keep persistent volume)
+  - Create a new dbserver Pod with the new version
+  - Wait until the dbserver is ready before continuing
+- For each coordinator:
+  - Remove the coordinator Pod (keep persistent volume)
+  - Create a new coordinator Pod with the new version
+  - Wait until the coordinator is ready before continuing
+- Set CR state to `Ready`
diff --git a/docs/generated/actions.md b/docs/generated/actions.md
new file mode 100644
index 000000000..a32c19615
--- /dev/null
+++ b/docs/generated/actions.md
@@ -0,0 +1,188 @@
+# ArangoDB Operator Actions
+
+## List
+
+
+| Action | Internal | Timeout | Optional | Edition | Description |
+|:------:|:--------:|:-------:|:--------:|:-------:|:-----------:|
+| AddMember | no | 10m0s | no | Community & Enterprise | Adds new member to the Member list |
+| AppendTLSCACertificate | no | 30m0s | no | Enterprise Only | Append Certificate into CA TrustStore |
+| ArangoMemberUpdatePodSpec | no | 10m0s | no | Community & Enterprise | Propagate Member Pod spec (requested) |
+| ArangoMemberUpdatePodStatus | no | 10m0s | no | Community & Enterprise | Propagate Member Pod status (current) |
+| BackupRestore | no | 15m0s | no | Enterprise Only | Restore selected Backup |
+| BackupRestoreClean | no | 15m0s | no | Enterprise Only | Clean restore status in case of restore spec change |
+| BootstrapSetPassword | no | 10m0s | no | Community & Enterprise | Change password during bootstrap procedure |
+| BootstrapUpdate | no | 10m0s | no | Community & Enterprise | Update bootstrap status |
+| CleanMemberService | no | 30m0s | no | Community & Enterprise | Removes Server Service |
+| CleanOutMember | no | 48h0m0s | no | Community & Enterprise | Run the CleanOut job on member |
+| CleanTLSCACertificate | no | 30m0s | no | Enterprise Only | Remove Certificate from CA TrustStore |
+| CleanTLSKeyfileCertificate | no | 30m0s | no | Enterprise Only | Remove old TLS certificate from server |
+| ClusterMemberCleanup | no | 10m0s | no | Community & Enterprise | Remove member from Cluster if it is gone already (Coordinators) |
+| ~~DisableClusterScaling~~ | no | 10m0s | no | Community & Enterprise | Disable Cluster Scaling integration |
+| DisableMaintenance | no | 10m0s | no | Community & Enterprise | Disable ArangoDB maintenance mode |
+| DisableMemberMaintenance | no | 10m0s | no | Enterprise Only | Disable ArangoDB DBServer maintenance mode |
+| ~~EnableClusterScaling~~ | no | 10m0s |
no | Community & Enterprise | Enable Cluster Scaling integration | +| EnableMaintenance | no | 10m0s | no | Community & Enterprise | Enable ArangoDB maintenance mode | +| EnableMemberMaintenance | no | 10m0s | no | Enterprise Only | Enable ArangoDB DBServer maintenance mode | +| EncryptionKeyAdd | no | 10m0s | no | Enterprise Only | Add the encryption key to the pool | +| EncryptionKeyPropagated | no | 10m0s | no | Enterprise Only | Update condition of encryption propagation | +| EncryptionKeyRefresh | no | 10m0s | no | Enterprise Only | Refresh the encryption keys on member | +| EncryptionKeyRemove | no | 10m0s | no | Enterprise Only | Remove the encryption key to the pool | +| EncryptionKeyStatusUpdate | no | 10m0s | no | Enterprise Only | Update status of encryption propagation | +| EnforceResignLeadership | no | 45m0s | yes | Community & Enterprise | Run the ResignLeadership job on DBServer and checks data compatibility after | +| Idle | no | 10m0s | no | Community & Enterprise | Define idle operation in case if preconditions are not meet | +| JWTAdd | no | 10m0s | no | Enterprise Only | Adds new JWT to the pool | +| JWTClean | no | 10m0s | no | Enterprise Only | Remove JWT key from the pool | +| JWTPropagated | no | 10m0s | no | Enterprise Only | Update condition of JWT propagation | +| JWTRefresh | no | 10m0s | no | Enterprise Only | Refresh current JWT secrets on the member | +| JWTSetActive | no | 10m0s | no | Enterprise Only | Change active JWT key on the cluster | +| JWTStatusUpdate | no | 10m0s | no | Enterprise Only | Update status of JWT propagation | +| KillMemberPod | no | 10m0s | no | Community & Enterprise | Execute Delete on Pod (put pod in Terminating state) | +| LicenseSet | no | 10m0s | no | Community & Enterprise | Update Cluster license (3.9+) | +| MarkToRemoveMember | no | 10m0s | no | Community & Enterprise | Marks member to be removed. Used when member Pod is annotated with replace annotation | +| MemberPhaseUpdate | no | 10m0s | no | Community & Enterprise | Change member phase | +| ~~MemberRIDUpdate~~ | no | 10m0s | no | Community & Enterprise | Update Run ID of member | +| MemberStatusSync | no | 10m0s | no | Community & Enterprise | Sync ArangoMember Status with ArangoDeployment Status, to keep Member information up to date | +| PVCResize | no | 30m0s | no | Community & Enterprise | Start the resize procedure. 
Updates PVC Requests field | +| PVCResized | no | 15m0s | no | Community & Enterprise | Waits for PVC resize to be completed | +| PlaceHolder | no | 10m0s | no | Community & Enterprise | Empty placeholder action | +| RebalancerCheck | no | 10m0s | no | Enterprise Only | Check Rebalancer job progress | +| RebalancerCheckV2 | no | 10m0s | no | Community & Enterprise | Check Rebalancer job progress | +| RebalancerClean | no | 10m0s | no | Enterprise Only | Cleans Rebalancer jobs | +| RebalancerCleanV2 | no | 10m0s | no | Community & Enterprise | Cleans Rebalancer jobs | +| RebalancerGenerate | yes | 10m0s | no | Enterprise Only | Generates the Rebalancer plan | +| RebalancerGenerateV2 | yes | 10m0s | no | Community & Enterprise | Generates the Rebalancer plan | +| RebuildOutSyncedShards | no | 24h0m0s | no | Community & Enterprise | Run Rebuild Out Synced Shards procedure for DBServers | +| RecreateMember | no | 15m0s | no | Community & Enterprise | Recreate member with same ID and Data | +| RefreshTLSKeyfileCertificate | no | 30m0s | no | Enterprise Only | Recreate Server TLS Certificate secret | +| RemoveMember | no | 15m0s | no | Community & Enterprise | Removes member from the Cluster and Status | +| RemoveMemberPVC | no | 15m0s | no | Community & Enterprise | Removes member PVC and enforce recreate procedure | +| RenewTLSCACertificate | no | 30m0s | no | Enterprise Only | Recreate Managed CA secret | +| RenewTLSCertificate | no | 30m0s | no | Enterprise Only | Recreate Server TLS Certificate secret | +| ResignLeadership | no | 30m0s | yes | Community & Enterprise | Run the ResignLeadership job on DBServer | +| ResourceSync | no | 10m0s | no | Community & Enterprise | Runs the Resource sync | +| RotateMember | no | 15m0s | no | Community & Enterprise | Waits for Pod restart and recreation | +| RotateStartMember | no | 15m0s | no | Community & Enterprise | Start member rotation. After this action member is down | +| RotateStopMember | no | 15m0s | no | Community & Enterprise | Finalize member rotation. 
After this action member is started back | +| RuntimeContainerArgsLogLevelUpdate | no | 10m0s | no | Community & Enterprise | Change ArangoDB Member log levels in runtime | +| RuntimeContainerImageUpdate | no | 10m0s | no | Community & Enterprise | Update Container Image in runtime | +| RuntimeContainerSyncTolerations | no | 10m0s | no | Community & Enterprise | Update Pod Tolerations in runtime | +| ~~SetCondition~~ | no | 10m0s | no | Community & Enterprise | Set deployment condition | +| SetConditionV2 | no | 10m0s | no | Community & Enterprise | Set deployment condition | +| SetCurrentImage | no | 6h0m0s | no | Community & Enterprise | Update deployment current image after image discovery | +| SetCurrentMemberArch | no | 10m0s | no | Community & Enterprise | Set current member architecture | +| SetMaintenanceCondition | no | 10m0s | no | Community & Enterprise | Update ArangoDB maintenance condition | +| ~~SetMemberCondition~~ | no | 10m0s | no | Community & Enterprise | Set member condition | +| SetMemberConditionV2 | no | 10m0s | no | Community & Enterprise | Set member condition | +| SetMemberCurrentImage | no | 10m0s | no | Community & Enterprise | Update Member current image | +| ShutdownMember | no | 30m0s | no | Community & Enterprise | Sends Shutdown requests and waits for container to be stopped | +| TLSKeyStatusUpdate | no | 10m0s | no | Enterprise Only | Update Status of TLS propagation process | +| TLSPropagated | no | 10m0s | no | Enterprise Only | Update TLS propagation condition | +| TimezoneSecretSet | no | 30m0s | no | Community & Enterprise | Set timezone details in cluster | +| TopologyDisable | no | 10m0s | no | Enterprise Only | Disable TopologyAwareness | +| TopologyEnable | no | 10m0s | no | Enterprise Only | Enable TopologyAwareness | +| TopologyMemberAssignment | no | 10m0s | no | Enterprise Only | Update TopologyAwareness Members assignments | +| TopologyZonesUpdate | no | 10m0s | no | Enterprise Only | Update TopologyAwareness Zones info | +| UpToDateUpdate | no | 10m0s | no | Community & Enterprise | Update UpToDate condition | +| UpdateTLSSNI | no | 10m0s | no | Enterprise Only | Update certificate in SNI | +| UpgradeMember | no | 6h0m0s | no | Community & Enterprise | Run the Upgrade procedure on member | +| WaitForMemberInSync | no | 30m0s | no | Community & Enterprise | Wait for member to be in sync. In case of DBServer waits for shards. 
In case of Agents to catch-up on Agency index | +| WaitForMemberReady | no | 30m0s | no | Community & Enterprise | Wait for member Ready condition | +| WaitForMemberUp | no | 30m0s | no | Community & Enterprise | Wait for member to be responsive | + + + +## ArangoDeployment spec + + +```yaml +spec: + timeouts: + actions: + AddMember: 10m0s + AppendTLSCACertificate: 30m0s + ArangoMemberUpdatePodSpec: 10m0s + ArangoMemberUpdatePodStatus: 10m0s + BackupRestore: 15m0s + BackupRestoreClean: 15m0s + BootstrapSetPassword: 10m0s + BootstrapUpdate: 10m0s + CleanMemberService: 30m0s + CleanOutMember: 48h0m0s + CleanTLSCACertificate: 30m0s + CleanTLSKeyfileCertificate: 30m0s + ClusterMemberCleanup: 10m0s + DisableClusterScaling: 10m0s + DisableMaintenance: 10m0s + DisableMemberMaintenance: 10m0s + EnableClusterScaling: 10m0s + EnableMaintenance: 10m0s + EnableMemberMaintenance: 10m0s + EncryptionKeyAdd: 10m0s + EncryptionKeyPropagated: 10m0s + EncryptionKeyRefresh: 10m0s + EncryptionKeyRemove: 10m0s + EncryptionKeyStatusUpdate: 10m0s + EnforceResignLeadership: 45m0s + Idle: 10m0s + JWTAdd: 10m0s + JWTClean: 10m0s + JWTPropagated: 10m0s + JWTRefresh: 10m0s + JWTSetActive: 10m0s + JWTStatusUpdate: 10m0s + KillMemberPod: 10m0s + LicenseSet: 10m0s + MarkToRemoveMember: 10m0s + MemberPhaseUpdate: 10m0s + MemberRIDUpdate: 10m0s + MemberStatusSync: 10m0s + PVCResize: 30m0s + PVCResized: 15m0s + PlaceHolder: 10m0s + RebalancerCheck: 10m0s + RebalancerCheckV2: 10m0s + RebalancerClean: 10m0s + RebalancerCleanV2: 10m0s + RebalancerGenerate: 10m0s + RebalancerGenerateV2: 10m0s + RebuildOutSyncedShards: 24h0m0s + RecreateMember: 15m0s + RefreshTLSKeyfileCertificate: 30m0s + RemoveMember: 15m0s + RemoveMemberPVC: 15m0s + RenewTLSCACertificate: 30m0s + RenewTLSCertificate: 30m0s + ResignLeadership: 30m0s + ResourceSync: 10m0s + RotateMember: 15m0s + RotateStartMember: 15m0s + RotateStopMember: 15m0s + RuntimeContainerArgsLogLevelUpdate: 10m0s + RuntimeContainerImageUpdate: 10m0s + RuntimeContainerSyncTolerations: 10m0s + SetCondition: 10m0s + SetConditionV2: 10m0s + SetCurrentImage: 6h0m0s + SetCurrentMemberArch: 10m0s + SetMaintenanceCondition: 10m0s + SetMemberCondition: 10m0s + SetMemberConditionV2: 10m0s + SetMemberCurrentImage: 10m0s + ShutdownMember: 30m0s + TLSKeyStatusUpdate: 10m0s + TLSPropagated: 10m0s + TimezoneSecretSet: 30m0s + TopologyDisable: 10m0s + TopologyEnable: 10m0s + TopologyMemberAssignment: 10m0s + TopologyZonesUpdate: 10m0s + UpToDateUpdate: 10m0s + UpdateTLSSNI: 10m0s + UpgradeMember: 6h0m0s + WaitForMemberInSync: 30m0s + WaitForMemberReady: 30m0s + WaitForMemberUp: 30m0s + +``` + \ No newline at end of file diff --git a/docs/generated/metrics/README.md b/docs/generated/metrics/README.md new file mode 100644 index 000000000..12dc09f52 --- /dev/null +++ b/docs/generated/metrics/README.md @@ -0,0 +1,38 @@ +# ArangoDB Operator Metrics + +## List of the Operator metrics + + +| Name | Namespace | Group | Type | Description | +|:-------------------------------------------------------------------------------------------------------------------------------------:|:-----------------:|:-----------------:|:-------:|:--------------------------------------------------------------------------------------| +| [arangodb_operator_agency_errors](./arangodb_operator_agency_errors.md) | arangodb_operator | agency | Counter | Current count of agency cache fetch errors | +| [arangodb_operator_agency_fetches](./arangodb_operator_agency_fetches.md) | arangodb_operator | agency | Counter | Current count of agency 
+ \ No newline at end of file diff --git a/docs/generated/metrics/README.md b/docs/generated/metrics/README.md new file mode 100644 index 000000000..12dc09f52 --- /dev/null +++ b/docs/generated/metrics/README.md @@ -0,0 +1,38 @@ +# ArangoDB Operator Metrics + +## List of the Operator metrics + + +| Name | Namespace | Group | Type | Description | +|:-------------------------------------------------------------------------------------------------------------------------------------:|:-----------------:|:-----------------:|:-------:|:--------------------------------------------------------------------------------------| +| [arangodb_operator_agency_errors](./arangodb_operator_agency_errors.md) | arangodb_operator | agency | Counter | Current count of agency cache fetch errors | +| [arangodb_operator_agency_fetches](./arangodb_operator_agency_fetches.md) | arangodb_operator | agency | Counter | Current count of agency cache fetches | +| [arangodb_operator_agency_index](./arangodb_operator_agency_index.md) | arangodb_operator | agency | Gauge | Current index of the agency cache | +| [arangodb_operator_agency_cache_health_present](./arangodb_operator_agency_cache_health_present.md) | arangodb_operator | agency_cache | Gauge | Determines if local agency cache health is present | +| [arangodb_operator_agency_cache_healthy](./arangodb_operator_agency_cache_healthy.md) | arangodb_operator | agency_cache | Gauge | Determines if agency is healthy | +| [arangodb_operator_agency_cache_leaders](./arangodb_operator_agency_cache_leaders.md) | arangodb_operator | agency_cache | Gauge | Determines agency leader vote count | +| [arangodb_operator_agency_cache_member_commit_offset](./arangodb_operator_agency_cache_member_commit_offset.md) | arangodb_operator | agency_cache | Gauge | Determines agency member commit offset | +| [arangodb_operator_agency_cache_member_serving](./arangodb_operator_agency_cache_member_serving.md) | arangodb_operator | agency_cache | Gauge | Determines if agency member is reachable | +| [arangodb_operator_agency_cache_present](./arangodb_operator_agency_cache_present.md) | arangodb_operator | agency_cache | Gauge | Determines if local agency cache is present | +| [arangodb_operator_agency_cache_serving](./arangodb_operator_agency_cache_serving.md) | arangodb_operator | agency_cache | Gauge | Determines if agency is serving | +| [arangodb_operator_engine_assertions](./arangodb_operator_engine_assertions.md) | arangodb_operator | engine | Counter | Number of assertions invoked during Operator runtime | +| [arangodb_operator_engine_ops_alerts](./arangodb_operator_engine_ops_alerts.md) | arangodb_operator | engine | Counter | Counter for actions which require ops attention | +| [arangodb_operator_engine_panics_recovered](./arangodb_operator_engine_panics_recovered.md) | arangodb_operator | engine | Counter | Number of Panics recovered inside Operator reconciliation loop | +| [arangodb_operator_kubernetes_client_request_errors](./arangodb_operator_kubernetes_client_request_errors.md) | arangodb_operator | kubernetes_client | Counter | Number of Kubernetes Client request errors | +| [arangodb_operator_kubernetes_client_requests](./arangodb_operator_kubernetes_client_requests.md) | arangodb_operator | kubernetes_client | Counter | Number of Kubernetes Client requests | +| [arangodb_operator_members_unexpected_container_exit_codes](./arangodb_operator_members_unexpected_container_exit_codes.md) | arangodb_operator | members | Counter | Counter of unexpected restarts in pod (Containers/InitContainers/EphemeralContainers) | +| [arangodb_operator_rebalancer_enabled](./arangodb_operator_rebalancer_enabled.md) | arangodb_operator | rebalancer | Gauge | Determines if rebalancer is enabled | +| [arangodb_operator_rebalancer_moves_current](./arangodb_operator_rebalancer_moves_current.md) | arangodb_operator | rebalancer | Gauge | Defines how many moves are currently in progress | +| [arangodb_operator_rebalancer_moves_failed](./arangodb_operator_rebalancer_moves_failed.md) | arangodb_operator | rebalancer | Counter | Defines how many moves failed | +| [arangodb_operator_rebalancer_moves_generated](./arangodb_operator_rebalancer_moves_generated.md) | arangodb_operator | rebalancer | Counter | Defines how many moves were generated | +| [arangodb_operator_rebalancer_moves_succeeded](./arangodb_operator_rebalancer_moves_succeeded.md) | arangodb_operator | rebalancer | Counter | Defines how many moves succeeded | +|
[arangodb_operator_resources_arangodeployment_accepted](./arangodb_operator_resources_arangodeployment_accepted.md) | arangodb_operator | resources | Gauge | Defines if ArangoDeployment has been accepted | +| [arangodb_operator_resources_arangodeployment_immutable_errors](./arangodb_operator_resources_arangodeployment_immutable_errors.md) | arangodb_operator | resources | Counter | Counter for deployment immutable errors | +| [arangodb_operator_resources_arangodeployment_propagated](./arangodb_operator_resources_arangodeployment_propagated.md) | arangodb_operator | resources | Gauge | Defines if ArangoDeployment Spec is propagated | +| [arangodb_operator_resources_arangodeployment_status_restores](./arangodb_operator_resources_arangodeployment_status_restores.md) | arangodb_operator | resources | Counter | Counter for deployment status restores | +| [arangodb_operator_resources_arangodeployment_uptodate](./arangodb_operator_resources_arangodeployment_uptodate.md) | arangodb_operator | resources | Gauge | Defines if ArangoDeployment is up to date | +| [arangodb_operator_resources_arangodeployment_validation_errors](./arangodb_operator_resources_arangodeployment_validation_errors.md) | arangodb_operator | resources | Counter | Counter for deployment validation errors | +| [arangodb_operator_resources_arangodeploymentreplication_active](./arangodb_operator_resources_arangodeploymentreplication_active.md) | arangodb_operator | resources | Gauge | Defines if ArangoDeploymentReplication is configured and running | +| [arangodb_operator_resources_arangodeploymentreplication_failed](./arangodb_operator_resources_arangodeploymentreplication_failed.md) | arangodb_operator | resources | Gauge | Defines if ArangoDeploymentReplication is in Failed phase |
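+ +For example, a Prometheus query that watches agency cache fetch errors for a single deployment could look as follows (a sketch; the `namespace` and `name` label values are illustrative): + +``` +rate(arangodb_operator_agency_errors{namespace="default", name="example"}[5m]) > 0 +```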
+ + \ No newline at end of file diff --git a/docs/generated/metrics/arangodb_operator_agency_cache_health_present.md b/docs/generated/metrics/arangodb_operator_agency_cache_health_present.md new file mode 100644 index 000000000..9d411a0dc --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_agency_cache_health_present.md @@ -0,0 +1,12 @@ +# arangodb_operator_agency_cache_health_present (Gauge) + +## Description + +Determines if local agency cache health is present + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_agency_cache_healthy.md b/docs/generated/metrics/arangodb_operator_agency_cache_healthy.md new file mode 100644 index 000000000..7247c91e4 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_agency_cache_healthy.md @@ -0,0 +1,12 @@ +# arangodb_operator_agency_cache_healthy (Gauge) + +## Description + +Determines if agency is healthy + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_agency_cache_leaders.md b/docs/generated/metrics/arangodb_operator_agency_cache_leaders.md new file mode 100644 index 000000000..c58ec73ff --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_agency_cache_leaders.md @@ -0,0 +1,13 @@ +# arangodb_operator_agency_cache_leaders (Gauge) + +## Description + +Determines agency leader vote count. It should always be one + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | +| agent | Agent ID | diff --git a/docs/generated/metrics/arangodb_operator_agency_cache_member_commit_offset.md b/docs/generated/metrics/arangodb_operator_agency_cache_member_commit_offset.md new file mode 100644 index 000000000..7a00682d0 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_agency_cache_member_commit_offset.md @@ -0,0 +1,13 @@ +# arangodb_operator_agency_cache_member_commit_offset (Gauge) + +## Description + +Determines agency member commit offset. Set to -1 if the Agent is not reachable + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | +| agent | Agent ID | diff --git a/docs/generated/metrics/arangodb_operator_agency_cache_member_serving.md b/docs/generated/metrics/arangodb_operator_agency_cache_member_serving.md new file mode 100644 index 000000000..cec191f74 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_agency_cache_member_serving.md @@ -0,0 +1,13 @@ +# arangodb_operator_agency_cache_member_serving (Gauge) + +## Description + +Determines if agency member is reachable + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | +| agent | Agent ID | diff --git a/docs/generated/metrics/arangodb_operator_agency_cache_present.md b/docs/generated/metrics/arangodb_operator_agency_cache_present.md new file mode 100644 index 000000000..5ecc6a270 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_agency_cache_present.md @@ -0,0 +1,12 @@ +# arangodb_operator_agency_cache_present (Gauge) + +## Description + +Determines if local agency cache is present + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_agency_cache_serving.md b/docs/generated/metrics/arangodb_operator_agency_cache_serving.md new file mode 100644 index 000000000..472e5206c --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_agency_cache_serving.md @@ -0,0 +1,12 @@ +# arangodb_operator_agency_cache_serving (Gauge) + +## Description + +Determines if agency is serving + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_agency_errors.md b/docs/generated/metrics/arangodb_operator_agency_errors.md new file mode 100644 index 000000000..c6746ac64 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_agency_errors.md @@ -0,0 +1,12 @@ +# arangodb_operator_agency_errors (Counter) + +## Description + +Current count of agency cache fetch errors + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_agency_fetches.md b/docs/generated/metrics/arangodb_operator_agency_fetches.md new file mode 100644 index 000000000..c7139fea9 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_agency_fetches.md @@ -0,0 +1,12 @@ +# arangodb_operator_agency_fetches (Counter) + +## Description + +Current count of agency cache fetches + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace |
Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_agency_index.md b/docs/generated/metrics/arangodb_operator_agency_index.md new file mode 100644 index 000000000..a6ee5ade4 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_agency_index.md @@ -0,0 +1,12 @@ +# arangodb_operator_agency_index (Gauge) + +## Description + +Current index of the agency cache + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_engine_assertions.md b/docs/generated/metrics/arangodb_operator_engine_assertions.md new file mode 100644 index 000000000..924697f20 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_engine_assertions.md @@ -0,0 +1,18 @@ +# arangodb_operator_engine_assertions (Counter) + +## Description + +Number of assertions invoked during Operator runtime + +## Labels + +| Label | Description | +|:-----:|:--------------| +| key | Assertion Key | + + +## Alerting + +| Priority | Query | Description | +|:--------:|:--------------------------------------------------:|:--------------------------------------------| +| Warning | irate(arangodb_operator_engine_assertions[1m]) > 1 | Trigger an alert if OPS attention is needed | diff --git a/docs/generated/metrics/arangodb_operator_engine_ops_alerts.md b/docs/generated/metrics/arangodb_operator_engine_ops_alerts.md new file mode 100644 index 000000000..8901cace4 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_engine_ops_alerts.md @@ -0,0 +1,19 @@ +# arangodb_operator_engine_ops_alerts (Counter) + +## Description + +Counter for actions which require ops attention + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | + + +## Alerting + +| Priority | Query | Description | +|:--------:|:--------------------------------------------------:|:--------------------------------------------| +| Warning | irate(arangodb_operator_engine_ops_alerts[1m]) > 1 | Trigger an alert if OPS attention is needed |
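+ +Based on the query above, a Prometheus alerting rule could be defined as follows (a sketch; the group and alert names are illustrative): + +```yaml +groups: + - name: arangodb-operator + rules: + - alert: ArangoDBOperatorOpsAttentionNeeded + expr: irate(arangodb_operator_engine_ops_alerts[1m]) > 1 + labels: + severity: warning + annotations: + summary: ArangoDB Operator requires ops attention +```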
diff --git a/docs/generated/metrics/arangodb_operator_engine_panics_recovered.md b/docs/generated/metrics/arangodb_operator_engine_panics_recovered.md new file mode 100644 index 000000000..e67a5ddcf --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_engine_panics_recovered.md @@ -0,0 +1,11 @@ +# arangodb_operator_engine_panics_recovered (Counter) + +## Description + +Number of Panics recovered inside Operator reconciliation loop. The `section` label identifies where the panic was recovered + +## Labels + +| Label | Description | +|:-------:|:--------------| +| section | Panic Section | diff --git a/docs/generated/metrics/arangodb_operator_kubernetes_client_request_errors.md b/docs/generated/metrics/arangodb_operator_kubernetes_client_request_errors.md new file mode 100644 index 000000000..877a01b6b --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_kubernetes_client_request_errors.md @@ -0,0 +1,12 @@ +# arangodb_operator_kubernetes_client_request_errors (Counter) + +## Description + +Number of Kubernetes Client request errors + +## Labels + +| Label | Description | +|:---------:|:-------------------------------------------------------------| +| component | K8S Resource name | +| verb | Verb (create,update,update-status,patch,delete,force-delete) | diff --git a/docs/generated/metrics/arangodb_operator_kubernetes_client_requests.md b/docs/generated/metrics/arangodb_operator_kubernetes_client_requests.md new file mode 100644 index 000000000..2bd47740a --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_kubernetes_client_requests.md @@ -0,0 +1,12 @@ +# arangodb_operator_kubernetes_client_requests (Counter) + +## Description + +Number of Kubernetes Client requests + +## Labels + +| Label | Description | +|:---------:|:-------------------------------------------------------------| +| component | K8S Resource name | +| verb | Verb (create,update,update-status,patch,delete,force-delete) | diff --git a/docs/generated/metrics/arangodb_operator_kubernetes_events_created.md b/docs/generated/metrics/arangodb_operator_kubernetes_events_created.md new file mode 100644 index 000000000..c7ca6476c --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_kubernetes_events_created.md @@ -0,0 +1,13 @@ +# arangodb_operator_kubernetes_events_created (Counter) + +## Description + +Counter for created events + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | +| eventType | Event Type | diff --git a/docs/generated/metrics/arangodb_operator_members_unexpected_container_exit_codes.md b/docs/generated/metrics/arangodb_operator_members_unexpected_container_exit_codes.md new file mode 100644 index 000000000..73bbb8fbb --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_members_unexpected_container_exit_codes.md @@ -0,0 +1,17 @@ +# arangodb_operator_members_unexpected_container_exit_codes (Counter) + +## Description + +Counter of unexpected restarts in pod (Containers/InitContainers/EphemeralContainers) + +## Labels + +| Label | Description | +|:--------------:|:-------------------------------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | +| member | Member ID | +| container | Container Name | +| container_type | Container/InitContainer/EphemeralContainer | +| code | ExitCode | +| reason | Reason | diff --git a/docs/generated/metrics/arangodb_operator_rebalancer_enabled.md b/docs/generated/metrics/arangodb_operator_rebalancer_enabled.md new file mode 100644 index 000000000..fdc513a7a --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_rebalancer_enabled.md @@ -0,0 +1,12 @@ +# arangodb_operator_rebalancer_enabled (Gauge) + +## Description + +Determines if rebalancer is enabled + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_rebalancer_moves_current.md
b/docs/generated/metrics/arangodb_operator_rebalancer_moves_current.md new file mode 100644 index 000000000..ab1c67038 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_rebalancer_moves_current.md @@ -0,0 +1,12 @@ +# arangodb_operator_rebalancer_moves_current (Gauge) + +## Description + +Defines how many moves are currently in progress + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_rebalancer_moves_failed.md b/docs/generated/metrics/arangodb_operator_rebalancer_moves_failed.md new file mode 100644 index 000000000..22ace44e2 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_rebalancer_moves_failed.md @@ -0,0 +1,12 @@ +# arangodb_operator_rebalancer_moves_failed (Counter) + +## Description + +Defines how many moves failed + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_rebalancer_moves_generated.md b/docs/generated/metrics/arangodb_operator_rebalancer_moves_generated.md new file mode 100644 index 000000000..9ae74acd6 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_rebalancer_moves_generated.md @@ -0,0 +1,12 @@ +# arangodb_operator_rebalancer_moves_generated (Counter) + +## Description + +Defines how many moves were generated + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_rebalancer_moves_succeeded.md b/docs/generated/metrics/arangodb_operator_rebalancer_moves_succeeded.md new file mode 100644 index 000000000..c4ee3c5fc --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_rebalancer_moves_succeeded.md @@ -0,0 +1,12 @@ +# arangodb_operator_rebalancer_moves_succeeded (Counter) + +## Description + +Defines how many moves succeeded + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_resources_arangodeployment_accepted.md b/docs/generated/metrics/arangodb_operator_resources_arangodeployment_accepted.md new file mode 100644 index 000000000..c9e7c29f3 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_resources_arangodeployment_accepted.md @@ -0,0 +1,12 @@ +# arangodb_operator_resources_arangodeployment_accepted (Gauge) + +## Description + +Defines if ArangoDeployment has been accepted + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_resources_arangodeployment_immutable_errors.md b/docs/generated/metrics/arangodb_operator_resources_arangodeployment_immutable_errors.md new file mode 100644 index 000000000..5fc4d07d5 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_resources_arangodeployment_immutable_errors.md @@ -0,0 +1,12 @@ +# arangodb_operator_resources_arangodeployment_immutable_errors (Counter) + +## Description + +Counter for deployment immutable errors + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_resources_arangodeployment_propagated.md
b/docs/generated/metrics/arangodb_operator_resources_arangodeployment_propagated.md new file mode 100644 index 000000000..6a9568f97 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_resources_arangodeployment_propagated.md @@ -0,0 +1,12 @@ +# arangodb_operator_resources_arangodeployment_propagated (Gauge) + +## Description + +Defines if ArangoDeployment Spec is propagated + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_resources_arangodeployment_status_restores.md b/docs/generated/metrics/arangodb_operator_resources_arangodeployment_status_restores.md new file mode 100644 index 000000000..04ef8c09b --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_resources_arangodeployment_status_restores.md @@ -0,0 +1,12 @@ +# arangodb_operator_resources_arangodeployment_status_restores (Counter) + +## Description + +Counter for deployment status restores + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_resources_arangodeployment_uptodate.md b/docs/generated/metrics/arangodb_operator_resources_arangodeployment_uptodate.md new file mode 100644 index 000000000..6d4017a5b --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_resources_arangodeployment_uptodate.md @@ -0,0 +1,12 @@ +# arangodb_operator_resources_arangodeployment_uptodate (Gauge) + +## Description + +Defines if ArangoDeployment is up to date + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_resources_arangodeployment_validation_errors.md b/docs/generated/metrics/arangodb_operator_resources_arangodeployment_validation_errors.md new file mode 100644 index 000000000..fe38289cf --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_resources_arangodeployment_validation_errors.md @@ -0,0 +1,12 @@ +# arangodb_operator_resources_arangodeployment_validation_errors (Counter) + +## Description + +Counter for deployment validation errors + +## Labels + +| Label | Description | +|:---------:|:---------------------| +| namespace | Deployment Namespace | +| name | Deployment Name | diff --git a/docs/generated/metrics/arangodb_operator_resources_arangodeploymentreplication_active.md b/docs/generated/metrics/arangodb_operator_resources_arangodeploymentreplication_active.md new file mode 100644 index 000000000..c8dff79e1 --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_resources_arangodeploymentreplication_active.md @@ -0,0 +1,12 @@ +# arangodb_operator_resources_arangodeploymentreplication_active (Gauge) + +## Description + +Defines if ArangoDeploymentReplication is configured and running + +## Labels + +| Label | Description | +|:---------:|:--------------------------------| +| namespace | DeploymentReplication Namespace | +| name | DeploymentReplication Name | diff --git a/docs/generated/metrics/arangodb_operator_resources_arangodeploymentreplication_failed.md b/docs/generated/metrics/arangodb_operator_resources_arangodeploymentreplication_failed.md new file mode 100644 index 000000000..ac22d18de --- /dev/null +++ b/docs/generated/metrics/arangodb_operator_resources_arangodeploymentreplication_failed.md @@ -0,0 +1,12 @@ +#
arangodb_operator_resources_arangodeploymentreplication_failed (Gauge) + +## Description + +Defines if ArangoDeploymentReplication is in Failed phase + +## Labels + +| Label | Description | +|:---------:|:--------------------------------| +| namespace | DeploymentReplication Namespace | +| name | DeploymentReplication Name | diff --git a/docs/providers/README.md b/docs/providers/README.md new file mode 100644 index 000000000..d59b40570 --- /dev/null +++ b/docs/providers/README.md @@ -0,0 +1,3 @@ +# Supported Providers + +- [Amazon EKS](./eks/README.md) \ No newline at end of file diff --git a/docs/providers/eks/README.md b/docs/providers/eks/README.md new file mode 100644 index 000000000..2a856a4fc --- /dev/null +++ b/docs/providers/eks/README.md @@ -0,0 +1,26 @@ +# Amazon AWS Remarks + +## Elastic Block Storage + +Documentation: +- [AWS EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) + +Remarks: +- It is recommended to use at least the GP2 volume type (IO1 also works) for ArangoDeployment PVs. +- GP2 volume IOPS scales mostly with storage size. If a higher load is expected, use bigger volumes. +- GP2 volumes support burst mode. If load on the ArangoDeployment is expected only periodically, you can use +smaller GP2 volumes to save costs. +- AWS EBS supports volume resizing. The volume size can be changed during its lifetime, but this requires the pod to be recreated. + +## LoadBalancer + +Documentation: +- [AWS LB Annotations](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#load-balancers) + +Remarks: +- An AWS LB in TCP mode can resend a request when it times out while waiting for a response from a Coordinator/DBServer. +This can break non-idempotent POST requests, like data insertion. To change the default idle timeout (60s), +set an annotation on the ArangoDeployment LoadBalancer service: +``` +kubectl annotate --overwrite service/<deployment-name>-ea service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout=<timeout-in-seconds> +``` \ No newline at end of file diff --git a/index.yaml b/index.yaml index 41d32754d..22f14d9ba 100644 --- a/index.yaml +++ b/index.yaml @@ -1,6 +1,14 @@ apiVersion: v1 entries: kube-arangodb: + - apiVersion: v1 + created: "2023-10-23T19:29:42.019243557+02:00" + description: ArangoDB Kubernetes Operator + digest: cdec37e032276cc4016a9c88b1c9b1e75d2da35bdbe5cb2e75343d379f5952ad + name: kube-arangodb + urls: + - https://github.com/arangodb/kube-arangodb/releases/download/1.2.34/kube-arangodb-1.2.34.tgz + version: 1.2.34 - apiVersion: v1 created: "2023-10-23T08:20:47.117117874Z" description: ArangoDB Kubernetes Operator @@ -515,4 +523,4 @@ entries: urls: - https://github.com/arangodb/kube-arangodb/releases/download/1.2.1/kube-arangodb-crd-1.2.1.tgz version: 1.2.1 -generated: "2023-10-23T08:20:47.117868035Z" +generated: "2023-10-23T19:29:42.019789729+02:00"