diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 4d594da9ab..cfbbd1a2ee 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -7,6 +7,7 @@ ARG USER_GID=$USER_UID ARG AWS_CLI_VERSION=2.0.30 ARG HELM_VERSION=3.3.1 ARG KUBECTL_VERSION=1.22.4 +ARG KUBELOGIN_VERSION=0.0.33 ARG TERRAFORM_VERSION=1.1.3 RUN : INSTALL APT REQUIREMENTS \ @@ -35,6 +36,11 @@ RUN : INSTALL HELM BINARY \ && chmod +x ./kubectl \ && mv ./kubectl /usr/local/bin/kubectl \ && kubectl version --client \ + && : INSTALL KUBELOGIN BINARY \ + && curl -fsSLO https://github.com/Azure/kubelogin/releases/download/v${KUBELOGIN_VERSION}/kubelogin-linux-amd64.zip \ + && unzip -j kubelogin-linux-amd64.zip -d /usr/local/bin \ + && rm kubelogin-linux-amd64.zip \ + && kubelogin --version \ && : INSTALL TERRAFORM BINARY \ && curl -fsSLO https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip \ && unzip terraform_${TERRAFORM_VERSION}_linux_amd64.zip -d /usr/local/bin \ @@ -48,6 +54,7 @@ RUN : INSTALL HELM BINARY \ && aws --version RUN : INSTALL GEM REQUIREMENTS \ + && gem install net-ssh -v 6.1.0 \ && gem install \ bcrypt_pbkdf ed25519 rake rspec_junit_formatter rubocop rubocop-junit_formatter serverspec solargraph diff --git a/Dockerfile b/Dockerfile index 0ca2a856c8..e99092d84c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,6 +7,7 @@ ARG USER_GID=$USER_UID ARG AWS_CLI_VERSION=2.0.30 ARG HELM_VERSION=3.3.1 ARG KUBECTL_VERSION=1.22.4 +ARG KUBELOGIN_VERSION=0.0.33 ARG TERRAFORM_VERSION=1.1.3 ENV EPICLI_DOCKER_SHARED_DIR=/shared @@ -28,6 +29,11 @@ RUN : INSTALL APT REQUIREMENTS \ && chmod +x ./kubectl \ && mv ./kubectl /usr/local/bin/kubectl \ && kubectl version --client \ + && : INSTALL KUBELOGIN BINARY \ + && curl -fsSLO https://github.com/Azure/kubelogin/releases/download/v${KUBELOGIN_VERSION}/kubelogin-linux-amd64.zip \ + && unzip -j kubelogin-linux-amd64.zip -d /usr/local/bin \ + && rm kubelogin-linux-amd64.zip \ + && 
kubelogin --version \ && : INSTALL TERRAFORM BINARY \ && curl -fsSLO https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip \ && unzip terraform_${TERRAFORM_VERSION}_linux_amd64.zip -d /usr/local/bin \ @@ -42,6 +48,7 @@ RUN : INSTALL APT REQUIREMENTS \ && aws --version \ \ && : INSTALL GEM REQUIREMENTS \ + && gem install net-ssh -v 6.1.0 \ && gem install \ bcrypt_pbkdf ed25519 rake rspec_junit_formatter serverspec \ \ diff --git a/ansible/playbooks/roles/applications/templates/rabbitmq/03-rabbitmq.yml.j2 b/ansible/playbooks/roles/applications/templates/rabbitmq/03-rabbitmq.yml.j2 index 70886c46e8..fe8e3537da 100644 --- a/ansible/playbooks/roles/applications/templates/rabbitmq/03-rabbitmq.yml.j2 +++ b/ansible/playbooks/roles/applications/templates/rabbitmq/03-rabbitmq.yml.j2 @@ -47,7 +47,8 @@ data: ## Set to "hostname" to use pod hostnames. ## When this value is changed, so should the variable used to set the RABBITMQ_NODENAME ## environment variable. - cluster_formation.k8s.address_type = ip + cluster_formation.k8s.address_type = hostname + cluster_formation.k8s.hostname_suffix = .{{ rabbitmq_service_name }}.{{ namespace_name }}.svc.cluster.local ## How often should node cleanup checks run? 
cluster_formation.node_cleanup.interval = 30 ## Set to false if automatic removal of unknown/absent nodes @@ -120,9 +121,6 @@ spec: fieldPath: status.podIP - name: RABBITMQ_USE_LONGNAME value: "true" - # See a note on cluster_formation.k8s.address_type in the config file section - - name: RABBITMQ_NODENAME - value: "rabbit@$(MY_POD_IP)" - name: K8S_SERVICE_NAME value: "{{ rabbitmq_service_name }}" - name: RABBITMQ_ERLANG_COOKIE diff --git a/ansible/playbooks/roles/common/tasks/main.yml b/ansible/playbooks/roles/common/tasks/main.yml index 14071887ac..08b0a57c6b 100644 --- a/ansible/playbooks/roles/common/tasks/main.yml +++ b/ansible/playbooks/roles/common/tasks/main.yml @@ -91,6 +91,30 @@ owner: root group: root +- name: Customize CoreDNS for AKS + when: k8s_as_cloud_service + run_once: true + delegate_to: localhost + become: false + environment: + KUBECONFIG: "{{ kubeconfig.local }}" + block: + + - name: Render Epiphany hosts template + template: + src: coredns-epi.yml.j2 + dest: "/tmp/coredns-epi.yml" + mode: u=rw,go=r + + - name: Apply custom CoreDNS configmap + become: false + command: | + kubectl apply -f /tmp/coredns-epi.yml + + - name: Restart CoreDNS + command: | + kubectl -n kube-system rollout restart deployment coredns + - include_tasks: epiuser.yml tags: - epiuser diff --git a/ansible/playbooks/roles/common/templates/coredns-epi.yml.j2 b/ansible/playbooks/roles/common/templates/coredns-epi.yml.j2 new file mode 100644 index 0000000000..8801fda4e7 --- /dev/null +++ b/ansible/playbooks/roles/common/templates/coredns-epi.yml.j2 @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns-custom # this is the name of the configmap you can overwrite with your changes + namespace: kube-system +data: + epi.override: | # you may select any name here, but it must end with the .override file extension + hosts { + {% for host in play_hosts %} + {% set name = host.split('.') %} + {{ hostvars[host]['ansible_default_ipv4']['address'] }} {{ host }} {{ 
name[0] }} + {% endfor %} + fallthrough + } diff --git a/ansible/playbooks/roles/elasticsearch_curator/tasks/install-es-curator-Debian.yml b/ansible/playbooks/roles/elasticsearch_curator/tasks/install-es-curator-Debian.yml index fb6e332206..293434b2d2 100644 --- a/ansible/playbooks/roles/elasticsearch_curator/tasks/install-es-curator-Debian.yml +++ b/ansible/playbooks/roles/elasticsearch_curator/tasks/install-es-curator-Debian.yml @@ -5,3 +5,7 @@ apt: name: "elasticsearch-curator={{ curator_version }}" state: present + register: result + until: result is succeeded + retries: "{{ retries }}" + delay: "{{ delay }}" diff --git a/ansible/playbooks/roles/firewall/tasks/Debian/install-firewall.yml b/ansible/playbooks/roles/firewall/tasks/Debian/install-firewall.yml index 2ef0849349..ab5f3d62c0 100644 --- a/ansible/playbooks/roles/firewall/tasks/Debian/install-firewall.yml +++ b/ansible/playbooks/roles/firewall/tasks/Debian/install-firewall.yml @@ -14,6 +14,10 @@ name: ufw state: absent purge: true + register: result + until: result is succeeded + retries: "{{ retries }}" + delay: "{{ delay }}" - name: Install firewalld package block: @@ -22,6 +26,9 @@ name: firewalld state: present register: install_firewalld + until: install_firewalld is succeeded + retries: "{{ retries }}" + delay: "{{ delay }}" - name: Stop and mask service for consistency with RHEL block: diff --git a/ansible/playbooks/roles/grafana/tasks/install-packages-Debian.yml b/ansible/playbooks/roles/grafana/tasks/install-packages-Debian.yml index dba83b1888..e8f5993f97 100644 --- a/ansible/playbooks/roles/grafana/tasks/install-packages-Debian.yml +++ b/ansible/playbooks/roles/grafana/tasks/install-packages-Debian.yml @@ -3,3 +3,7 @@ apt: name: "{{ _package_name }}" state: present + register: result + until: result is succeeded + retries: "{{ retries }}" + delay: "{{ delay }}" diff --git a/ansible/playbooks/roles/haproxy_runc/tasks/install-packages-Debian.yml 
b/ansible/playbooks/roles/haproxy_runc/tasks/install-packages-Debian.yml index 20a95056a5..912a597cfe 100644 --- a/ansible/playbooks/roles/haproxy_runc/tasks/install-packages-Debian.yml +++ b/ansible/playbooks/roles/haproxy_runc/tasks/install-packages-Debian.yml @@ -6,3 +6,7 @@ - containerd.io={{ containerd_defaults.containerd_version }}-* # provides "runc" state: present allow_downgrade: true + register: result + until: result is succeeded + retries: "{{ retries }}" + delay: "{{ delay }}" diff --git a/ansible/playbooks/roles/keycloak/templates/dynamic-chart-values.yml.j2 b/ansible/playbooks/roles/keycloak/templates/dynamic-chart-values.yml.j2 index affd81810c..24b76ccca4 100644 --- a/ansible/playbooks/roles/keycloak/templates/dynamic-chart-values.yml.j2 +++ b/ansible/playbooks/roles/keycloak/templates/dynamic-chart-values.yml.j2 @@ -7,7 +7,7 @@ Keep data structure in sync with Helm chart's values. ref: https://github.com/codecentric/helm-charts/blob/keycloakx-1.6.1/charts/keycloakx/values.yaml -#} -{% if specification.image_registry.use_local %} +{% if specification.image_registry.use_local and not k8s_as_cloud_service %} image: repository: {{ image_registry_address }}/{{ _chart_values.image.repository }} dbchecker: diff --git a/ansible/playbooks/roles/postgres_exporter/tasks/main.yml b/ansible/playbooks/roles/postgres_exporter/tasks/main.yml index 0cbc959e98..74e73d23aa 100644 --- a/ansible/playbooks/roles/postgres_exporter/tasks/main.yml +++ b/ansible/playbooks/roles/postgres_exporter/tasks/main.yml @@ -8,6 +8,10 @@ state: present install_recommends: no when: ansible_os_family == 'Debian' + register: result + until: result is succeeded + retries: "{{ retries }}" + delay: "{{ delay }}" - name: Create postgres_exporter system group group: diff --git a/ansible/playbooks/roles/postgresql/templates/postgresql.conf.j2 b/ansible/playbooks/roles/postgresql/templates/postgresql.conf.j2 index 6dc1df10b5..ca8343e49f 100644 --- 
a/ansible/playbooks/roles/postgresql/templates/postgresql.conf.j2 +++ b/ansible/playbooks/roles/postgresql/templates/postgresql.conf.j2 @@ -772,6 +772,9 @@ default_text_search_config = 'pg_catalog.english' #include_if_exists = '...' # include file only if it exists #include = '...' # include file include = 'postgresql-epiphany.conf' # Epiphany managed configuration that overrides settings above +{% if specification.custom_postgresql_config %} +include_if_exists = '{{ specification.custom_postgresql_config }}' +{% endif %} #------------------------------------------------------------------------------ diff --git a/ansible/playbooks/roles/preflight/tasks/common/check-routing.yml b/ansible/playbooks/roles/preflight/tasks/common/check-routing.yml index 17d4bcb103..d0eba56439 100644 --- a/ansible/playbooks/roles/preflight/tasks/common/check-routing.yml +++ b/ansible/playbooks/roles/preflight/tasks/common/check-routing.yml @@ -37,7 +37,7 @@ - name: Validate if ansible_default_ipv4.address matches address from inventory when: - common_vars.provider == "any" - - common_vars.specification.cloud is undefined + - not k8s_as_cloud_service assert: that: ansible_default_ipv4.address == ansible_host fail_msg: >- diff --git a/ansible/playbooks/roles/prometheus/files/secret.yml b/ansible/playbooks/roles/prometheus/files/secret.yml new file mode 100644 index 0000000000..60a1fa97c4 --- /dev/null +++ b/ansible/playbooks/roles/prometheus/files/secret.yml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: prometheus + namespace: kube-system + annotations: + kubernetes.io/service-account.name: prometheus +type: kubernetes.io/service-account-token diff --git a/ansible/playbooks/roles/prometheus/tasks/configure-k8s-apps-monitoring.yml b/ansible/playbooks/roles/prometheus/tasks/configure-k8s-apps-monitoring.yml index 77fd4e37b6..b43319f9b3 100644 --- a/ansible/playbooks/roles/prometheus/tasks/configure-k8s-apps-monitoring.yml +++ 
b/ansible/playbooks/roles/prometheus/tasks/configure-k8s-apps-monitoring.yml @@ -30,6 +30,14 @@ run_once: true delegate_to: localhost +# Starting from K8s v1.24, secrets are not automatically generated when service accounts are created +- name: Create secret + when: k8s_as_cloud_service is defined and k8s_as_cloud_service + become: false + command: "kubectl apply -f {{ role_path }}/files/secret.yml" + run_once: true + delegate_to: localhost + - name: Get kubernetes bearer token for prometheus become: false shell: |- diff --git a/ansible/playbooks/roles/prometheus/templates/prometheus.yml.j2 b/ansible/playbooks/roles/prometheus/templates/prometheus.yml.j2 index 247f785ffe..5e7a641125 100644 --- a/ansible/playbooks/roles/prometheus/templates/prometheus.yml.j2 +++ b/ansible/playbooks/roles/prometheus/templates/prometheus.yml.j2 @@ -104,7 +104,48 @@ scrape_configs: # Scrape config for nodes (kubelet). - - job_name: 'kubernetes-pods' + - job_name: 'kubernetes-pods-http' + kubernetes_sd_configs: + - role: pod + api_server: "{{ api_server_address }}" + tls_config: + insecure_skip_verify: true + bearer_token: "{{ bearer_token }}" + scheme: http + relabel_configs: + - action: keep + regex: true + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_scrape + - action: replace + regex: (.+) + source_labels: + - __meta_kubernetes_pod_annotation_prometheus_io_path + target_label: __metrics_path__ + - action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + source_labels: + - __address__ + - __meta_kubernetes_pod_annotation_prometheus_io_port + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: kubernetes_namespace + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: kubernetes_pod_name + - action: drop + source_labels: 
[__meta_kubernetes_pod_annotation_prometheus_io_scheme] + regex: https + +# To scrape metrics over HTTPS, you need to annotate application pods that expose an HTTPS endpoint +# with the following key/value pair: prometheus.io/scheme="https". + - job_name: 'kubernetes-pods-https' kubernetes_sd_configs: - role: pod api_server: "{{ api_server_address }}" @@ -142,6 +186,9 @@ scrape_configs: source_labels: - __meta_kubernetes_pod_name target_label: kubernetes_pod_name + - action: keep + source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + regex: https # Scrape config for Kubelet cAdvisor. diff --git a/ansible/playbooks/roles/rabbitmq/tasks/install-packages-debian.yml b/ansible/playbooks/roles/rabbitmq/tasks/install-packages-debian.yml index bd53f5e313..d1cbcbdc44 100644 --- a/ansible/playbooks/roles/rabbitmq/tasks/install-packages-debian.yml +++ b/ansible/playbooks/roles/rabbitmq/tasks/install-packages-debian.yml @@ -24,3 +24,7 @@ - rabbitmq-server={{ versions.debian.rabbitmq }} update_cache: true state: present + register: result + until: result is succeeded + retries: "{{ retries }}" + delay: "{{ delay }}" diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/aarch64/redhat/packages.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/aarch64/redhat/packages.yml index d3f9e2f6d3..5f370f9ea2 100644 --- a/ansible/playbooks/roles/repository/files/download-requirements/requirements/aarch64/redhat/packages.yml +++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/aarch64/redhat/packages.yml @@ -43,6 +43,8 @@ packages: - 'fping' - 'fuse-overlayfs' # for docker-ce-rootless-extras - 'fuse3' # for docker-ce-rootless-extras + - 'glibc-common' # for samba packages + - 'glibc-langpack-en' # for samba packages - 'gnutls' # for cifs-utils - 'gssproxy' # for nfs-utils - 'htop' diff --git 
a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat/packages.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat/packages.yml index 3d7d06044a..01fd4f153d 100644 --- a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat/packages.yml +++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat/packages.yml @@ -45,6 +45,8 @@ packages: - 'fping' - 'fuse-overlayfs' # for docker-ce-rootless-extras - 'fuse3' # for docker-ce-rootless-extras + - 'glibc-common' # for samba packages + - 'glibc-langpack-en' # for samba packages - 'gnutls' # for cifs-utils - 'gssproxy' # for nfs-utils - 'htop' diff --git a/ansible/playbooks/roles/repository/tasks/Debian/setup.yml b/ansible/playbooks/roles/repository/tasks/Debian/setup.yml index e512e51137..9bb9398ab5 100644 --- a/ansible/playbooks/roles/repository/tasks/Debian/setup.yml +++ b/ansible/playbooks/roles/repository/tasks/Debian/setup.yml @@ -7,6 +7,10 @@ - apache2 - libdpkg-perl # required by dpkg-scanpackages script cache_valid_time: 3600 # 1 h + register: result + until: result is succeeded + retries: "{{ retries }}" + delay: "{{ delay }}" when: not offline_mode - name: Create epirepo repository diff --git a/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml b/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml index 729b157438..88dbb2621c 100644 --- a/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml +++ b/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-kubeadm.yml @@ -36,6 +36,10 @@ vars: packages_to_install_with_version: >- {{ packages_to_install.items() | map('join', '=') | list }} + register: result + until: result is succeeded + retries: "{{ retries }}" + delay: "{{ delay }}" - name: K8s/install | Include hold packages task include_tasks: hold-packages.yml diff --git 
a/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages.yml b/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages.yml index b6bb7d7d63..677317c091 100644 --- a/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages.yml +++ b/ansible/playbooks/roles/upgrade/tasks/kubernetes/Debian/install-packages.yml @@ -7,6 +7,10 @@ - name: Install K8s packages when: packages_to_install_or_upgrade | length or packages_to_downgrade | length + register: result + until: result is succeeded + retries: "{{ retries }}" + delay: "{{ delay }}" vars: target_packages: kubeadm: "{{ version }}-00" # kubeadm may be removed as dependency and then has to be reinstalled @@ -52,6 +56,10 @@ apt: name: "{{ packages_to_downgrade }}" state: absent + register: result + until: result is succeeded + retries: "{{ retries }}" + delay: "{{ delay }}" when: packages_to_downgrade | length - name: "K8s/install | Install packages: {{ packages_to_install_with_version | join( ', ' ) }}" diff --git a/ansible/playbooks/roles/upgrade/tasks/legacy-haproxy/Debian/uninstall-haproxy.yml b/ansible/playbooks/roles/upgrade/tasks/legacy-haproxy/Debian/uninstall-haproxy.yml index ba744a5124..7d9adc444a 100644 --- a/ansible/playbooks/roles/upgrade/tasks/legacy-haproxy/Debian/uninstall-haproxy.yml +++ b/ansible/playbooks/roles/upgrade/tasks/legacy-haproxy/Debian/uninstall-haproxy.yml @@ -4,3 +4,7 @@ name: haproxy update_cache: true state: absent + register: result + until: result is succeeded + retries: "{{ retries }}" + delay: "{{ delay }}" diff --git a/ci/ansible/playbooks/os/ubuntu/upgrade-release.yml b/ci/ansible/playbooks/os/ubuntu/upgrade-release.yml index 6002cc0138..f688751504 100644 --- a/ci/ansible/playbooks/os/ubuntu/upgrade-release.yml +++ b/ci/ansible/playbooks/os/ubuntu/upgrade-release.yml @@ -58,6 +58,10 @@ - name: Install all updates apt: upgrade: safe # equivalent of 'apt upgrade' + register: result + until: result is succeeded + retries: "{{ retries }}" 
+ delay: "{{ delay }}" # 4) Reboot the system (to use the latest kernel) @@ -85,6 +89,10 @@ apt: autoremove: true purge: true + register: result + until: result is succeeded + retries: "{{ retries }}" + delay: "{{ delay }}" # 6) Upgrade OS release diff --git a/ci/pipelines/linters.yaml b/ci/pipelines/linters.yaml index 78d6ef7361..fb2bc1d5ac 100755 --- a/ci/pipelines/linters.yaml +++ b/ci/pipelines/linters.yaml @@ -1,151 +1,151 @@ ---- -trigger: - branches: - include: - - develop - -pr: - branches: - include: - - develop - -pool: - name: $(agentPoolName) - -variables: - ansible_lint_error_threshold: 338 - pylint_score_cli_threshold: 9.41 - pylint_score_tests_threshold: 9.78 - rubocop_linter_threshold: 183 - -jobs: - - job: Run_linters - displayName: Run linters - steps: - - task: UsePythonVersion@0 - displayName: Use Python 3.10 - # To be compatible with the epicli's parent image (python:3.10-slim). - inputs: - versionSpec: 3.10 - - - task: Bash@3 - displayName: Install Ansible Lint and its dependencies - # Installing Ansible 5.2.0 to be compatible with the epicli image. 
- inputs: - targetType: inline - script: | - python3 -m pip install --upgrade ansible==6.2.0 ansible-lint==6.5.0 ansible-lint-junit==0.16 lxml pip setuptools - - - task: Bash@3 - displayName: Run Ansible Lint - inputs: - targetType: inline - script: | - set -e - if ansible-lint -p ansible --show-relpath --nocolor 1> ansible_lint_stdout 2> ansible_lint_stderr \ - || grep 'violation(s) that are fatal' ansible_lint_stderr; then - # Suppress the next line when the "load-failure" bug in ansible-lint is solved - # https://github.com/ansible/ansible-lint/issues/2217 - sed -i '/load-failure/d' ansible_lint_stdout - error_count=$(wc -l < ansible_lint_stdout) - # Convert to junit - ansible-lint-junit ansible_lint_stdout -o ansible_lint_output.xml - test $error_count -le $(ansible_lint_error_threshold) - else - exit 1 - fi - - - task: PublishTestResults@2 - displayName: Publish Ansible Lint test results - inputs: - testResultsFiles: ansible_lint_output.xml - searchFolder: $(System.DefaultWorkingDirectory) - testRunTitle: Ansible Lint test results - - - task: Bash@3 - displayName: Install Pylint and its dependencies - inputs: - targetType: inline - script: | - # epicli deps: click - python3 -m pip install --upgrade pylint pylint-fail-under pylint-junit \ - click - - - task: Bash@3 - displayName: Run Pylint on CLI code - inputs: - targetType: inline - script: | - python3 -m pylint ./cli \ - --rcfile .pylintrc \ - --fail-under=$(pylint_score_cli_threshold) \ - --output cli_code_results.xml - - - task: PublishTestResults@2 - displayName: Publish Pylint test results for CLI Code - inputs: - testResultsFiles: cli_code_results.xml - searchFolder: $(System.DefaultWorkingDirectory) - testRunTitle: Pylint test results for CLI Code - - - task: Bash@3 - displayName: Run Pylint on test code - inputs: - targetType: inline - script: | - python3 -m pylint ./tests \ - --rcfile .pylintrc \ - --fail-under=$(pylint_score_tests_threshold) \ - --output test_code_results.xml \ - --disable=F0401 # 
Disable import-error checking - - - task: PublishTestResults@2 - displayName: Publish Pylint test results for test code - inputs: - testResultsFiles: test_code_results.xml - searchFolder: $(System.DefaultWorkingDirectory) - testRunTitle: Pylint test results for test code - - - task: Bash@3 - displayName: Install Rubocop and its dependencies - inputs: - targetType: inline - script: | - set -e - apt-get -y update - apt-get -y install rubygems - gem install rubocop-ast:1.17.0 rubocop:1.28.2 rubocop-junit_formatter - - - task: Bash@3 - displayName: Run Rubocop linter on test code - inputs: - targetType: inline - script: | - rubocop ./tests \ - -c .rubocop.yml \ - --require rubocop/formatter/junit_formatter \ - --format RuboCop::Formatter::JUnitFormatter \ - --out rubocop_results.xml \ - --fail-level error - - - task: Bash@3 - displayName: Assert number of linter failures - inputs: - targetType: inline - script: | - set -e - # Fetch number of detected failures from results file, then test if it does not exceed the declared threshold - # rubocop_linter_threshold is set based on latest linter results performed after code cleaning - detected_failures=$( \ - grep --only-matching 'failures=.[0-9]*.' 
rubocop_results.xml | \ - grep --only-matching '[0-9]*') - echo "Number of detected failures: $detected_failures" - echo "Failures threshold value: $(rubocop_linter_threshold)" - test $detected_failures -le $(rubocop_linter_threshold) - - - task: PublishTestResults@2 - displayName: Publish Rubocop linting test results - inputs: - testResultsFiles: rubocop_results.xml - searchFolder: $(System.DefaultWorkingDirectory) - testRunTitle: Rubocop linting test results +--- +trigger: + branches: + include: + - develop + +pr: + branches: + include: + - develop + +pool: + name: $(agentPoolName) + +variables: + ansible_lint_error_threshold: 338 + pylint_score_cli_threshold: 9.41 + pylint_score_tests_threshold: 9.78 + rubocop_linter_threshold: 183 + +jobs: + - job: Run_linters + displayName: Run linters + steps: + - task: UsePythonVersion@0 + displayName: Use Python 3.10 + # To be compatible with the epicli's parent image (python:3.10-slim). + inputs: + versionSpec: 3.10 + + - task: Bash@3 + displayName: Install Ansible Lint and its dependencies + # Installing Ansible 6.2.0 to be compatible with the epicli image. 
+ inputs: + targetType: inline + script: | + python3 -m pip install --upgrade ansible==6.2.0 ansible-compat==3.* ansible-lint==6.5.0 ansible-lint-junit==0.16 lxml pip setuptools + + - task: Bash@3 + displayName: Run Ansible Lint + inputs: + targetType: inline + script: | + set -e + if ansible-lint -p ansible --show-relpath --nocolor 1> ansible_lint_stdout 2> ansible_lint_stderr \ + || grep 'violation(s) that are fatal' ansible_lint_stderr; then + # Suppress the next line when the "load-failure" bug in ansible-lint is solved + # https://github.com/ansible/ansible-lint/issues/2217 + sed -i '/load-failure/d' ansible_lint_stdout + error_count=$(wc -l < ansible_lint_stdout) + # Convert to junit + ansible-lint-junit ansible_lint_stdout -o ansible_lint_output.xml + test $error_count -le $(ansible_lint_error_threshold) + else + exit 1 + fi + + - task: PublishTestResults@2 + displayName: Publish Ansible Lint test results + inputs: + testResultsFiles: ansible_lint_output.xml + searchFolder: $(System.DefaultWorkingDirectory) + testRunTitle: Ansible Lint test results + + - task: Bash@3 + displayName: Install Pylint and its dependencies + inputs: + targetType: inline + script: | + # epicli deps: click + python3 -m pip install --upgrade pylint==2.* pylint-fail-under pylint-junit \ + click + + - task: Bash@3 + displayName: Run Pylint on CLI code + inputs: + targetType: inline + script: | + python3 -m pylint ./cli \ + --rcfile .pylintrc \ + --fail-under=$(pylint_score_cli_threshold) \ + --output cli_code_results.xml + + - task: PublishTestResults@2 + displayName: Publish Pylint test results for CLI Code + inputs: + testResultsFiles: cli_code_results.xml + searchFolder: $(System.DefaultWorkingDirectory) + testRunTitle: Pylint test results for CLI Code + + - task: Bash@3 + displayName: Run Pylint on test code + inputs: + targetType: inline + script: | + python3 -m pylint ./tests \ + --rcfile .pylintrc \ + --fail-under=$(pylint_score_tests_threshold) \ + --output 
test_code_results.xml \ + --disable=F0401 # Disable import-error checking + + - task: PublishTestResults@2 + displayName: Publish Pylint test results for test code + inputs: + testResultsFiles: test_code_results.xml + searchFolder: $(System.DefaultWorkingDirectory) + testRunTitle: Pylint test results for test code + + - task: Bash@3 + displayName: Install Rubocop and its dependencies + inputs: + targetType: inline + script: | + set -e + apt-get -y update + apt-get -y install rubygems + gem install rubocop-ast:1.17.0 rubocop:1.28.2 rubocop-junit_formatter + + - task: Bash@3 + displayName: Run Rubocop linter on test code + inputs: + targetType: inline + script: | + rubocop ./tests \ + -c .rubocop.yml \ + --require rubocop/formatter/junit_formatter \ + --format RuboCop::Formatter::JUnitFormatter \ + --out rubocop_results.xml \ + --fail-level error + + - task: Bash@3 + displayName: Assert number of linter failures + inputs: + targetType: inline + script: | + set -e + # Fetch number of detected failures from results file, then test if it does not exceed the declared threshold + # rubocop_linter_threshold is set based on latest linter results performed after code cleaning + detected_failures=$( \ + grep --only-matching 'failures=.[0-9]*.' 
rubocop_results.xml | \ + grep --only-matching '[0-9]*') + echo "Number of detected failures: $detected_failures" + echo "Failures threshold value: $(rubocop_linter_threshold)" + test $detected_failures -le $(rubocop_linter_threshold) + + - task: PublishTestResults@2 + displayName: Publish Rubocop linting test results + inputs: + testResultsFiles: rubocop_results.xml + searchFolder: $(System.DefaultWorkingDirectory) + testRunTitle: Rubocop linting test results diff --git a/cli/src/ansible/AnsibleVarsGenerator.py b/cli/src/ansible/AnsibleVarsGenerator.py index 484fb6df5d..98d936f52f 100644 --- a/cli/src/ansible/AnsibleVarsGenerator.py +++ b/cli/src/ansible/AnsibleVarsGenerator.py @@ -148,10 +148,7 @@ def populate_group_vars(self, ansible_dir): main_vars['full_download'] = Config().full_download # Consider to move this to the provider level. - if self.cluster_model.provider != 'any': - main_vars['k8s_as_cloud_service'] = self.cluster_model.specification.cloud.k8s_as_cloud_service - else: - main_vars['k8s_as_cloud_service'] = False + main_vars['k8s_as_cloud_service'] = self.cluster_model.specification.cloud.k8s_as_cloud_service if self.is_upgrade_run: shared_config_doc = self.get_shared_config_from_manifest() diff --git a/docs/changelogs/CHANGELOG-2.0.md b/docs/changelogs/CHANGELOG-2.0.md index e55f60b7b3..5df851adeb 100644 --- a/docs/changelogs/CHANGELOG-2.0.md +++ b/docs/changelogs/CHANGELOG-2.0.md @@ -1,6 +1,17 @@ # Changelog 2.0 +## [2.0.10] YYYY-MM-DD + +### Fixed + +- [#3413](https://github.com/hitachienergy/epiphany/issues/3413) - [RHEL] Missing dependencies for `samba-client` package + +### Added + +- [#3408](https://github.com/hitachienergy/epiphany/issues/3408) - Apply changes made by the Lumada team +- [#3410](https://github.com/hitachienergy/epiphany/issues/3410) - AKS and Epiphany integration + ## [2.0.9] 2023-10-19 ### Updated diff --git a/docs/home/howto/DATABASES.md b/docs/home/howto/DATABASES.md index d96ec8473c..4fd1840e37 100644 --- 
a/docs/home/howto/DATABASES.md +++ b/docs/home/howto/DATABASES.md @@ -56,6 +56,10 @@ specification: value: 10 comment: maximum number of simultaneously running WAL sender processes when: replication + - name: max_replication_slots + value: 10 + comment: maximum number of replication slots that a server can support + when: replication - name: wal_keep_size value: 500 comment: the size of WAL files held for standby servers (MB) diff --git a/docs/home/howto/MODULES.md b/docs/home/howto/MODULES.md index b6607edaee..418e197e7e 100644 --- a/docs/home/howto/MODULES.md +++ b/docs/home/howto/MODULES.md @@ -211,6 +211,10 @@ AWS: value: 10 comment: maximum number of simultaneously running WAL sender processes when: replication + - name: max_replication_slots + value: 10 + comment: maximum number of replication slots that a server can support + when: replication - name: wal_keep_segments value: 34 comment: number of WAL files held for standby servers diff --git a/schema/any/defaults/epiphany-cluster.yml b/schema/any/defaults/epiphany-cluster.yml index f3dc9a0a08..e9899ee060 100644 --- a/schema/any/defaults/epiphany-cluster.yml +++ b/schema/any/defaults/epiphany-cluster.yml @@ -9,6 +9,8 @@ specification: admin_user: name: operations # YOUR-ADMIN-USERNAME key_path: /root/.ssh/epiphany-operations/id_rsa # YOUR-SSH-KEY-PATH + cloud: + k8s_as_cloud_service: False components: kubernetes_master: count: 1 diff --git a/schema/common/defaults/configuration/keycloak.yml b/schema/common/defaults/configuration/keycloak.yml index ef07688eb0..f604888d76 100644 --- a/schema/common/defaults/configuration/keycloak.yml +++ b/schema/common/defaults/configuration/keycloak.yml @@ -38,7 +38,7 @@ specification: # - '--spi-events-listener-jboss-logging-success-level=info' # enable successful events in logs extraEnv: |- - {%- raw -%} + {%- raw %} - name: KEYCLOAK_ADMIN valueFrom: secretKeyRef: @@ -63,7 +63,7 @@ specification: # Pod affinity affinity: |- - {%- raw -%} + {%- raw %} podAntiAffinity: 
preferredDuringSchedulingIgnoredDuringExecution: - labelSelector: @@ -82,7 +82,7 @@ specification: # Probes configuration based on https://github.com/keycloak/keycloak/blob/19.0/operator/src/main/java/org/keycloak/operator/controllers/KeycloakDeployment.java livenessProbe: |- - {%- raw -%} + {%- raw %} exec: command: - curl @@ -97,7 +97,7 @@ specification: failureThreshold: 15 readinessProbe: |- - {%- raw -%} + {%- raw %} exec: command: - curl @@ -112,7 +112,7 @@ specification: failureThreshold: 15 startupProbe: |- - {%- raw -%} + {%- raw %} exec: command: - curl @@ -156,7 +156,7 @@ specification: type: kubernetes.io/tls stringData: ca.crt: |- - {%- raw -%} + {%- raw %} {{- $ca := genCA "epiphany-keycloak-ca" 3650 }} {{- $_ := set $.Values "_shared" dict }} {{- $_ := set $.Values._shared "ca" $ca }} @@ -168,7 +168,7 @@ specification: {{- $caCert -}} {%- endraw %} tls.crt: |- - {%- raw -%} + {%- raw %} {{- $altNames := list "keycloak-0.keycloak-headless.keycloak.svc.cluster.local" "keycloak-0.keycloak-headless.keycloak" "keycloak-0.keycloak-headless" "keycloak-0" "keycloak-1.keycloak-headless.keycloak.svc.cluster.local" "keycloak-1.keycloak-headless.keycloak" "keycloak-1.keycloak-headless" "keycloak-1" "keycloak-headless.keycloak.svc.cluster.local" "keycloak-headless.keycloak" "keycloak-headless" "keycloak-http.keycloak.svc.cluster.local" "keycloak-http.keycloak" "keycloak-http" }} {{- $crt := genSignedCert "keycloak-http.keycloak.svc.cluster.local" nil $altNames 3650 $.Values._shared.ca }} {{- $_ := set $.Values._shared "crt" $crt }} @@ -180,7 +180,7 @@ specification: {{- $tlsCert -}} {%- endraw %} tls.key: |- - {%- raw -%} + {%- raw %} {{- $tlsKey := $.Values._shared.crt.Key }} {{- $secret := (lookup "v1" "Secret" .Release.Namespace (printf "%s-%s" (include "keycloak.fullname" .) 
"tls-certs")) }} {{- if $secret }} diff --git a/schema/common/defaults/configuration/postgresql.yml b/schema/common/defaults/configuration/postgresql.yml index 0e90e8c020..6e0fff9995 100644 --- a/schema/common/defaults/configuration/postgresql.yml +++ b/schema/common/defaults/configuration/postgresql.yml @@ -3,6 +3,7 @@ kind: configuration/postgresql title: PostgreSQL name: default specification: + custom_postgresql_config: '' # leave it empty to use postgresql-epiphany.conf config_file: parameter_groups: - name: CONNECTIONS AND AUTHENTICATION @@ -60,6 +61,10 @@ specification: value: 10 comment: maximum number of simultaneously running WAL sender processes when: replication + - name: max_replication_slots + value: 10 + comment: maximum number of replication slots that a server can support + when: replication - name: wal_keep_size value: 500 comment: the size of WAL files held for standby servers (MB) diff --git a/schema/common/defaults/configuration/shared-config.yml b/schema/common/defaults/configuration/shared-config.yml index e127bfdc90..3b41e79f86 100644 --- a/schema/common/defaults/configuration/shared-config.yml +++ b/schema/common/defaults/configuration/shared-config.yml @@ -3,10 +3,12 @@ kind: configuration/shared-config title: "Shared configuration that will be visible to all roles" name: default specification: - custom_repository_url: '' # leave it empty to use local repository or provide url to your repo - custom_image_registry_address: '' # leave it empty to use local registry or provide address of your registry (hostname:port). This registry will be used to populate K8s control plane and should contain all required images. - download_directory: /tmp # directory where files and images will be stored just before installing/loading - vault_location: '' # if empty "BUILD DIRECTORY/vault" will be used + custom_image_registry_address: '' # leave it empty to use local registry or provide address of your registry (hostname:port). 
This registry will be used to populate K8s control plane and should contain all required images. + custom_repository_url: '' # leave it empty to use local repository or provide url to your repo + delay: 30 # time between retries when task fails + download_directory: /tmp # directory where files and images will be stored just before installing/loading + promote_to_ha: false + retries: 60 # used for custom retries count when task fails + use_ha_control_plane: false + vault_location: '' # if empty "BUILD DIRECTORY/vault" will be used vault_tmp_file_location: SET_BY_AUTOMATION - use_ha_control_plane: False - promote_to_ha: False diff --git a/schema/common/validation/configuration/postgresql.yml b/schema/common/validation/configuration/postgresql.yml index 6e2ef5f130..aec8a23e24 100644 --- a/schema/common/validation/configuration/postgresql.yml +++ b/schema/common/validation/configuration/postgresql.yml @@ -4,6 +4,8 @@ title: "Postgresql specification schema" description: "Postgresql specification schema" type: object properties: + custom_postgresql_config: + type: string config_file: type: object properties: diff --git a/schema/common/validation/configuration/shared-config.yml b/schema/common/validation/configuration/shared-config.yml index 908861f488..d42eb0af85 100644 --- a/schema/common/validation/configuration/shared-config.yml +++ b/schema/common/validation/configuration/shared-config.yml @@ -4,17 +4,21 @@ title: "Shared-config specification schema" description: "Shared-config specification schema" type: object properties: - custom_repository_url: - type: string custom_image_registry_address: type: string + custom_repository_url: + type: string + delay: + type: integer download_directory: type: string + promote_to_ha: + type: boolean + retries: + type: integer + use_ha_control_plane: + type: boolean vault_location: type: string vault_tmp_file_location: type: string - use_ha_control_plane: - type: boolean - promote_to_ha: - type: boolean diff --git 
a/tests/spec/spec/applications/rabbitmq/rabbitmq.rb b/tests/spec/spec/applications/rabbitmq/rabbitmq.rb index bb92d38197..dbc2b3f40a 100644 --- a/tests/spec/spec/applications/rabbitmq/rabbitmq.rb +++ b/tests/spec/spec/applications/rabbitmq/rabbitmq.rb @@ -146,15 +146,13 @@ def callRabbitMQDeploymentTests describe 'Checking node health using RabbitMQ API' do service_replicas.times do |i| describe command("curl -o /dev/null -s -w '%{http_code}' -u #{user}#{i}:#{pass} \ - #{host_inventory['hostname']}:#{service_management_port}/api/healthchecks/node/rabbit@$(kubectl describe pods rabbitmq-cluster-#{i} \ - --namespace=#{service_namespace} | awk '/^IP:/ {print $2}')") do + #{host_inventory['hostname']}:#{service_management_port}/api/healthchecks/node/rabbit@rabbitmq-cluster-#{i}.#{service_name}.#{service_namespace}.svc.cluster.local") do it 'is expected to be equal' do expect(subject.stdout.to_i).to eq 200 end end describe command("curl -u #{user}#{i}:#{pass} \ - #{host_inventory['hostname']}:#{service_management_port}/api/healthchecks/node/rabbit@$(kubectl describe pods rabbitmq-cluster-#{i} \ - --namespace=#{service_namespace} | awk '/^IP:/ {print $2}')") do + #{host_inventory['hostname']}:#{service_management_port}/api/healthchecks/node/rabbit@rabbitmq-cluster-#{i}.#{service_name}.#{service_namespace}.svc.cluster.local") do its(:stdout_as_json) { should include('status' => /ok/) } its(:stdout_as_json) { should_not include('status' => /failed/) } its(:exit_status) { should eq 0 } diff --git a/tests/spec/spec/postgresql/postgresql_spec.rb b/tests/spec/spec/postgresql/postgresql_spec.rb index 7aa270b74d..7723cb4a07 100644 --- a/tests/spec/spec/postgresql/postgresql_spec.rb +++ b/tests/spec/spec/postgresql/postgresql_spec.rb @@ -38,6 +38,10 @@ max_wal_senders = config_docs[:postgresql]['specification']['config_file']['parameter_groups'].detect do |i| i['name'] == 'REPLICATION' end ['subgroups'].detect { |i| i['name'] == 'Sending Server(s)' }['parameters'].detect { |i| 
i['name'] == 'max_wal_senders' }['value'] +max_replication_slots = config_docs[:postgresql]['specification']['config_file']['parameter_groups'].detect do |i| + i['name'] == 'REPLICATION' +end ['subgroups'].detect { |i| i['name'] == 'Sending Server(s)' }['parameters'].detect { |i| i['name'] == 'max_replication_slots' }['value'] + pgaudit_enabled = config_docs[:postgresql]['specification']['extensions']['pgaudit']['enabled'] spec_doc = if upgradeRun? @@ -341,6 +345,10 @@ def queryForDropping its(:stdout) { should match(/^max_wal_senders = #{max_wal_senders}/) } its(:exit_status) { should eq 0 } end + describe command('cat /var/lib/pgsql/13/data/postgresql-epiphany.conf | grep max_replication_slots') do + its(:stdout) { should match(/^max_replication_slots = #{max_replication_slots}/) } + its(:exit_status) { should eq 0 } + end describe command('cat /var/lib/pgsql/13/data/postgresql-epiphany.conf | grep wal_keep_size') do its(:stdout) { should match(/^wal_keep_size = #{wal_keep_size}/) } its(:exit_status) { should eq 0 } @@ -367,6 +375,10 @@ def queryForDropping its(:stdout) { should match(/^max_wal_senders = #{max_wal_senders}/) } its(:exit_status) { should eq 0 } end + describe command('cat /etc/postgresql/13/main/postgresql-epiphany.conf | grep max_replication_slots') do + its(:stdout) { should match(/^max_replication_slots = #{max_replication_slots}/) } + its(:exit_status) { should eq 0 } + end describe command('cat /etc/postgresql/13/main/postgresql-epiphany.conf | grep wal_keep_size') do its(:stdout) { should match(/^wal_keep_size = #{wal_keep_size}/) } its(:exit_status) { should eq 0 }