diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 1a96a2d25a..be768c8fa3 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -17,7 +17,7 @@ RUN : INSTALL APT REQUIREMENTS \
        autossh curl gcc git git-lfs iputils-ping \
        jq libc6-dev libcap2-bin libffi-dev lsb-release \
        make musl-dev openssh-client procps \
-       psmisc ruby-full sudo tar \
+       psmisc rsync ruby-full sudo tar \
        unzip vim \
    && apt-get -q autoremove -y \
    && apt-get -q clean -y \
diff --git a/Dockerfile b/Dockerfile
index c5158d375f..a99d54cac2 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -15,7 +15,7 @@ COPY . /epicli
 RUN : INSTALL APT REQUIREMENTS \
    && apt-get update \
    && apt-get install --no-install-recommends -y \
-       autossh curl gcc jq libcap2-bin libc6-dev libffi-dev make musl-dev openssh-client procps psmisc ruby-full sudo tar unzip vim \
+       autossh curl gcc jq libcap2-bin libc6-dev libffi-dev make musl-dev openssh-client procps psmisc rsync ruby-full sudo tar unzip vim \
 \
    && : INSTALL HELM BINARY \
    && curl -fsSLO https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz \
diff --git a/ansible/playbooks/roles/download/tasks/download_file.yml b/ansible/playbooks/roles/download/tasks/download_file.yml
index 17103320b0..0959e66d5d 100644
--- a/ansible/playbooks/roles/download/tasks/download_file.yml
+++ b/ansible/playbooks/roles/download/tasks/download_file.yml
@@ -1,7 +1,7 @@
 ---
 - name: "Download file {{ file_name }}"
   get_url:
-    url: "{{ url | default(repository_url + '/files/' + file_name) }}"
+    url: "{{ url | default(repository_url + '/' + _subdir | default('files') + '/' + file_name) }}"
     dest: "{{ download_directory }}"
     validate_certs: "{{ validate_certs }}"
     force: "{{ force | default(false) }}"
diff --git a/ansible/playbooks/roles/grafana/tasks/dashboards.yml b/ansible/playbooks/roles/grafana/tasks/dashboards.yml
index 1aea650bd7..c7ada9725c 100644
--- a/ansible/playbooks/roles/grafana/tasks/dashboards.yml
+++ b/ansible/playbooks/roles/grafana/tasks/dashboards.yml
@@ -18,6 +18,7 @@
     url: "https://grafana.com/api/dashboards/{{ item.dashboard_id }}/revisions/{{ item.revision_id }}/download"
     file_name: "grafana_dashboard_{{ item.dashboard_id }}.json"
     download_directory: "{{ tmp_dashboards.path }}/{{ item.dashboard_id }}.json"
+    _subdir: "grafana_dashboards"
   retries: 5
   delay: 2
   loop: "{{ grafana_online_dashboards }}"
@@ -45,6 +46,7 @@
     file_name: "grafana_dashboard_{{ item.dashboard_id }}.json"
     download_directory: "{{ tmp_dashboards.path }}/{{ item.dashboard_id }}.json"
     force: true  # to keep idempotency (avoid HTTP Error 304: Not Modified)
+    _subdir: "grafana_dashboards"
   loop: "{{ grafana_external_dashboards }}"
   when: grafana_external_dashboards != []
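Taken together, the download role and grafana changes above mean a requirement is now fetched from {{ repository_url }}/<subdir>/<file_name>, where the subdirectory defaults to 'files' and callers can override it via _subdir (Jinja2 applies the default filter to _subdir before string concatenation). A minimal Python sketch of that resolution logic, with a hypothetical repository URL, shown only to illustrate the two cases:

# Sketch of the URL resolution in download_file.yml (illustrative only).
def resolve_url(repository_url: str, file_name: str,
                url: str = None, _subdir: str = None) -> str:
    # An explicit `url` wins; otherwise build <repository_url>/<_subdir or 'files'>/<file_name>.
    if url is not None:
        return url
    return f"{repository_url}/{_subdir or 'files'}/{file_name}"

# Default behaviour is unchanged (hypothetical repository URL):
assert resolve_url('http://10.0.0.1/epirepo', 'helm-v3.2.0-linux-amd64.tar.gz') == \
    'http://10.0.0.1/epirepo/files/helm-v3.2.0-linux-amd64.tar.gz'
# Grafana dashboards are now served from their own subdirectory:
assert resolve_url('http://10.0.0.1/epirepo', 'grafana_dashboard_315.json',
                   _subdir='grafana_dashboards') == \
    'http://10.0.0.1/epirepo/grafana_dashboards/grafana_dashboard_315.json'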
diff --git a/ansible/playbooks/roles/repository/defaults/main.yml b/ansible/playbooks/roles/repository/defaults/main.yml
new file mode 100644
index 0000000000..5a0a7db45d
--- /dev/null
+++ b/ansible/playbooks/roles/repository/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+download_requirements_dir: "/var/tmp/epi-download-requirements"
+download_requirements_script: "{{ download_requirements_dir }}/download-requirements.py"
+download_requirements_flag: "{{ download_requirements_dir }}/download-requirements-done.flag"
diff --git a/ansible/playbooks/roles/repository/files/client/Debian/enable-system-repos.sh b/ansible/playbooks/roles/repository/files/client/Debian/enable-system-repos.sh
index 12fa5c1aba..deeaf0d6c3 100644
--- a/ansible/playbooks/roles/repository/files/client/Debian/enable-system-repos.sh
+++ b/ansible/playbooks/roles/repository/files/client/Debian/enable-system-repos.sh
@@ -2,4 +2,4 @@
 
 REPOS_BACKUP_FILE=/var/tmp/enabled-system-repos.tar
 
-tar -C / --absolute-name -xvf ${REPOS_BACKUP_FILE} 2>&1
\ No newline at end of file
+tar -C / --absolute-name -xvf ${REPOS_BACKUP_FILE} 2>&1
diff --git a/ansible/playbooks/roles/repository/files/download-requirements/centos-7/add-repositories.aarch64.sh b/ansible/playbooks/roles/repository/files/download-requirements/centos-7/add-repositories.aarch64.sh
deleted file mode 100644
index 637aadfc83..0000000000
--- a/ansible/playbooks/roles/repository/files/download-requirements/centos-7/add-repositories.aarch64.sh
+++ /dev/null
@@ -1 +0,0 @@
-#!/usr/bin/env bash -eu
diff --git a/ansible/playbooks/roles/repository/files/download-requirements/centos-7/add-repositories.multiarch.sh b/ansible/playbooks/roles/repository/files/download-requirements/centos-7/add-repositories.multiarch.sh
deleted file mode 100644
index c59a6a3e74..0000000000
--- a/ansible/playbooks/roles/repository/files/download-requirements/centos-7/add-repositories.multiarch.sh
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env bash -eu
-
-DOCKER_CE_PATCHED_REPO_CONF=$(cat <<'EOF'
-[docker-ce-stable-patched]
-name=Docker CE Stable - patched centos/7/$basearch/stable
-baseurl=https://download.docker.com/linux/centos/7/$basearch/stable
-enabled=1
-gpgcheck=1
-gpgkey=https://download.docker.com/linux/centos/gpg
-EOF
-)
-
-ELASTIC_6_REPO_CONF=$(cat <<'EOF'
-[elastic-6]
-name=Elastic repository for 6.x packages
-baseurl=https://artifacts.elastic.co/packages/oss-6.x/yum
-gpgcheck=1
-gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
-enabled=1
-autorefresh=1
-type=rpm-md
-EOF
-)
-
-ELASTICSEARCH_7_REPO_CONF=$(cat <<'EOF'
-[elasticsearch-7.x]
-name=Elasticsearch repository for 7.x packages
-baseurl=https://artifacts.elastic.co/packages/oss-7.x/yum
-gpgcheck=1
-gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
-enabled=1
-autorefresh=1
-type=rpm-md
-EOF
-)
-
-ELASTICSEARCH_CURATOR_REPO_CONF=$(cat <<'EOF'
-[curator-5]
-name=CentOS/RHEL 7 repository for Elasticsearch Curator 5.x packages
-baseurl=https://packages.elastic.co/curator/5/centos/7
-gpgcheck=1
-gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch
-enabled=1
-EOF
-)
-
-KUBERNETES_REPO_CONF=$(cat <<'EOF'
-[kubernetes]
-name=Kubernetes
-baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-$basearch
-enabled=1
-gpgcheck=1
-repo_gpgcheck=1
-gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
-EOF
-)
-
-OPENDISTRO_REPO_CONF=$(cat <<'EOF'
-[opendistroforelasticsearch-artifacts-repo]
-name=Release RPM artifacts of OpenDistroForElasticsearch
-baseurl=https://d3g5vo6xdbdb9a.cloudfront.net/yum/noarch/
-enabled=1
-gpgkey=https://d3g5vo6xdbdb9a.cloudfront.net/GPG-KEY-opendistroforelasticsearch
-gpgcheck=1
-repo_gpgcheck=1
-autorefresh=1
-type=rpm-md
-EOF
-)
-
-POSTGRESQL_REPO_CONF=$(cat <<'EOF'
-[pgdg13]
-name=PostgreSQL 13 for RHEL/CentOS $releasever - $basearch
-baseurl=https://download.postgresql.org/pub/repos/yum/13/redhat/rhel-$releasever-$basearch
-enabled=1
-gpgcheck=1
-gpgkey=https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG
-EOF
-)
-
-POSTGRESQL_COMMON_REPO_CONF=$(cat <<'EOF'
-[pgdg-common]
-name=PostgreSQL common for RHEL/CentOS $releasever - $basearch
-baseurl=https://download.postgresql.org/pub/repos/yum/common/redhat/rhel-$releasever-$basearch
-enabled=1
-gpgcheck=1
-gpgkey=https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG -EOF -) - -RABBITMQ_SERVER_REPO_CONF=$(cat <<'EOF' -[rabbitmq-server] -name=rabbitmq-rpm -baseurl=https://packagecloud.io/rabbitmq/rabbitmq-server/el/7/$basearch -gpgcheck=1 -gpgkey=https://packagecloud.io/rabbitmq/rabbitmq-server/gpgkey -repo_gpgcheck=1 -sslcacert=/etc/pki/tls/certs/ca-bundle.crt -enabled=1 -EOF -) - -# Official Docker CE repository, added with https://download.docker.com/linux/centos/docker-ce.repo, -# has broken URL (https://download.docker.com/linux/centos/7Server/x86_64/stable) for longer time. -# So direct (patched) link is used first if available. -add_repo_as_file 'docker-ce-stable-patched' "$DOCKER_CE_PATCHED_REPO_CONF" -if ! is_repo_available "docker-ce-stable-patched"; then - disable_repo "docker-ce-stable-patched" - add_repo 'docker-ce' 'https://download.docker.com/linux/centos/docker-ce.repo' -fi -add_repo_as_file 'elastic-6' "$ELASTIC_6_REPO_CONF" -add_repo_as_file 'elasticsearch-7' "$ELASTICSEARCH_7_REPO_CONF" -add_repo_as_file 'elasticsearch-curator-5' "$ELASTICSEARCH_CURATOR_REPO_CONF" -add_repo_as_file 'kubernetes' "$KUBERNETES_REPO_CONF" -add_repo_as_file 'opendistroforelasticsearch' "$OPENDISTRO_REPO_CONF" -add_repo_as_file 'postgresql-13' "$POSTGRESQL_REPO_CONF" -add_repo_as_file 'rabbitmq' "$RABBITMQ_SERVER_REPO_CONF" diff --git a/ansible/playbooks/roles/repository/files/download-requirements/centos-7/add-repositories.x86_64.sh b/ansible/playbooks/roles/repository/files/download-requirements/centos-7/add-repositories.x86_64.sh deleted file mode 100644 index 20bed5ddf5..0000000000 --- a/ansible/playbooks/roles/repository/files/download-requirements/centos-7/add-repositories.x86_64.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -eu - -add_repo_from_script 'https://dl.2ndquadrant.com/default/release/get/10/rpm' # for repmgr -add_repo_from_script 'https://dl.2ndquadrant.com/default/release/get/13/rpm' - -disable_repo '2ndquadrant-dl-default-release-pg10-debug' # script adds 2 repositories, only 1 is required -disable_repo '2ndquadrant-dl-default-release-pg13-debug' diff --git a/ansible/playbooks/roles/repository/files/download-requirements/centos-7/download-requirements.sh b/ansible/playbooks/roles/repository/files/download-requirements/centos-7/download-requirements.sh deleted file mode 100644 index 07665aeec8..0000000000 --- a/ansible/playbooks/roles/repository/files/download-requirements/centos-7/download-requirements.sh +++ /dev/null @@ -1,799 +0,0 @@ -#!/usr/bin/env bash - -# VERSION 1.0.5 - -# NOTE: You can run only one instance of this script, new instance kills the previous one -# This limitation is for Ansible - -set -euo pipefail - -# set variables needed by common_functions -readonly internet_access_checks_enabled="yes" -readonly script_path="$(readlink -f $(dirname $0))" -. "${script_path}/common/common_functions.sh" - -# === Functions (in alphabetical order) === - -# params: -add_repo() { - local repo_id="$1" - local repo_url="$2" - - if ! is_repo_enabled "$repo_id"; then - echol "Adding repository: $repo_id" - yum-config-manager --add-repo "$repo_url" || - exit_with_error "Command failed: yum-config-manager --add-repo \"$repo_url\"" - # to accept import of GPG keys - yum -y repolist > /dev/null || - exit_with_error "Command failed: yum -y repolist" - fi -} - -# params: -add_repo_as_file() { - local repo_id="$1" - local config_file_content="$2" - local config_file_name="$repo_id.repo" - - if ! 
is_repo_enabled "$repo_id"; then - echol "Adding repository: $repo_id" - cat <<< "$config_file_content" > "/etc/yum.repos.d/$config_file_name" || - exit_with_error "Function add_repo_as_file failed for repo: $repo_id" - local -a gpg_key_urls - IFS=" " read -r -a gpg_key_urls \ - <<< "$(grep -i --only-matching --perl-regexp '(?<=^gpgkey=)http[^#\n]+' <<< "$config_file_content")" - if (( ${#gpg_key_urls[@]} > 0 )); then - import_repo_gpg_keys "${gpg_key_urls[@]}" 3 - fi - # to accept import of repo's GPG key (for repo_gpgcheck=1) - yum -y repolist > /dev/null || exit_with_error "Command failed: yum -y repolist" - fi -} - -# params: -add_repo_from_script() { - local script_url="$1" - - echol "Running: curl $script_url | bash" - curl "$script_url" | bash -} - -# params: ... [path_N_to_backup] -backup_files() { - local backup_file_path="$1" - shift - local paths_to_backup=("$@") - - # --directory='/' is for tar --verify - tar --create --verbose --verify --directory="/" --file="$backup_file_path" "${paths_to_backup[@]}" -} - -# params: -create_directory() { - local dir_path="$1" - - if [[ -d "$dir_path" ]]; then - echol "Directory $dir_path already exists" - else - echol "Creating directory: $dir_path" - mkdir -p "$dir_path" || exit_with_error "Command failed: mkdir -p \"$dir_path\"" - fi -} - -# params: -disable_repo() { - local repo_id="$1" - - if yum repolist enabled | grep --quiet "$repo_id"; then - echol "Disabling repository: $repo_id" - yum-config-manager --disable "$repo_id" || - exit_with_error "Command failed: yum-config-manager --disable \"$repo_id\"" - fi -} - -# params: [new_filename] -download_file() { - local file_url="$1" - local dest_dir="$2" - - if [[ ${3-} ]]; then - local file_name=$3 - else - local file_name - file_name=$(basename "$file_url") - fi - - local dest_path="${dest_dir}/${file_name}" - local retries=3 - - if [[ ${3-} ]]; then - echol "Downloading file: $file_url as $file_name" - run_cmd_with_retries wget --quiet --directory-prefix="$dest_dir" "$file_url" -O "$dest_path" $retries || \ - exit_with_error "Command failed: wget --no-verbose --directory-prefix=$dest_dir $file_url $retries" - else - echol "Downloading file: $file_url" - run_cmd_with_retries wget --quiet --directory-prefix="$dest_dir" "$file_url" $retries || \ - exit_with_error "Command failed: wget --no-verbose --directory-prefix=$dest_dir $file_url $retries" - fi -} - -# params: -download_image() { - local image_name="$1" - local dest_dir="$2" - - local splited_image=(${image_name//:/ }) - local repository=${splited_image[0]} - local tag=${splited_image[1]} - local repo_basename=$(basename -- "$repository") - local dest_path="${dest_dir}/${repo_basename}-${tag}.tar" - local retries=3 - - if [[ -f $dest_path ]]; then - echol "Image file: $dest_path already exists. Skipping..." - else - # use temporary file for downloading to be safe from sudden interruptions (network, ctrl+c) - local tmp_file_path=$(mktemp) - local crane_cmd="$CRANE_BIN pull --insecure --platform=${DOCKER_PLATFORM} --format=legacy ${image_name} ${tmp_file_path}" - echol "Downloading image: $image" - { run_cmd_with_retries $crane_cmd $retries && chmod 644 $tmp_file_path && mv $tmp_file_path $dest_path; } || - exit_with_error "crane failed, command was: $crane_cmd && chmod 644 $tmp_file_path && mv $tmp_file_path $dest_path" - fi -} - -# params: ... 
[package_N] -download_packages() { - local dest_dir="$1" - shift - local packages="$@" - local retries=3 - - if [[ -n $packages ]]; then - # when using --archlist=x86_64 yumdownloader (yum-utils-1.1.31-52) also downloads i686 packages - run_cmd_with_retries yumdownloader --quiet --archlist="$ARCH" --exclude='*i686' --destdir="$dest_dir" $packages $retries - fi -} - -# params: -enable_repo() { - local repo_id="$1" - - if ! yum repolist enabled | grep --quiet "$repo_id"; then - echol "Enabling repository: $repo_id" - yum-config-manager --enable "$repo_id" || - exit_with_error "Command failed: yum-config-manager --enable \"$repo_id\"" - fi -} - -# params: -get_package_dependencies_with_arch() { - # $1 reserved for result - local package="$2" - - local query_output=$(repoquery --requires --resolve --queryformat '%{name}.%{arch}' --archlist=$ARCH,noarch "$package") || - exit_with_error "repoquery failed for dependencies of package: $package with exit code: $?, output was: $query_output" - - if [[ -z $query_output ]]; then - echol "No dependencies found for package: $package" - elif grep --ignore-case --perl-regexp '\b(? -get_package_with_version_arch() { - # $1 reserved for result - local package="$2" - - local query_output=$(repoquery --queryformat '%{ui_nevra}' --archlist=$ARCH,noarch "$package") || - exit_with_error "repoquery failed for package: $package with exit code: $?, output was: $query_output" - - # yumdownloader doesn't set error code if repoquery returns empty output - [[ -n $query_output ]] || exit_with_error "repoquery failed: package $package not found" - if grep --ignore-case --perl-regexp '\b(? -get_packages_with_version_arch() { - local result_var_name="$1" - shift - local packages=("$@") - local packages_with_version_arch=() - - for package in "${packages[@]}"; do - get_package_with_version_arch 'QUERY_OUTPUT' "$package" - packages_with_version_arch+=("$QUERY_OUTPUT") - done - - eval $result_var_name='("${packages_with_version_arch[@]}")' -} - -# params: -get_requirements_from_group() { - # $1 reserved for result - local group_name="$2" - local requirements_file_path="$3" - local all_requirements=$(grep --only-matching '^[^#]*' "$requirements_file_path" | sed -e 's/[[:space:]]*$//') - - if [[ $group_name == "files" ]]; then - local requirements_from_group=$(awk "/^$/ {next}; /\[${group_name}\]/ {f=1; f=2; next}; /^\[/ {f=0}; f {print \$0}" <<< "$all_requirements") || - exit_with_error "Function get_requirements_from_group failed for group: $group_name" - else - local requirements_from_group=$(awk "/^$/ {next}; /\[${group_name}\]/ {f=1; next}; /^\[/ {f=0}; f {print \$0}" <<< "$all_requirements") || - exit_with_error "Function get_requirements_from_group failed for group: $group_name" - fi - - [[ -n $requirements_from_group ]] || echol "No requirements found for group: $group_name" - - eval $1='$requirements_from_group' -} - -# params: -get_unique_array() { - local result_var_name="$1" - shift - local array=("$@") - - # filter out duplicates - array=($(echo "${array[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ')) - - eval $result_var_name='("${array[@]}")' -} - -# params: -import_repo_gpg_keys() { - local retries=${!#} # get last arg - local urls=( "${@:1:$# - 1}" ) # remove last arg - - for url in "${urls[@]}"; do - run_cmd_with_retries rpm --import "$url" "$retries" - done -} - -# params: [package_name] -install_package() { - local package_name_or_url="$1" - local package_name="$1" - - [ $# -gt 1 ] && package_name="$2" - - echol "Installing package: $package_name" - if yum 
install -y "$package_name_or_url"; then - echo "$package_name" >> "$INSTALLED_PACKAGES_FILE_PATH" - else - exit_with_error "Command failed: yum install -y \"$package_name_or_url\"" - fi -} - -# params: -is_package_installed() { - local package="$1" - - if rpm --query --quiet "$package"; then - echol "Package $package already installed" - return 0 - else - return 1 - fi -} - -# params: -is_repo_available() { - local repo_id="$1" - - echol "Checking if '$repo_id' repo is available" - yum -q --disablerepo=* --enablerepo="$repo_id" repoinfo > /dev/null # returns 1 when 'Error 404 - Not Found' -} - -# params: -is_repo_enabled() { - local repo_id="$1" - - if yum repolist | grep --quiet "$repo_id"; then - echol "Repository $repo_id already enabled" - return 0 - else - return 1 - fi -} - -# params: -remove_package() { - local package="$1" - - if rpm --query --quiet "$package"; then - echol "Removing package: $package" - yum remove -y "$package" || exit_with_error "Command failed: yum remove -y \"$package\"" - fi -} - -# params: -remove_added_repos() { - local yum_repos_backup_tar_file_path="$1" - - declare -A initial_yum_repo_files - for repo_config_file in $(tar -tf "$yum_repos_backup_tar_file_path" | grep '.repo$' | xargs -L 1 --no-run-if-empty basename); do - initial_yum_repo_files["$repo_config_file"]=1 - done - - for repo_config_file in $(find /etc/yum.repos.d/ -maxdepth 1 -type f -name '*.repo' -printf "%f\n"); do - if (( ${initial_yum_repo_files["$repo_config_file"]:-0} == 0)); then - # remove only if not owned by a package - if ! rpm --quiet --query --file "/etc/yum.repos.d/$repo_config_file"; then - remove_file "/etc/yum.repos.d/$repo_config_file" - fi - fi - done -} - -# params: -remove_file() { - local file_path="$1" - - echol "Removing file: $file_path" - rm -f "$file_path" || exit_with_error "Command failed: rm -f \"$file_path\"" -} - -# params: -remove_installed_packages() { - local installed_packages_list_file="$1" - - if [ -f "$installed_packages_list_file" ]; then - for package in $(cat $installed_packages_list_file | sort --unique); do - remove_package "$package" - done - remove_file "$installed_packages_list_file" - fi -} - -remove_yum_cache_for_untracked_repos() { - local basearch releasever - basearch=$(uname --machine) - releasever=$(rpm -q --provides "$(rpm -q --whatprovides 'system-release(releasever)')" | grep "system-release(releasever)" | cut -d ' ' -f 3) - local cachedir find_output - cachedir=$(grep --only-matching --perl-regexp '(?<=^cachedir=)[^#\n]+' /etc/yum.conf) - cachedir="${cachedir/\$basearch/$basearch}" - cachedir="${cachedir/\$releasever/$releasever}" - find_output=$(find "$cachedir" -mindepth 1 -maxdepth 1 -type d -exec basename '{}' ';') - local -a repos_with_cache=() - if [ -n "$find_output" ]; then - readarray -t repos_with_cache <<< "$find_output" - fi - local all_repos_output - all_repos_output=$(yum repolist -v all | grep --only-matching --perl-regexp '(?<=^Repo-id)[^/]+' | sed -e 's/^[[:space:]:]*//') - local -a all_repos=() - readarray -t all_repos <<< "$all_repos_output" - if (( ${#repos_with_cache[@]} > 0 )); then - for cached_repo in "${repos_with_cache[@]}"; do - if ! 
_in_array "$cached_repo" "${all_repos[@]}"; then - run_cmd rm -rf "$cachedir/$cached_repo" - fi - done - fi -} - -# Runs command as array with printing it, doesn't support commands with shell operators (such as pipe or redirection) -# params: [--no-exit-on-error] -run_cmd() { - local cmd_arr=("$@") - - local exit_on_error=1 - if [[ ${cmd_arr[-1]} == '--no-exit-on-error' ]]; then - exit_on_error=0 - cmd_arr=( "${cmd_arr[@]:0:$# - 1}" ) # remove last item - fi - - local escaped_string return_code - escaped_string=$(_print_array_as_shell_escaped_string "${cmd_arr[@]}") - echol "Executing: ${escaped_string}" - "${cmd_arr[@]}"; return_code=$? - if (( return_code != 0 )) && (( exit_on_error )); then - exit_with_error "Command failed: ${escaped_string}" - else - return $return_code - fi -} - -# Runs command with retries, doesn't support commands with shell operators (such as pipe or redirection) -# params: -run_cmd_with_retries() { - # pop 'retries' argument - local retries=${!#} # get last arg (indirect expansion) - set -- "${@:1:$#-1}" # set new "$@" - - local cmd_arr=("$@") - ( # sub-shell is used to limit scope for 'set +e' - set +e - trap - ERR # disable global trap locally - for ((i=0; i <= retries; i++)); do - run_cmd "${cmd_arr[@]}" '--no-exit-on-error' - return_code=$? - if (( return_code == 0 )); then - break - elif (( i < retries )); then - sleep 1 - echol "retrying ($(( i+1 ))/${retries})" - else - echol "ERROR: all attempts failed" - local escaped_string - escaped_string=$(_print_array_as_shell_escaped_string "${cmd_arr[@]}") - exit_with_error "Command failed: ${escaped_string}" - fi - done - return $return_code - ) -} - -usage() { - echo "usage: ./$(basename $0) [--no-logfile]" - echo "example: ./$(basename $0) /tmp/downloads" - exit 1 -} - -validate_bash_version() { - local major_version=${BASH_VERSINFO[0]} - local minor_version=${BASH_VERSINFO[1]} - local required_version=(4 2) # (minor major) - if (( major_version < ${required_version[0]} )) || (( minor_version < ${required_version[1]} )); then - exit_with_error "This script requires Bash version ${required_version[0]}.${required_version[1]} or higher." 
- fi -} - -# === Helper functions (in alphabetical order) === - -_get_shell_escaped_array() { - if (( $# > 0 )); then - printf '%q\n' "$@" - fi -} - -# params: -_in_array() { - local value=${1} - shift - local array=( "$@" ) - - (( ${#array[@]} > 0 )) && printf '%s\n' "${array[@]}" | grep -q -Fx "$value" -} - -# Prints string in format that can be reused as shell input (escapes non-printable characters) -_print_array_as_shell_escaped_string() { - local output - output=$(_get_shell_escaped_array "$@") - local escaped=() - if [ -n "$output" ]; then - readarray -t escaped <<< "$output" - fi - if (( ${#escaped[@]} > 0 )); then - printf '%s\n' "${escaped[*]}" - fi -} - -# === Start === - -validate_bash_version - -if [[ $# -lt 1 ]]; then - usage >&2 -fi - -readonly START_TIME=$(date +%s) - -# --- Parse arguments --- - -POSITIONAL_ARGS=() -CREATE_LOGFILE='yes' -while [[ $# -gt 0 ]]; do - case $1 in - --no-logfile) - CREATE_LOGFILE='no' - shift # past argument - ;; - *) # unknown option - POSITIONAL_ARGS+=("$1") # save it in an array for later - shift - ;; - esac -done -set -- "${POSITIONAL_ARGS[@]}" # restore positional arguments - -# --- Global variables --- - -# dirs -readonly DOWNLOADS_DIR="$1" # root directory for downloads -readonly FILES_DIR="${DOWNLOADS_DIR}/files" -readonly PACKAGES_DIR="${DOWNLOADS_DIR}/packages" -readonly IMAGES_DIR="${DOWNLOADS_DIR}/images" -readonly REPO_PREREQ_PACKAGES_DIR="${PACKAGES_DIR}/repo-prereqs" -readonly SCRIPT_DIR="$(dirname $(readlink -f $0))" # want absolute path - -# files -readonly SCRIPT_FILE_NAME=$(basename "$0") -readonly LOG_FILE_NAME="${SCRIPT_FILE_NAME}.log" -readonly LOG_FILE_PATH="${SCRIPT_DIR}/${LOG_FILE_NAME}" -readonly YUM_CONFIG_BACKUP_FILE_PATH="${SCRIPT_DIR}/${SCRIPT_FILE_NAME}-yum-repos-backup-tmp-do-not-remove.tar" -readonly CRANE_BIN="${SCRIPT_DIR}/crane" -readonly INSTALLED_PACKAGES_FILE_PATH="${SCRIPT_DIR}/${SCRIPT_FILE_NAME}-installed-packages-list-do-not-remove.tmp" -readonly PID_FILE_PATH="/var/run/${SCRIPT_FILE_NAME}.pid" -readonly ADD_MULTIARCH_REPOSITORIES_SCRIPT="${SCRIPT_DIR}/add-repositories.multiarch.sh" - -#arch -readonly ARCH=$(uname -m) -echol "Detected arch: ${ARCH}" -readonly REQUIREMENTS_FILE_PATH="${SCRIPT_DIR}/requirements.${ARCH}.txt" -readonly ADD_ARCH_REPOSITORIES_SCRIPT="${SCRIPT_DIR}/add-repositories.${ARCH}.sh" -case $ARCH in -x86_64) - readonly DOCKER_PLATFORM="linux/amd64" - ;; - -aarch64) - readonly DOCKER_PLATFORM="linux/arm64" - ;; - -*) - exit_with_error "Arch ${ARCH} unsupported" - ;; -esac -echol "Docker platform: ${DOCKER_PLATFORM}" - -# --- Checks --- - -[ $EUID -eq 0 ] || { echo "You have to run as root" && exit 1; } - -[[ -f $REQUIREMENTS_FILE_PATH ]] || exit_with_error "File not found: $REQUIREMENTS_FILE_PATH" - -# --- Want to have only one instance for Ansible --- - -if [ -f $PID_FILE_PATH ]; then - readonly PID_FROM_FILE=$(cat $PID_FILE_PATH 2> /dev/null) - if [[ -n $PID_FROM_FILE ]] && kill -0 $PID_FROM_FILE > /dev/null 2>&1; then - echol "Found running process with pid: $PID_FROM_FILE, cmd: $(ps -p $PID_FROM_FILE -o cmd=)" - if ps -p $PID_FROM_FILE -o cmd= | grep --quiet $SCRIPT_FILE_NAME; then - echol "Killing old instance using SIGTERM" - kill -s SIGTERM $PID_FROM_FILE # try gracefully - if sleep 3 && kill -0 $PID_FROM_FILE > /dev/null 2>&1; then - echol "Still running, killing old instance using SIGKILL" - kill -s SIGKILL $PID_FROM_FILE # forcefully - fi - else - remove_file $PID_FILE_PATH - exit_with_error "Process with pid: $PID_FILE_PATH seems to be not an instance of this script" - fi - 
else - echol "Process with pid: $PID_FROM_FILE not found" - fi - remove_file $PID_FILE_PATH -fi - -echol "PID is: $$, creating file: $PID_FILE_PATH" -echo $$ > $PID_FILE_PATH || exit_with_error "Command failed: echo $$ > $PID_FILE_PATH" - -# --- Parse requirements file --- - -# Requirements are grouped using sections: [packages-repo-prereqs], [packages], [files], [images] -get_requirements_from_group 'REPO_PREREQ_PACKAGES' 'packages-repo-prereqs' "$REQUIREMENTS_FILE_PATH" -get_requirements_from_group 'CRANE' 'crane' "$REQUIREMENTS_FILE_PATH" -get_requirements_from_group 'PACKAGES' 'packages' "$REQUIREMENTS_FILE_PATH" -get_requirements_from_group 'FILES' 'files' "$REQUIREMENTS_FILE_PATH" -get_requirements_from_group 'IMAGES' 'images' "$REQUIREMENTS_FILE_PATH" - -# === Packages === - -check_connection yum $(yum repolist --quiet | tail -n +2 | cut -d' ' -f1 | cut -d'/' -f1 | sed 's/^!//') - -# --- Backup yum repositories --- - -if [ -f "$YUM_CONFIG_BACKUP_FILE_PATH" ]; then - echol "Backup aleady exists: $YUM_CONFIG_BACKUP_FILE_PATH" -else - echol "Backuping /etc/yum.repos.d/ to $YUM_CONFIG_BACKUP_FILE_PATH" - if backup_files "$YUM_CONFIG_BACKUP_FILE_PATH" '/etc/yum.repos.d/'; then - echol "Backup done" - else - if [ -f "$YUM_CONFIG_BACKUP_FILE_PATH" ]; then - remove_file "$YUM_CONFIG_BACKUP_FILE_PATH" - fi - exit_with_error "Backup of yum repositories failed" - fi -fi - -# --- Restore system repositories in case epirepo is enabled - -enable_system_repos_script="/var/tmp/epi-repository-setup-scripts/enable-system-repos.sh" -disable_epirepo_client_script="/var/tmp/epi-repository-setup-scripts/disable-epirepo-client.sh" - -if [[ -f /etc/yum.repos.d/epirepo.repo ]]; then - if [[ -f /var/tmp/enabled-system-repos.txt && -f $enable_system_repos_script ]]; then - echol "OS repositories seems missing, restoring..." - $enable_system_repos_script || exit_with_error "Could not restore system repositories" - $disable_epirepo_client_script || exit_with_error "Could not disable epirepo" - else - echol "/var/tmp/enabled-system-repos.txt or $enable_system_repos_script seems missing, you either know what you're doing or you need to fix your repositories" - fi -fi - -# --- Install required packages unless present --- - -# repos can be enabled or disabled using the yum-config-manager command, which is provided by yum-utils package -for package in 'yum-utils' 'wget' 'curl' 'tar'; do - if ! is_package_installed "$package"; then - install_package "$package" - fi -done - -# --- Download and setup Crane for downloading images --- - -if [[ -z "${CRANE}" ]] || [ $(wc -l <<< "${CRANE}") -ne 1 ] ; then - exit_with_error "Crane binary download path undefined or more than one download path defined" -else - if [[ -x $CRANE_BIN ]]; then - echol "Crane binary already exists" - else - file_url=$(head -n 1 <<< "${CRANE}") - - check_connection wget $file_url - - echol "Downloading crane from: ${file_url}" - download_file "${file_url}" "${SCRIPT_DIR}" - tar_path="${SCRIPT_DIR}/${file_url##*/}" - echol "Unpacking crane from ${tar_path} to ${CRANE_BIN}" - run_cmd tar -xzf "${tar_path}" --directory "${SCRIPT_DIR}" "crane" --overwrite - [[ -x "${CRANE_BIN}" ]] || run_cmd chmod +x "${CRANE_BIN}" - remove_file "${tar_path}" - fi -fi - -# --- Enable CentOS repos --- - -# -> CentOS-7 - Extras # for container-selinux and centos-release-scl packages -enable_repo 'extras' -# -> CentOS-7 - Base # for python dependencies -enable_repo 'base' - -# --- Add repos --- - -# noarch repositories -. 
${ADD_MULTIARCH_REPOSITORIES_SCRIPT} - -# arch specific repositories -. ${ADD_ARCH_REPOSITORIES_SCRIPT} -# -> Software Collections (SCL) https://wiki.centos.org/AdditionalResources/Repositories/SCL -if ! is_package_installed 'centos-release-scl'; then - # from extras repo - install_package 'centos-release-scl-rh' - install_package 'centos-release-scl' -fi - -# some packages are from EPEL repo -if ! is_package_installed 'epel-release'; then - install_package 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm' 'epel-release' -fi - -# clean metadata for upgrades (when the same package can be downloaded from changed repo) -run_cmd remove_yum_cache_for_untracked_repos - -run_cmd_with_retries yum -y makecache fast 3 - -# --- Download packages --- - -# 1) packages required to create repository - -create_directory "$REPO_PREREQ_PACKAGES_DIR" - -# prepare lists -PREREQ_PACKAGES=() -for package in $REPO_PREREQ_PACKAGES; do - echol "Processing package: $package" - get_package_with_version_arch 'QUERY_OUTPUT' "$package" - PREREQ_PACKAGES+=("$QUERY_OUTPUT") -done - -# download requirements (fixed versions) -if [[ ${#PREREQ_PACKAGES[@]} -gt 0 ]]; then - echol "Downloading repository prerequisite packages (${#PREREQ_PACKAGES[@]})..." - download_packages "$REPO_PREREQ_PACKAGES_DIR" "${PREREQ_PACKAGES[@]}" -fi - -# 2) non-prerequisite packages - -create_directory "$PACKAGES_DIR" - -# prepare lists -NON_PREREQ_PACKAGES=() -DEPENDENCIES_OF_NON_PREREQ_PACKAGES=() -for package in $PACKAGES; do - echol "Processing package: $package" - get_package_with_version_arch 'QUERY_OUTPUT' "$package" - NON_PREREQ_PACKAGES+=("$QUERY_OUTPUT") - get_package_dependencies_with_arch 'DEPENDENCIES' "$package" - if [[ ${#DEPENDENCIES[@]} -gt 0 ]]; then - for dependency in "${DEPENDENCIES[@]}"; do - DEPENDENCIES_OF_NON_PREREQ_PACKAGES+=("$dependency") - done - fi -done - -if [[ ${#NON_PREREQ_PACKAGES[@]} -gt 0 ]]; then - # download requirements (fixed versions) - echol "Downloading packages (${#NON_PREREQ_PACKAGES[@]})..." - download_packages "$PACKAGES_DIR" "${NON_PREREQ_PACKAGES[@]}" - # download dependencies (latest versions) - get_unique_array 'DEPENDENCIES' "${DEPENDENCIES_OF_NON_PREREQ_PACKAGES[@]}" - get_packages_with_version_arch 'DEPENDENCIES' "${DEPENDENCIES[@]}" - echol "Downloading dependencies of packages (${#DEPENDENCIES[@]})..." 
- download_packages "$PACKAGES_DIR" "${DEPENDENCIES[@]}" -fi - -# --- Clean up yum repos --- - -remove_added_repos "$YUM_CONFIG_BACKUP_FILE_PATH" - -# --- Restore yum repos --- - -echol "Restoring /etc/yum.repos.d/*.repo from: $YUM_CONFIG_BACKUP_FILE_PATH" -echol "Executing: tar --extract --verbose --file $YUM_CONFIG_BACKUP_FILE_PATH" -if tar --extract --verbose --file "$YUM_CONFIG_BACKUP_FILE_PATH" --directory /etc/yum.repos.d \ - --strip-components=2 'etc/yum.repos.d/*.repo'; then - echol "Restored: yum repositories" -else - exit_with_error "Extracting tar failed: $YUM_CONFIG_BACKUP_FILE_PATH" -fi - -# === Files === - -check_connection wget $FILES - -create_directory "$FILES_DIR" - -if [[ -z "$FILES" ]]; then - echol "No files to download" -else - # list of all files that will be downloaded - echol "Files to be downloaded:" - cat -n <<< "${FILES}" - - printf "\n" - - while IFS=' ' read -r url new_filename; do - # download files, check if new filename is provided - if [[ -z $new_filename ]]; then - download_file "$url" "$FILES_DIR" - elif [[ $new_filename = *" "* ]]; then - exit_with_error "wrong new filename for file: $url" - else - download_file "$url" "$FILES_DIR" "$new_filename" - fi - done <<< "$FILES" -fi - -# === Images === - -check_connection crane $(for image in $IMAGES; do splitted=(${image//:/ }); echo "${splitted[0]}"; done) - -create_directory "$IMAGES_DIR" - -for image in $IMAGES; do - download_image "$image" "$IMAGES_DIR" -done - -# --- Clean up --- - -remove_installed_packages "$INSTALLED_PACKAGES_FILE_PATH" - -remove_file "$YUM_CONFIG_BACKUP_FILE_PATH" - -remove_file "$PID_FILE_PATH" - -readonly END_TIME=$(date +%s) - -echol "$SCRIPT_FILE_NAME finished, execution time: $(date -u -d @$((END_TIME-START_TIME)) +'%Hh:%Mm:%Ss')" diff --git a/ansible/playbooks/roles/repository/files/download-requirements/centos-7/requirements.aarch64.txt b/ansible/playbooks/roles/repository/files/download-requirements/centos-7/requirements.aarch64.txt deleted file mode 100644 index c75b511532..0000000000 --- a/ansible/playbooks/roles/repository/files/download-requirements/centos-7/requirements.aarch64.txt +++ /dev/null @@ -1,253 +0,0 @@ -# Put this file in the same directory as download script - -[packages-repo-prereqs] -apr # for httpd -apr-util # for httpd -centos-logos -createrepo -deltarpm # for createrepo -httpd -httpd-tools # for httpd -libxml2 # for libxml2-python -libxml2-python # for createrepo -mailcap # for httpd -mod_ssl # for httpd -python-chardet # for createrepo -python-deltarpm # for createrepo -python-kitchen # for createrepo -yum-utils - -[crane] -https://github.com/google/go-containerregistry/releases/download/v0.4.1/go-containerregistry_Linux_arm64.tar.gz - -[packages] -audit # for docker-ce -bash-completion -ca-certificates -cifs-utils -conntrack-tools # for kubelet -containerd.io-1.4.12 -container-selinux -#cri-tools-1.13.0 -curl -dejavu-sans-fonts # for grafana -docker-ce-20.10.8 -docker-ce-cli-20.10.8 -docker-ce-rootless-extras-20.10.8 -ebtables -elasticsearch-oss-7.10.2 # for opendistroforelasticsearch & logging roles -ethtool -filebeat-7.9.2 -firewalld -fontconfig # for grafana -fping -fuse-overlayfs # for docker-ce-rootless-extras -gnutls # for cifs-utils -gssproxy # for nfs-utils -htop -iftop -ipset # for firewalld -java-1.8.0-openjdk-headless -javapackages-tools # for java-1.8.0-openjdk-headless -jq -libini_config # for nfs-utils -libselinux-python -libsemanage-python -libX11 # for grafana -libxcb # for grafana -libXcursor # for grafana -libXt # for grafana 
-logrotate -net-tools -nfs-utils -nmap-ncat -# Open Distro for Elasticsearch plugins are installed individually to not download them twice in different versions (as dependencies of opendistroforelasticsearch package) -opendistro-alerting-1.13.1.* -opendistro-index-management-1.13.1.* -opendistro-job-scheduler-1.13.0.* -opendistro-performance-analyzer-1.13.0.* -opendistro-security-1.13.1.* -opendistro-sql-1.13.0.* -opendistroforelasticsearch-kibana-1.13.1 # kibana has shorter version -openssl -unixODBC # for erlang -perl # for vim -perl-Getopt-Long # for vim -perl-libs # for vim -perl-Pod-Perldoc # for vim -perl-Pod-Simple # for vim -perl-Pod-Usage # for vim -pgaudit15_13-1.5.0 -policycoreutils-python # for container-selinux -pyldb # for cifs-utils -python-cffi # for python2-cryptography -python-firewall # for firewalld -python-kitchen # for yum-utils -python-lxml # for java-1.8.0-openjdk-headless -python-psycopg2 -python-pycparser # for python2-cryptography -python-setuptools -python-slip-dbus # for firewalld -python-ipaddress -python-backports -python2-cryptography # for Ansible (certificate modules) -python3-3.6.8 -quota # for nfs-utils -rabbitmq-server-3.8.9 -postgresql13-server -samba-client -samba-client-libs # for samba-client -samba-common -samba-libs # for cifs-utils -sysstat -tar -telnet -tmux -urw-base35-fonts # for grafana -unzip -vim-common # for vim -vim-enhanced -wget -xorg-x11-font-utils # for grafana -xorg-x11-server-utils # for grafana -yum-plugin-versionlock -yum-utils - -# to make remote-to-remote "synchronize" work in ansible -rsync - -# K8s v1.18.6 (Epiphany >= v0.7.1) -kubeadm-1.18.6 -kubectl-1.18.6 -kubelet-1.18.6 - -# K8s v1.19.15 (Epiphany >= v1.3, transitional version) -kubeadm-1.19.15 -kubectl-1.19.15 -kubelet-1.19.15 - -# K8s v1.20.12 (Epiphany >= v1.3, transitional version) -kubeadm-1.20.12 -kubectl-1.20.12 -kubelet-1.20.12 - -# K8s v1.21.7 (Epiphany >= v1.3, transitional version) -kubeadm-1.21.7 -kubectl-1.21.7 -kubelet-1.21.7 - -# K8s v1.22.4 -kubeadm-1.22.4 -kubectl-1.22.4 -kubelet-1.22.4 - -# Kubernetes Generic -kubernetes-cni-0.8.6-0 # since K8s v1.18.6 -# https://github.com/kubernetes/kubernetes/blob/v1.19.15/build/dependencies.yaml -kubernetes-cni-0.8.7-0 # since K8s v1.19.15 - -[files] -# --- Packages --- -# Github repository for erlang rpm is used since packagecloud repository is limited to a certain number of versions and erlang package from erlang-solutions repository is much more complex and bigger -https://packages.erlang-solutions.com/erlang/rpm/centos/7/aarch64/esl-erlang_23.1.5-1~centos~7_arm64.rpm -# Grafana package is not downloaded from repository since it was not reliable (issue #2449) -https://dl.grafana.com/oss/release/grafana-8.3.2-1.aarch64.rpm -# --- Exporters --- -https://github.com/danielqsj/kafka_exporter/releases/download/v1.4.0/kafka_exporter-1.4.0.linux-arm64.tar.gz -https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.16.1/jmx_prometheus_javaagent-0.16.1.jar -https://github.com/prometheus/node_exporter/releases/download/v1.3.1/node_exporter-1.3.1.linux-arm64.tar.gz -https://github.com/prometheus-community/postgres_exporter/releases/download/v0.10.0/postgres_exporter-0.10.0.linux-arm64.tar.gz -# --- Misc --- -https://archive.apache.org/dist/kafka/2.6.0/kafka_2.12-2.6.0.tgz -https://github.com/prometheus/prometheus/releases/download/v2.31.1/prometheus-2.31.1.linux-arm64.tar.gz -https://github.com/prometheus/alertmanager/releases/download/v0.23.0/alertmanager-0.23.0.linux-arm64.tar.gz 
-https://archive.apache.org/dist/zookeeper/zookeeper-3.5.8/apache-zookeeper-3.5.8-bin.tar.gz -https://get.helm.sh/helm-v3.2.0-linux-arm64.tar.gz -https://archive.apache.org/dist/logging/log4j/2.17.1/apache-log4j-2.17.1-bin.tar.gz -# --- Helm charts --- -https://charts.bitnami.com/bitnami/node-exporter-2.3.17.tgz -https://helm.elastic.co/helm/filebeat/filebeat-7.9.2.tgz -# --- Grafana Dashboards --- -# Kubernetes Cluster -https://grafana.com/api/dashboards/7249/revisions/1/download grafana_dashboard_7249.json -# Kubernetes cluster monitoring (via Prometheus) -https://grafana.com/api/dashboards/315/revisions/3/download grafana_dashboard_315.json -# Node Exporter for Prometheus -https://grafana.com/api/dashboards/11074/revisions/9/download grafana_dashboard_11074.json -# Node Exporter Server Metrics -https://grafana.com/api/dashboards/405/revisions/8/download grafana_dashboard_405.json -# Postgres Overview -https://grafana.com/api/dashboards/455/revisions/2/download grafana_dashboard_455.json -# PostgreSQL Database -https://grafana.com/api/dashboards/9628/revisions/7/download grafana_dashboard_9628.json -# RabbitMQ Monitoring -https://grafana.com/api/dashboards/4279/revisions/4/download grafana_dashboard_4279.json -# Node Exporter Full -https://grafana.com/api/dashboards/1860/revisions/23/download grafana_dashboard_1860.json -# Kafka Exporter Overview -https://grafana.com/api/dashboards/7589/revisions/5/download grafana_dashboard_7589.json -# HaProxy backend (or frontend/servers) -https://grafana.com/api/dashboards/789/revisions/1/download grafana_dashboard_789.json -# Docker and Host Monitoring w/ Prometheus -https://grafana.com/api/dashboards/179/revisions/7/download grafana_dashboard_179.json -# Kubernetes pod and cluster monitoring (via Prometheus) -https://grafana.com/api/dashboards/6663/revisions/1/download grafana_dashboard_6663.json -# RabbitMQ cluster monitoring (via Prometheus) -https://grafana.com/api/dashboards/10991/revisions/11/download grafana_dashboard_10991.json - -[images] -haproxy:2.2.2-alpine -kubernetesui/dashboard:v2.3.1 -kubernetesui/metrics-scraper:v1.0.7 -registry:2 -# applications -epiphanyplatform/keycloak:14.0.0 -rabbitmq:3.8.9 -# K8s -## v1.18.6 -k8s.gcr.io/kube-apiserver:v1.18.6 -k8s.gcr.io/kube-controller-manager:v1.18.6 -k8s.gcr.io/kube-scheduler:v1.18.6 -k8s.gcr.io/kube-proxy:v1.18.6 -k8s.gcr.io/coredns:1.6.7 -k8s.gcr.io/etcd:3.4.3-0 -quay.io/coreos/flannel:v0.12.0-arm64 -quay.io/coreos/flannel:v0.12.0 -calico/cni:v3.15.0 -calico/kube-controllers:v3.15.0 -calico/node:v3.15.0 -calico/pod2daemon-flexvol:v3.15.0 -## v1.19.15 -k8s.gcr.io/kube-apiserver:v1.19.15 -k8s.gcr.io/kube-controller-manager:v1.19.15 -k8s.gcr.io/kube-scheduler:v1.19.15 -k8s.gcr.io/kube-proxy:v1.19.15 -## v1.20.12 -k8s.gcr.io/kube-apiserver:v1.20.12 -k8s.gcr.io/kube-controller-manager:v1.20.12 -k8s.gcr.io/kube-scheduler:v1.20.12 -k8s.gcr.io/kube-proxy:v1.20.12 -k8s.gcr.io/coredns:1.7.0 -k8s.gcr.io/pause:3.2 -## v1.21.7 -k8s.gcr.io/kube-apiserver:v1.21.7 -k8s.gcr.io/kube-controller-manager:v1.21.7 -k8s.gcr.io/kube-scheduler:v1.21.7 -k8s.gcr.io/kube-proxy:v1.21.7 -k8s.gcr.io/coredns/coredns:v1.8.0 -k8s.gcr.io/etcd:3.4.13-0 -k8s.gcr.io/pause:3.4.1 -## v1.22.4 -k8s.gcr.io/kube-apiserver:v1.22.4 -k8s.gcr.io/kube-controller-manager:v1.22.4 -k8s.gcr.io/kube-scheduler:v1.22.4 -k8s.gcr.io/kube-proxy:v1.22.4 -k8s.gcr.io/coredns/coredns:v1.8.4 -k8s.gcr.io/etcd:3.5.0-0 -k8s.gcr.io/pause:3.5 -quay.io/coreos/flannel:v0.14.0-arm64 -quay.io/coreos/flannel:v0.14.0 -calico/cni:v3.20.3 
-calico/kube-controllers:v3.20.3 -calico/node:v3.20.3 -calico/pod2daemon-flexvol:v3.20.3 diff --git a/ansible/playbooks/roles/repository/files/download-requirements/centos-7/requirements.x86_64.txt b/ansible/playbooks/roles/repository/files/download-requirements/centos-7/requirements.x86_64.txt deleted file mode 100644 index fa77cfea97..0000000000 --- a/ansible/playbooks/roles/repository/files/download-requirements/centos-7/requirements.x86_64.txt +++ /dev/null @@ -1,256 +0,0 @@ -# Put this file in the same directory as download script - -[packages-repo-prereqs] -apr # for httpd -apr-util # for httpd -centos-logos -createrepo -deltarpm # for createrepo -httpd -httpd-tools # for httpd -libxml2 # for libxml2-python -libxml2-python # for createrepo -mailcap # for httpd -mod_ssl # for httpd -python-chardet # for createrepo -python-deltarpm # for createrepo -python-kitchen # for createrepo -yum-utils - -[crane] -https://github.com/google/go-containerregistry/releases/download/v0.4.1/go-containerregistry_Linux_x86_64.tar.gz - -[packages] -audit # for docker-ce -bash-completion -ca-certificates -cifs-utils -conntrack-tools # for kubelet -containerd.io-1.4.12 -container-selinux -cri-tools-1.13.0 -curl -dejavu-sans-fonts # for grafana -docker-ce-20.10.8 -docker-ce-cli-20.10.8 -docker-ce-rootless-extras-20.10.8 -ebtables -elasticsearch-curator-5.8.3 -elasticsearch-oss-7.10.2 # for opendistroforelasticsearch & logging roles -ethtool -filebeat-7.9.2 -firewalld -fontconfig # for grafana -fping -fuse-overlayfs # for docker-ce-rootless-extras -gnutls # for cifs-utils -gssproxy # for nfs-utils -htop -iftop -ipset # for firewalld -java-1.8.0-openjdk-headless -javapackages-tools # for java-1.8.0-openjdk-headless -jq -libini_config # for nfs-utils -libselinux-python -libsemanage-python -libX11 # for grafana -libxcb # for grafana -libXcursor # for grafana -libXt # for grafana -logrotate -net-tools -nfs-utils -nmap-ncat -# Open Distro for Elasticsearch plugins are installed individually to not download them twice in different versions (as dependencies of opendistroforelasticsearch package) -opendistro-alerting-1.13.1.* -opendistro-index-management-1.13.1.* -opendistro-job-scheduler-1.13.0.* -opendistro-performance-analyzer-1.13.0.* -opendistro-security-1.13.1.* -opendistro-sql-1.13.0.* -opendistroforelasticsearch-kibana-1.13.1 # kibana has shorter version -openssl -perl # for vim -perl-Getopt-Long # for vim -perl-libs # for vim -perl-Pod-Perldoc # for vim -perl-Pod-Simple # for vim -perl-Pod-Usage # for vim -pgaudit15_13-1.5.0 -policycoreutils-python # for container-selinux -pyldb # for cifs-utils -python-cffi # for python2-cryptography -python-firewall # for firewalld -python-kitchen # for yum-utils -python-lxml # for java-1.8.0-openjdk-headless -python-psycopg2 -python-pycparser # for python2-cryptography -python-setuptools -python-slip-dbus # for firewalld -python-ipaddress -python-backports -python2-cryptography # for Ansible (certificate modules) -python3-3.6.8 -quota # for nfs-utils -rabbitmq-server-3.8.9 -postgresql13-server -repmgr10-5.2.1 # used to upgrade only repmgr -repmgr13-5.2.1 -samba-client -samba-client-libs # for samba-client -samba-common -samba-libs # for cifs-utils -sysstat -tar -telnet -tmux -urw-base35-fonts # for grafana -unzip -vim-common # for vim -vim-enhanced -wget -xorg-x11-font-utils # for grafana -xorg-x11-server-utils # for grafana -yum-plugin-versionlock -yum-utils - -# to make remote-to-remote "synchronize" work in ansible -rsync - -# K8s v1.18.6 (Epiphany >= v0.7.1) 
-kubeadm-1.18.6 -kubectl-1.18.6 -kubelet-1.18.6 - -# K8s v1.19.15 (Epiphany >= v1.3, transitional version) -kubeadm-1.19.15 -kubectl-1.19.15 -kubelet-1.19.15 - -# K8s v1.20.12 (Epiphany >= v1.3, transitional version) -kubeadm-1.20.12 -kubectl-1.20.12 -kubelet-1.20.12 - -# K8s v1.21.7 (Epiphany >= v1.3, transitional version) -kubeadm-1.21.7 -kubectl-1.21.7 -kubelet-1.21.7 - -# K8s v1.22.4 -kubeadm-1.22.4 -kubectl-1.22.4 -kubelet-1.22.4 - -# Kubernetes Generic -kubernetes-cni-0.8.6-0 # since K8s v1.18.6 -kubernetes-cni-0.8.7-0 # since K8s v1.19.15 - -[files] -# --- Packages --- -# Github repository for erlang rpm is used since packagecloud repository is limited to a certain number of versions and erlang package from erlang-solutions repository is much more complex and bigger -https://github.com/rabbitmq/erlang-rpm/releases/download/v23.1.5/erlang-23.1.5-1.el7.x86_64.rpm -# Grafana package is not downloaded from repository since it was not reliable (issue #2449) -https://dl.grafana.com/oss/release/grafana-8.3.2-1.x86_64.rpm -# --- Exporters --- -https://github.com/danielqsj/kafka_exporter/releases/download/v1.4.0/kafka_exporter-1.4.0.linux-amd64.tar.gz -https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.16.1/jmx_prometheus_javaagent-0.16.1.jar -https://github.com/prometheus/node_exporter/releases/download/v1.3.1/node_exporter-1.3.1.linux-amd64.tar.gz -https://github.com/prometheus-community/postgres_exporter/releases/download/v0.10.0/postgres_exporter-0.10.0.linux-amd64.tar.gz -# --- Misc --- -https://archive.apache.org/dist/kafka/2.6.0/kafka_2.12-2.6.0.tgz -https://github.com/prometheus/prometheus/releases/download/v2.31.1/prometheus-2.31.1.linux-amd64.tar.gz -https://github.com/prometheus/alertmanager/releases/download/v0.23.0/alertmanager-0.23.0.linux-amd64.tar.gz -https://archive.apache.org/dist/zookeeper/zookeeper-3.5.8/apache-zookeeper-3.5.8-bin.tar.gz -https://get.helm.sh/helm-v3.2.0-linux-amd64.tar.gz -https://archive.apache.org/dist/logging/log4j/2.17.1/apache-log4j-2.17.1-bin.tar.gz -# --- Helm charts --- -https://helm.elastic.co/helm/filebeat/filebeat-7.9.2.tgz -https://charts.bitnami.com/bitnami/node-exporter-2.3.17.tgz -# --- Grafana Dashboards --- -# Kubernetes Cluster -https://grafana.com/api/dashboards/7249/revisions/1/download grafana_dashboard_7249.json -# Kubernetes cluster monitoring (via Prometheus) -https://grafana.com/api/dashboards/315/revisions/3/download grafana_dashboard_315.json -# Node Exporter for Prometheus -https://grafana.com/api/dashboards/11074/revisions/9/download grafana_dashboard_11074.json -# Node Exporter Server Metrics -https://grafana.com/api/dashboards/405/revisions/8/download grafana_dashboard_405.json -# Postgres Overview -https://grafana.com/api/dashboards/455/revisions/2/download grafana_dashboard_455.json -# PostgreSQL Database -https://grafana.com/api/dashboards/9628/revisions/7/download grafana_dashboard_9628.json -# RabbitMQ Monitoring -https://grafana.com/api/dashboards/4279/revisions/4/download grafana_dashboard_4279.json -# Node Exporter Full -https://grafana.com/api/dashboards/1860/revisions/23/download grafana_dashboard_1860.json -# Kafka Exporter Overview -https://grafana.com/api/dashboards/7589/revisions/5/download grafana_dashboard_7589.json -# HaProxy backend (or frontend/servers) -https://grafana.com/api/dashboards/789/revisions/1/download grafana_dashboard_789.json -# Docker and Host Monitoring w/ Prometheus -https://grafana.com/api/dashboards/179/revisions/7/download grafana_dashboard_179.json -# 
Kubernetes pod and cluster monitoring (via Prometheus) -https://grafana.com/api/dashboards/6663/revisions/1/download grafana_dashboard_6663.json -# RabbitMQ cluster monitoring (via Prometheus) -https://grafana.com/api/dashboards/10991/revisions/11/download grafana_dashboard_10991.json - -[images] -haproxy:2.2.2-alpine -kubernetesui/dashboard:v2.3.1 -kubernetesui/metrics-scraper:v1.0.7 -registry:2 -# applications -bitnami/pgpool:4.2.4 -bitnami/pgbouncer:1.16.0 -epiphanyplatform/keycloak:14.0.0 -rabbitmq:3.8.9 -# K8s -## v1.18.6 -k8s.gcr.io/kube-apiserver:v1.18.6 -k8s.gcr.io/kube-controller-manager:v1.18.6 -k8s.gcr.io/kube-scheduler:v1.18.6 -k8s.gcr.io/kube-proxy:v1.18.6 -k8s.gcr.io/coredns:1.6.7 -k8s.gcr.io/etcd:3.4.3-0 -quay.io/coreos/flannel:v0.12.0-amd64 -quay.io/coreos/flannel:v0.12.0 -calico/cni:v3.15.0 -calico/kube-controllers:v3.15.0 -calico/node:v3.15.0 -calico/pod2daemon-flexvol:v3.15.0 -## v1.19.15 -k8s.gcr.io/kube-apiserver:v1.19.15 -k8s.gcr.io/kube-controller-manager:v1.19.15 -k8s.gcr.io/kube-scheduler:v1.19.15 -k8s.gcr.io/kube-proxy:v1.19.15 -## v1.20.12 -k8s.gcr.io/kube-apiserver:v1.20.12 -k8s.gcr.io/kube-controller-manager:v1.20.12 -k8s.gcr.io/kube-scheduler:v1.20.12 -k8s.gcr.io/kube-proxy:v1.20.12 -k8s.gcr.io/coredns:1.7.0 -k8s.gcr.io/pause:3.2 -## v1.21.7 -k8s.gcr.io/kube-apiserver:v1.21.7 -k8s.gcr.io/kube-controller-manager:v1.21.7 -k8s.gcr.io/kube-scheduler:v1.21.7 -k8s.gcr.io/kube-proxy:v1.21.7 -k8s.gcr.io/coredns/coredns:v1.8.0 -k8s.gcr.io/etcd:3.4.13-0 -k8s.gcr.io/pause:3.4.1 -## v1.22.4 -k8s.gcr.io/kube-apiserver:v1.22.4 -k8s.gcr.io/kube-controller-manager:v1.22.4 -k8s.gcr.io/kube-scheduler:v1.22.4 -k8s.gcr.io/kube-proxy:v1.22.4 -k8s.gcr.io/coredns/coredns:v1.8.4 -k8s.gcr.io/etcd:3.5.0-0 -k8s.gcr.io/pause:3.5 -quay.io/coreos/flannel:v0.14.0-amd64 -quay.io/coreos/flannel:v0.14.0 -calico/cni:v3.20.3 -calico/kube-controllers:v3.20.3 -calico/node:v3.20.3 -calico/pod2daemon-flexvol:v3.20.3 diff --git a/ansible/playbooks/roles/repository/files/download-requirements/common/common_functions.sh b/ansible/playbooks/roles/repository/files/download-requirements/common/common_functions.sh deleted file mode 100644 index b9c7534993..0000000000 --- a/ansible/playbooks/roles/repository/files/download-requirements/common/common_functions.sh +++ /dev/null @@ -1,246 +0,0 @@ -last_error='' # holds last occurred error msg - - -echol() { -# -# Print to stdout, optionally to a log file. -# Requires $CREATE_LOGFILE and $LOG_FILE_PATH to be defined. -# -# :param $@: args to be printed -# - echo -e "$@" - if [[ $CREATE_LOGFILE == "yes" ]]; then - local timestamp=$(date +"%b %e %H:%M:%S") - echo -e "${timestamp}: $@" >> "$LOG_FILE_PATH" - fi -} - - -exit_with_error() { - echol $@ - exit 1 -} - - -__at_least_one_test_pass() { -# -# Iterate over all arguments each time call test $function and check result. -# If at least one call passes, function will yield success. -# -# :param $1: test function -# :param $@: arguments to be tested -# :return: 0 - success, 1 - failure - local function=$1 - shift - - local args=$@ - local total_count=$# - local failed_count=0 - - for arg in $args; do - echol "- $arg..." - $function $arg - - (( $? == 0 )) && return 0 - - failed_count=$(( $failed_count + 1 )) - done - - (( $total_count != $failed_count )) || return 1 - return 0 -} - - -__test_address_curl() { -# -# Test address connection without downloading any resource. -# -# :param $1: url to be tested -# :return: curl exit value -# - last_error=$(curl --show-error --silent $1 2>&1 >/dev/null) - return $? 
-} - - -__check_curl() { -# -# Use curl in silent mode to check if target `url` is available. -# -# :param $@: urls to be tested -# :return: 0 - success, 1 - failure -# - echol "Testing curl connection:" - - (( $# > 0 )) || exit_with_error "_check_curl: no url provided" - - __at_least_one_test_pass __test_address_curl $@ -} - - -__test_address_wget() { -# -# Test address connection without downloading any resource. -# -# :param $1: url to be tested -# :return: wget exit value -# - last_error=$(wget --spider $1 2>&1 >/dev/null) - return $? -} - - -__check_wget() { -# -# Use wget in spider mode (without downloading resources) to check if target `url` -# is available. -# -# :param $@: urls to be tested -# :return: 0 - success, 1 - failure -# - echol "Testing wget connection:" - - (( $# > 0 )) || exit_with_error "_check_wget: no url provided" - - __at_least_one_test_pass __test_address_wget $@ -} - - -__test_apt_repo() { -# -# Update a single repository. -# -# :param $1: repository to be updated -# :return: apt return value -# - last_error=$(apt update -o Dir::Etc::sourcelist=$1 2>&1 >/dev/null) - local ret_val=$? - - (( $ret_val == 0 )) || return $ret_val - - # make sure that there were no error messages: - [[ -z $(echo "$last_error" | tr '[:upper:]' '[:lower:]' | grep 'err\|fail') ]] || return 1 - - return 0 -} - - -__check_apt() { -# -# Use `apt update` to make sure that there is connection to repositories. -# -# :param $@: repos to be tested -# :return: 0 - success, 1 - failure -# - echol "Testing apt connection:" - - (( $# > 0 )) || exit_with_error "_check_apt: no repositories provided" - local repos=$@ - - (( $UID == 0 )) || exit_with_error "apt needs to be run as a root" - - __at_least_one_test_pass __test_apt_repo $repos - return $? -} - - -__test_yum_repo() { -# -# List packages from a single repository. -# -# :param $1: repository to be listed -# :return: yum return value -# - last_error=$(yum --quiet --disablerepo=* --enablerepo=$1 list available 2>&1 >/dev/null) - return $? -} - - -__check_yum() { -# -# Use `yum list` to make sure that there is connection to repositories. -# Query available packages for each repository. -# -# :param $@: repositories to be tested -# :return: 0 - success, 1 - failure -# - echol "Testing yum connection:" - - (( $# > 0 )) || exit_with_error "_check_yum: no repositories provided" - local repos=$@ - - __at_least_one_test_pass __test_yum_repo $repos - return $? -} - - -__test_crane_repo() { -# -# List packages from a single repository. -# Requires $CRANE_BIN to be defined -# -# :param $1: repository to be listed -# :return: crane return value -# - last_error=$($CRANE_BIN ls $1 2>&1 >/dev/null) - return $? -} - - -__check_crane() { -# -# Use `crane ls` to make sure that there is connection to repositories. -# Query available packages for each repository. -# -# :param $@: repositories to be tested -# :return: 0 - success, 1 - failure -# - echol "Testing crane connection:" - - (( $# > 0 )) || exit_with_error "_check_crane: no repository provided" - local repos=$@ - - __at_least_one_test_pass __test_crane_repo $repos - return $? -} - - -# Tools which can be tested: -declare -A tools=( -[curl]=__check_curl -[wget]=__check_wget -[apt]=__check_apt -[yum]=__check_yum -[crane]=__check_crane -) - - -check_connection() { -# -# Run connection test for target `tool` with optional `url` parameter. -# Requires $internet_access_checks_enabled to be defined. 
-#
-# :param $1: which `tool` to test
-# :param $@: optional parameters used by some tools such as `url`
-#
-    [[ $internet_access_checks_enabled == "no" ]] && return 0
-
-    [[ $# -lt 1 ]] && exit_with_error '"tool" argument not provided'
-    local tool=$1
-
-    shift # discard tool variable
-
-    [[ ! -n ${tools[$tool]} ]] && exit_with_error "no such tool: \"$tool\""
-
-    ( # disable -e in order to handle non-zero return values
-        set +e
-
-        ${tools[$tool]} $@
-
-        if (( $? == 0 )); then
-            echol "Connection successful."
-        else
-            exit_with_error "Connection failure, reason: [$last_error]"
-        fi
-    )
-}
diff --git a/ansible/playbooks/roles/repository/files/download-requirements/download-requirements.py b/ansible/playbooks/roles/repository/files/download-requirements/download-requirements.py
new file mode 100644
index 0000000000..ea7bf10654
--- /dev/null
+++ b/ansible/playbooks/roles/repository/files/download-requirements/download-requirements.py
@@ -0,0 +1,92 @@
+#!/usr/bin/python3
+import datetime
+import logging
+from os import execv, getuid
+from typing import List
+
+import sys
+
+from src.command.toolchain import TOOLCHAINS
+from src.config import Config, OSType
+from src.error import CriticalError
+
+
+def install_missing_modules(config: Config):
+    """
+    Install missing 3rd party modules.
+    Used for offline mode.
+    """
+    tools = TOOLCHAINS[config.os_type](config.retries)
+    config.pip_installed = tools.ensure_pip()
+    config.poyo_installed = tools.pip.install('poyo', '==0.5.0', user=True)
+
+    if config.poyo_installed:
+        logging.debug('Installed `poyo==0.5.0` library')
+
+
+def rerun_download_requirements(config: Config):
+    """
+    Rerun download-requirements after installing missing modules.
+    This step is required because the Python interpreter needs to reload modules.
+    Used for offline mode.
+    """
+    additional_args: List[str] = ['--rerun']
+
+    # carry over info about installed 3rd party tools and modules:
+    if config.pip_installed:
+        additional_args.append('--pip-installed')
+
+    if config.poyo_installed:
+        additional_args.append('--poyo-installed')
+
+    execv(__file__, sys.argv + additional_args)
+
+
+def cleanup(config: Config):
+    """
+    Remove any 3rd party modules and tools.
+    Used for offline mode.
+    """
+    tools = TOOLCHAINS[config.os_type](config.retries)
+
+    if config.poyo_installed:
+        logging.info('Uninstalling 3rd party python modules:')
+        tools.pip.uninstall('poyo', '==0.5.0')
+        logging.info('Done.')
+
+    if config.pip_installed:
+        logging.info('Uninstalling pip3...')
+        tools.uninstall_pip()
+        logging.info('Done.')
+
+
+def main(argv: List[str]) -> int:
+    try:
+        time_begin = datetime.datetime.now()
+
+        if getuid() != 0:
+            print('Error: Needs to be run as root.')
+            return 1
+
+        config = Config(argv)
+
+        try:  # make sure that 3rd party modules are installed
+            from src.run import run
+            run(config)
+
+        except ModuleNotFoundError:
+            install_missing_modules(config)
+            rerun_download_requirements(config)
+
+        cleanup(config)
+
+        time_end = datetime.datetime.now() - time_begin
+        logging.info(f'Total execution time: {str(time_end).split(".")[0]}')
+    except CriticalError:
+        return 1
+
+    return 0
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv))
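The module-bootstrap flow in this new script re-executes itself after installing what was missing: execv() replaces the current process image, so the fresh interpreter can import modules that did not exist when the first run started, and the --rerun/--*-installed flags carry state across the exec boundary. A minimal, self-contained sketch of that pattern (flag name and printed messages are illustrative, not part of the patch):

#!/usr/bin/python3
# Sketch of the self-re-exec pattern used by download-requirements.py (illustrative only).
import sys
from os import execv

def main(argv):
    if '--rerun' not in argv:
        # First run: install missing dependencies here, then replace this
        # process with a fresh interpreter so the new modules are importable.
        print('bootstrap done, re-executing...')
        execv(sys.executable, [sys.executable, __file__] + argv[1:] + ['--rerun'])
    # Second run: dependencies are available; do the real work.
    print('running with all modules available')
    return 0

if __name__ == '__main__':
    sys.exit(main(sys.argv))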
+ """ + tools = TOOLCHAINS[config.os_type](config.retries) + + if config.poyo_installed: + logging.info('Uninstalling 3rd party python modules:') + tools.pip.uninstall('poyo', '==0.5.0') + logging.info('Done.') + + if config.pip_installed: + logging.info('Uninstalling pip3...') + tools.uninstall_pip() + logging.info('Done.') + + +def main(argv: List[str]) -> int: + try: + time_begin = datetime.datetime.now() + + if getuid() != 0: + print('Error: Needs to be run as root.') + return 1 + + config = Config(argv) + + try: # make sure that 3rd party modules are installed + from src.run import run + run(config) + + except ModuleNotFoundError: + install_missing_modules(config) + rerun_download_requirements(config) + + cleanup(config) + + time_end = datetime.datetime.now() - time_begin + logging.info(f'Total execution time: {str(time_end).split(".")[0]}') + except CriticalError: + return 1 + + return 0 + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/add-repositories.aarch64.sh b/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/add-repositories.aarch64.sh deleted file mode 100644 index 637aadfc83..0000000000 --- a/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/add-repositories.aarch64.sh +++ /dev/null @@ -1 +0,0 @@ -#!/usr/bin/env bash -eu diff --git a/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/add-repositories.multiarch.sh b/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/add-repositories.multiarch.sh deleted file mode 100644 index c59a6a3e74..0000000000 --- a/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/add-repositories.multiarch.sh +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env bash -eu - -DOCKER_CE_PATCHED_REPO_CONF=$(cat <<'EOF' -[docker-ce-stable-patched] -name=Docker CE Stable - patched centos/7/$basearch/stable -baseurl=https://download.docker.com/linux/centos/7/$basearch/stable -enabled=1 -gpgcheck=1 -gpgkey=https://download.docker.com/linux/centos/gpg -EOF -) - -ELASTIC_6_REPO_CONF=$(cat <<'EOF' -[elastic-6] -name=Elastic repository for 6.x packages -baseurl=https://artifacts.elastic.co/packages/oss-6.x/yum -gpgcheck=1 -gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch -enabled=1 -autorefresh=1 -type=rpm-md -EOF -) - -ELASTICSEARCH_7_REPO_CONF=$(cat <<'EOF' -[elasticsearch-7.x] -name=Elasticsearch repository for 7.x packages -baseurl=https://artifacts.elastic.co/packages/oss-7.x/yum -gpgcheck=1 -gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch -enabled=1 -autorefresh=1 -type=rpm-md -EOF -) - -ELASTICSEARCH_CURATOR_REPO_CONF=$(cat <<'EOF' -[curator-5] -name=CentOS/RHEL 7 repository for Elasticsearch Curator 5.x packages -baseurl=https://packages.elastic.co/curator/5/centos/7 -gpgcheck=1 -gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch -enabled=1 -EOF -) - -KUBERNETES_REPO_CONF=$(cat <<'EOF' -[kubernetes] -name=Kubernetes -baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-$basearch -enabled=1 -gpgcheck=1 -repo_gpgcheck=1 -gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg -EOF -) - -OPENDISTRO_REPO_CONF=$(cat <<'EOF' -[opendistroforelasticsearch-artifacts-repo] -name=Release RPM artifacts of OpenDistroForElasticsearch -baseurl=https://d3g5vo6xdbdb9a.cloudfront.net/yum/noarch/ -enabled=1 
-gpgkey=https://d3g5vo6xdbdb9a.cloudfront.net/GPG-KEY-opendistroforelasticsearch -gpgcheck=1 -repo_gpgcheck=1 -autorefresh=1 -type=rpm-md -EOF -) - -POSTGRESQL_REPO_CONF=$(cat <<'EOF' -[pgdg13] -name=PostgreSQL 13 for RHEL/CentOS $releasever - $basearch -baseurl=https://download.postgresql.org/pub/repos/yum/13/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG -EOF -) - -POSTGRESQL_COMMON_REPO_CONF=$(cat <<'EOF' -[pgdg-common] -name=PostgreSQL common for RHEL/CentOS $releasever - $basearch -baseurl=https://download.postgresql.org/pub/repos/yum/common/redhat/rhel-$releasever-$basearch -enabled=1 -gpgcheck=1 -gpgkey=https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG -EOF -) - -RABBITMQ_SERVER_REPO_CONF=$(cat <<'EOF' -[rabbitmq-server] -name=rabbitmq-rpm -baseurl=https://packagecloud.io/rabbitmq/rabbitmq-server/el/7/$basearch -gpgcheck=1 -gpgkey=https://packagecloud.io/rabbitmq/rabbitmq-server/gpgkey -repo_gpgcheck=1 -sslcacert=/etc/pki/tls/certs/ca-bundle.crt -enabled=1 -EOF -) - -# Official Docker CE repository, added with https://download.docker.com/linux/centos/docker-ce.repo, -# has broken URL (https://download.docker.com/linux/centos/7Server/x86_64/stable) for longer time. -# So direct (patched) link is used first if available. -add_repo_as_file 'docker-ce-stable-patched' "$DOCKER_CE_PATCHED_REPO_CONF" -if ! is_repo_available "docker-ce-stable-patched"; then - disable_repo "docker-ce-stable-patched" - add_repo 'docker-ce' 'https://download.docker.com/linux/centos/docker-ce.repo' -fi -add_repo_as_file 'elastic-6' "$ELASTIC_6_REPO_CONF" -add_repo_as_file 'elasticsearch-7' "$ELASTICSEARCH_7_REPO_CONF" -add_repo_as_file 'elasticsearch-curator-5' "$ELASTICSEARCH_CURATOR_REPO_CONF" -add_repo_as_file 'kubernetes' "$KUBERNETES_REPO_CONF" -add_repo_as_file 'opendistroforelasticsearch' "$OPENDISTRO_REPO_CONF" -add_repo_as_file 'postgresql-13' "$POSTGRESQL_REPO_CONF" -add_repo_as_file 'rabbitmq' "$RABBITMQ_SERVER_REPO_CONF" diff --git a/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/add-repositories.x86_64.sh b/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/add-repositories.x86_64.sh deleted file mode 100644 index 20bed5ddf5..0000000000 --- a/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/add-repositories.x86_64.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -eu - -add_repo_from_script 'https://dl.2ndquadrant.com/default/release/get/10/rpm' # for repmgr -add_repo_from_script 'https://dl.2ndquadrant.com/default/release/get/13/rpm' - -disable_repo '2ndquadrant-dl-default-release-pg10-debug' # script adds 2 repositories, only 1 is required -disable_repo '2ndquadrant-dl-default-release-pg13-debug' diff --git a/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/download-requirements.sh b/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/download-requirements.sh deleted file mode 100644 index 7688fd061a..0000000000 --- a/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/download-requirements.sh +++ /dev/null @@ -1,827 +0,0 @@ -#!/usr/bin/env bash - -# VERSION 1.0.5 - -# NOTE: You can run only one instance of this script, new instance kills the previous one -# This limitation is for Ansible - -set -euo pipefail - -# set variables needed by common_functions -readonly internet_access_checks_enabled="yes" -readonly script_path="$(readlink -f $(dirname $0))" -. 
"${script_path}/common/common_functions.sh" - -# === Functions (in alphabetical order) === - -# params: -add_repo() { - local repo_id="$1" - local repo_url="$2" - - if ! is_repo_enabled "$repo_id"; then - echol "Adding repository: $repo_id" - yum-config-manager --add-repo "$repo_url" || - exit_with_error "Command failed: yum-config-manager --add-repo \"$repo_url\"" - # to accept import of GPG keys - yum -y repolist > /dev/null || - exit_with_error "Command failed: yum -y repolist" - fi -} - -# params: -add_repo_as_file() { - local repo_id="$1" - local config_file_content="$2" - local config_file_name="$repo_id.repo" - - if ! is_repo_enabled "$repo_id"; then - echol "Adding repository: $repo_id" - cat <<< "$config_file_content" > "/etc/yum.repos.d/$config_file_name" || - exit_with_error "Function add_repo_as_file failed for repo: $repo_id" - local -a gpg_key_urls - IFS=" " read -r -a gpg_key_urls \ - <<< "$(grep -i --only-matching --perl-regexp '(?<=^gpgkey=)http[^#\n]+' <<< "$config_file_content")" - if (( ${#gpg_key_urls[@]} > 0 )); then - import_repo_gpg_keys "${gpg_key_urls[@]}" 3 - fi - # to accept import of repo's GPG key (for repo_gpgcheck=1) - yum -y repolist > /dev/null || exit_with_error "Command failed: yum -y repolist" - fi -} - -# params: -add_repo_from_script() { - local script_url="$1" - - echol "Running: curl $script_url | bash" - curl "$script_url" | bash -} - -# params: ... [path_N_to_backup] -backup_files() { - local backup_file_path="$1" - shift - local paths_to_backup=("$@") - - # --directory='/' is for tar --verify - tar --create --verbose --verify --directory="/" --file="$backup_file_path" "${paths_to_backup[@]}" -} - -# params: -create_directory() { - local dir_path="$1" - - if [[ -d "$dir_path" ]]; then - echol "Directory $dir_path already exists" - else - echol "Creating directory: $dir_path" - mkdir -p "$dir_path" || exit_with_error "Command failed: mkdir -p \"$dir_path\"" - fi -} - -# params: -disable_repo() { - local repo_id="$1" - - if yum repolist enabled | grep --quiet "$repo_id"; then - echol "Disabling repository: $repo_id" - yum-config-manager --disable "$repo_id" || - exit_with_error "Command failed: yum-config-manager --disable \"$repo_id\"" - fi -} - -# params: [new_filename] -download_file() { - local file_url="$1" - local dest_dir="$2" - - if [[ ${3-} ]]; then - local file_name=$3 - else - local file_name - file_name=$(basename "$file_url") - fi - - local dest_path="${dest_dir}/${file_name}" - local retries=3 - - if [[ ${3-} ]]; then - echol "Downloading file: $file_url as $file_name" - run_cmd_with_retries wget --quiet --directory-prefix="$dest_dir" "$file_url" -O "$dest_path" $retries || \ - exit_with_error "Command failed: wget --no-verbose --directory-prefix=$dest_dir $file_url $retries" - else - echol "Downloading file: $file_url" - run_cmd_with_retries wget --quiet --directory-prefix="$dest_dir" "$file_url" $retries || \ - exit_with_error "Command failed: wget --no-verbose --directory-prefix=$dest_dir $file_url $retries" - fi -} - -# params: -download_image() { - local image_name="$1" - local dest_dir="$2" - - local splited_image=(${image_name//:/ }) - local repository=${splited_image[0]} - local tag=${splited_image[1]} - local repo_basename=$(basename -- "$repository") - local dest_path="${dest_dir}/${repo_basename}-${tag}.tar" - local retries=3 - - if [[ -f $dest_path ]]; then - echol "Image file: $dest_path already exists. Skipping..." 
-# params: <image_name> <dest_dir>
-download_image() {
-  local image_name="$1"
-  local dest_dir="$2"
-
-  local split_image=(${image_name//:/ })
-  local repository=${split_image[0]}
-  local tag=${split_image[1]}
-  local repo_basename=$(basename -- "$repository")
-  local dest_path="${dest_dir}/${repo_basename}-${tag}.tar"
-  local retries=3
-
-  if [[ -f $dest_path ]]; then
-    echol "Image file: $dest_path already exists. Skipping..."
-  else
-    # use temporary file for downloading to be safe from sudden interruptions (network, ctrl+c)
-    local tmp_file_path=$(mktemp)
-    local crane_cmd="$CRANE_BIN pull --insecure --platform=${DOCKER_PLATFORM} --format=legacy ${image_name} ${tmp_file_path}"
-    echol "Downloading image: $image_name"
-    { run_cmd_with_retries $crane_cmd $retries && chmod 644 $tmp_file_path && mv $tmp_file_path $dest_path; } ||
-      exit_with_error "crane failed, command was: $crane_cmd && chmod 644 $tmp_file_path && mv $tmp_file_path $dest_path"
-  fi
-}
-
-# params: <dest_dir> <package_1> ... [package_N]
-download_packages() {
-  local dest_dir="$1"
-  shift
-  local packages="$@"
-  local retries=3
-
-  if [[ -n $packages ]]; then
-    # when using --archlist=x86_64 yumdownloader (yum-utils-1.1.31-52) also downloads i686 packages
-    run_cmd_with_retries yumdownloader --quiet --archlist="$ARCH" --exclude='*i686' --destdir="$dest_dir" $packages $retries
-  fi
-}
-
-# params: <repo_id>
-enable_repo() {
-  local repo_id="$1"
-
-  if ! yum repolist enabled | grep --quiet "$repo_id"; then
-    echol "Enabling repository: $repo_id"
-    yum-config-manager --enable "$repo_id" ||
-      exit_with_error "Command failed: yum-config-manager --enable \"$repo_id\""
-  fi
-}
-
-# desc: find repo id (set $1) based on given pattern
-# params: <result_var> <rhel_on_prem_repo_id> <pattern>
-find_rhel_repo_id() {
-  # $1 reserved for result
-  local rhel_on_prem_repo_id="$2"
-  local pattern="$3"
-  local repo_id
-
-  if yum repolist all | egrep --quiet "$pattern"; then
-    repo_id=$(yum repolist all | egrep --only-matching "$pattern")
-  else
-    exit_with_error "RHEL yum repository not found, pattern was: $pattern"
-  fi
-
-  eval $1='$repo_id'
-}
-
-# params: <result_var> <package>
-get_package_dependencies_with_arch() {
-  # $1 reserved for result
-  local package="$2"
-
-  local query_output=$(repoquery --requires --resolve --queryformat '%{name}.%{arch}' --archlist=$ARCH,noarch "$package") ||
-    exit_with_error "repoquery failed for dependencies of package: $package with exit code: $?, output was: $query_output"
-
-  if [[ -z $query_output ]]; then
-    echol "No dependencies found for package: $package"
-  elif grep --ignore-case --perl-regexp '\b(?<!-)error\b' <<< "$query_output"; then
-    exit_with_error "repoquery failed for dependencies of package: $package, output was: $query_output"
-  fi
-
-  eval $1='$query_output'
-}
-
-# params: <result_var> <package>
-get_package_with_version_arch() {
-  # $1 reserved for result
-  local package="$2"
-
-  local query_output=$(repoquery --queryformat '%{ui_nevra}' --archlist=$ARCH,noarch "$package") ||
-    exit_with_error "repoquery failed for package: $package with exit code: $?, output was: $query_output"
-
-  # yumdownloader doesn't set error code if repoquery returns empty output
-  [[ -n $query_output ]] || exit_with_error "repoquery failed: package $package not found"
-  if grep --ignore-case --perl-regexp '\b(?<!-)error\b' <<< "$query_output"; then
-    exit_with_error "repoquery failed for package: $package, output was: $query_output"
-  fi
-
-  eval $1='$query_output'
-}
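The repoquery helpers above return results by eval-ing into caller-named variables, a workaround for Bash's lack of return values. In Python the same queries become plain functions that return strings; a hedged stdlib-only sketch (function and argument names are illustrative, not the actual src/ API):

import subprocess
from typing import List

def get_package_with_version_arch(package: str, arch: str) -> str:
    # One fixed "name-epoch:version-release.arch" string per package,
    # equivalent to repoquery --queryformat '%{ui_nevra}'.
    result = subprocess.run(
        ['repoquery', '--queryformat', '%{ui_nevra}',
         f'--archlist={arch},noarch', package],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True)
    output = result.stdout.strip()
    if result.returncode != 0 or not output:
        # repoquery does not always set an exit code for missing packages
        raise RuntimeError(f'repoquery failed: package {package} not found')
    return output

def get_packages_with_version_arch(packages: List[str], arch: str) -> List[str]:
    # Replaces the eval-based result passing of the shell helpers.
    return [get_package_with_version_arch(p, arch) for p in packages]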
-# params: <result_var> <package_1> ... [package_N]
-get_packages_with_version_arch() {
-  local result_var_name="$1"
-  shift
-  local packages=("$@")
-  local packages_with_version_arch=()
-
-  for package in "${packages[@]}"; do
-    get_package_with_version_arch 'QUERY_OUTPUT' "$package"
-    packages_with_version_arch+=("$QUERY_OUTPUT")
-  done
-
-  eval $result_var_name='("${packages_with_version_arch[@]}")'
-}
-
-# params: <result_var> <group_name> <requirements_file_path>
-get_requirements_from_group() {
-  # $1 reserved for result
-  local group_name="$2"
-  local requirements_file_path="$3"
-  local all_requirements=$(grep --only-matching '^[^#]*' "$requirements_file_path" | sed -e 's/[[:space:]]*$//')
-
-  if [[ $group_name == "files" ]]; then
-    local requirements_from_group=$(awk "/^$/ {next}; /\[${group_name}\]/ {f=1; f=2; next}; /^\[/ {f=0}; f {print \$0}" <<< "$all_requirements") ||
-      exit_with_error "Function get_requirements_from_group failed for group: $group_name"
-  else
-    local requirements_from_group=$(awk "/^$/ {next}; /\[${group_name}\]/ {f=1; next}; /^\[/ {f=0}; f {print \$0}" <<< "$all_requirements") ||
-      exit_with_error "Function get_requirements_from_group failed for group: $group_name"
-  fi
-
-  [[ -n $requirements_from_group ]] || echol "No requirements found for group: $group_name"
-
-  eval $1='$requirements_from_group'
-}
-
-# params: <result_var> <item_1> ... [item_N]
-get_unique_array() {
-  local result_var_name="$1"
-  shift
-  local array=("$@")
-
-  # filter out duplicates
-  array=($(echo "${array[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' '))
-
-  eval $result_var_name='("${array[@]}")'
-}
-
-# params: <url_1> ... [url_N] <retries>
-import_repo_gpg_keys() {
-  local retries=${!#} # get last arg
-  local urls=( "${@:1:$# - 1}" ) # remove last arg
-
-  for url in "${urls[@]}"; do
-    run_cmd_with_retries rpm --import "$url" "$retries"
-  done
-}
-
-# params: <package_name_or_url> [package_name]
-install_package() {
-  local package_name_or_url="$1"
-  local package_name="$1"
-
-  [ $# -gt 1 ] && package_name="$2"
-
-  echol "Installing package: $package_name"
-  if yum install -y "$package_name_or_url"; then
-    echo "$package_name" >> "$INSTALLED_PACKAGES_FILE_PATH"
-  else
-    exit_with_error "Command failed: yum install -y \"$package_name_or_url\""
-  fi
-}
-
-# params: <package>
-is_package_installed() {
-  local package="$1"
-
-  if rpm --query --quiet "$package"; then
-    echol "Package $package already installed"
-    return 0
-  else
-    return 1
-  fi
-}
-
-# params: <repo_id>
-is_repo_available() {
-  local repo_id="$1"
-
-  echol "Checking if '$repo_id' repo is available"
-  yum -q --disablerepo=* --enablerepo="$repo_id" repoinfo > /dev/null # returns 1 when 'Error 404 - Not Found'
-}
-
-# params: <repo_id>
-is_repo_enabled() {
-  local repo_id="$1"
-
-  if yum repolist | grep --quiet "$repo_id"; then
-    echol "Repository $repo_id already enabled"
-    return 0
-  else
-    return 1
-  fi
-}
-
-# params: <package>
-remove_package() {
-  local package="$1"
-
-  if rpm --query --quiet "$package"; then
-    echol "Removing package: $package"
-    yum remove -y "$package" || exit_with_error "Command failed: yum remove -y \"$package\""
-  fi
-}
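install_package above records every package it installs so that cleanup can later remove exactly that set and nothing else. The same bookkeeping in Python, as an illustrative stdlib-only sketch (the tracking path is hypothetical):

import subprocess
from pathlib import Path

# Illustrative location; the shell script keeps its list next to itself.
INSTALLED_PACKAGES_FILE = Path('/var/tmp/installed-packages-list.tmp')

def install_package(package: str) -> None:
    # Install, then record the name so cleanup can undo exactly this set.
    subprocess.run(['yum', 'install', '-y', package], check=True)
    with INSTALLED_PACKAGES_FILE.open('a') as tracking_file:
        tracking_file.write(package + '\n')

def remove_installed_packages() -> None:
    if not INSTALLED_PACKAGES_FILE.exists():
        return
    for package in sorted(set(INSTALLED_PACKAGES_FILE.read_text().split())):
        subprocess.run(['yum', 'remove', '-y', package], check=True)
    INSTALLED_PACKAGES_FILE.unlink()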
-# params: <yum_repos_backup_tar_file_path>
-remove_added_repos() {
-  local yum_repos_backup_tar_file_path="$1"
-
-  declare -A initial_yum_repo_files
-  for repo_config_file in $(tar -tf "$yum_repos_backup_tar_file_path" | grep '.repo$' | xargs -L 1 --no-run-if-empty basename); do
-    initial_yum_repo_files["$repo_config_file"]=1
-  done
-
-  for repo_config_file in $(find /etc/yum.repos.d/ -maxdepth 1 -type f -name '*.repo' -printf "%f\n"); do
-    if (( ${initial_yum_repo_files["$repo_config_file"]:-0} == 0 )); then
-      # remove only if not owned by a package
-      if ! rpm --quiet --query --file "/etc/yum.repos.d/$repo_config_file"; then
-        remove_file "/etc/yum.repos.d/$repo_config_file"
-      fi
-    fi
-  done
-}
-
-# params: <file_path>
-remove_file() {
-  local file_path="$1"
-
-  echol "Removing file: $file_path"
-  rm -f "$file_path" || exit_with_error "Command failed: rm -f \"$file_path\""
-}
-
-# params: <installed_packages_list_file>
-remove_installed_packages() {
-  local installed_packages_list_file="$1"
-
-  if [ -f "$installed_packages_list_file" ]; then
-    for package in $(cat $installed_packages_list_file | sort --unique); do
-      remove_package "$package"
-    done
-    remove_file "$installed_packages_list_file"
-  fi
-}
-
-remove_yum_cache_for_untracked_repos() {
-  local basearch releasever
-  basearch=$(uname --machine)
-  releasever=$(rpm -q --provides "$(rpm -q --whatprovides 'system-release(releasever)')" | grep "system-release(releasever)" | cut -d ' ' -f 3)
-  local cachedir find_output
-  cachedir=$(grep --only-matching --perl-regexp '(?<=^cachedir=)[^#\n]+' /etc/yum.conf)
-  cachedir="${cachedir/\$basearch/$basearch}"
-  cachedir="${cachedir/\$releasever/$releasever}"
-  find_output=$(find "$cachedir" -mindepth 1 -maxdepth 1 -type d -exec basename '{}' ';')
-  local -a repos_with_cache=()
-  if [ -n "$find_output" ]; then
-    readarray -t repos_with_cache <<< "$find_output"
-  fi
-  local all_repos_output
-  all_repos_output=$(yum repolist -v all | grep --only-matching --perl-regexp '(?<=^Repo-id)[^/]+' | sed -e 's/^[[:space:]:]*//')
-  local -a all_repos=()
-  readarray -t all_repos <<< "$all_repos_output"
-  if (( ${#repos_with_cache[@]} > 0 )); then
-    for cached_repo in "${repos_with_cache[@]}"; do
-      if ! _in_array "$cached_repo" "${all_repos[@]}"; then
-        run_cmd rm -rf "$cachedir/$cached_repo"
-      fi
-    done
-  fi
-}
-
-# Runs a command passed as an array and prints it; does not support shell operators (such as pipes or redirection)
-# params: <command> [--no-exit-on-error]
-run_cmd() {
-  local cmd_arr=("$@")
-
-  local exit_on_error=1
-  if [[ ${cmd_arr[-1]} == '--no-exit-on-error' ]]; then
-    exit_on_error=0
-    cmd_arr=( "${cmd_arr[@]:0:$# - 1}" ) # remove last item
-  fi
-
-  local escaped_string return_code
-  escaped_string=$(_print_array_as_shell_escaped_string "${cmd_arr[@]}")
-  echol "Executing: ${escaped_string}"
-  "${cmd_arr[@]}"; return_code=$?
-  if (( return_code != 0 )) && (( exit_on_error )); then
-    exit_with_error "Command failed: ${escaped_string}"
-  else
-    return $return_code
-  fi
-}
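run_cmd logs each command in shell-escaped form before executing it, so failures can be reproduced by copy-paste. Python's shlex gives the same property almost for free; a minimal sketch of an equivalent helper (an assumption about how the rewrite might look, not code from this PR):

import logging
import shlex
import subprocess
from typing import List

def run_cmd(cmd: List[str], exit_on_error: bool = True) -> int:
    # Log the command in a copy-pasteable, shell-escaped form,
    # like _print_array_as_shell_escaped_string in the script above.
    escaped = ' '.join(shlex.quote(arg) for arg in cmd)
    logging.info('Executing: %s', escaped)
    return_code = subprocess.call(cmd)
    if return_code != 0 and exit_on_error:
        raise SystemExit(f'Command failed: {escaped}')
    return return_code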
-# Runs a command with retries; does not support shell operators (such as pipes or redirection)
-# params: <command> <retries>
-run_cmd_with_retries() {
-  # pop 'retries' argument
-  local retries=${!#} # get last arg (indirect expansion)
-  set -- "${@:1:$#-1}" # set new "$@"
-
-  local cmd_arr=("$@")
-  ( # sub-shell is used to limit scope for 'set +e'
-    set +e
-    trap - ERR # disable global trap locally
-    for ((i=0; i <= retries; i++)); do
-      run_cmd "${cmd_arr[@]}" '--no-exit-on-error'
-      return_code=$?
-      if (( return_code == 0 )); then
-        break
-      elif (( i < retries )); then
-        sleep 1
-        echol "retrying ($(( i+1 ))/${retries})"
-      else
-        echol "ERROR: all attempts failed"
-        local escaped_string
-        escaped_string=$(_print_array_as_shell_escaped_string "${cmd_arr[@]}")
-        exit_with_error "Command failed: ${escaped_string}"
-      fi
-    done
-    return $return_code
-  )
-}
-
-usage() {
-  echo "usage: ./$(basename $0) <downloads_dir> [--no-logfile]"
-  echo "example: ./$(basename $0) /tmp/downloads"
-  exit 1
-}
-
-validate_bash_version() {
-  local major_version=${BASH_VERSINFO[0]}
-  local minor_version=${BASH_VERSINFO[1]}
-  local required_version=(4 2) # (major minor)
-  if (( major_version < ${required_version[0]} )) ||
-    { (( major_version == ${required_version[0]} )) && (( minor_version < ${required_version[1]} )); }; then
-    exit_with_error "This script requires Bash version ${required_version[0]}.${required_version[1]} or higher."
-  fi
-}
-
-# === Helper functions (in alphabetical order) ===
-
-_get_shell_escaped_array() {
-  if (( $# > 0 )); then
-    printf '%q\n' "$@"
-  fi
-}
-
-# params: <value> <array_item_1> ... [array_item_N]
-_in_array() {
-  local value=${1}
-  shift
-  local array=( "$@" )
-
-  (( ${#array[@]} > 0 )) && printf '%s\n' "${array[@]}" | grep -q -Fx "$value"
-}
-
-# Prints string in format that can be reused as shell input (escapes non-printable characters)
-_print_array_as_shell_escaped_string() {
-  local output
-  output=$(_get_shell_escaped_array "$@")
-  local escaped=()
-  if [ -n "$output" ]; then
-    readarray -t escaped <<< "$output"
-  fi
-  if (( ${#escaped[@]} > 0 )); then
-    printf '%s\n' "${escaped[*]}"
-  fi
-}
-
-# === Start ===
-
-validate_bash_version
-
-if [[ $# -lt 1 ]]; then
-  usage >&2
-fi
-
-readonly START_TIME=$(date +%s)
-
-# --- Parse arguments ---
-
-POSITIONAL_ARGS=()
-CREATE_LOGFILE='yes'
-while [[ $# -gt 0 ]]; do
-  case $1 in
-    --no-logfile)
-      CREATE_LOGFILE='no'
-      shift # past argument
-      ;;
-    *) # unknown option
-      POSITIONAL_ARGS+=("$1") # save it in an array for later
-      shift
-      ;;
-  esac
-done
-set -- "${POSITIONAL_ARGS[@]}" # restore positional arguments
-
-# --- Global variables ---
-
-# dirs
-readonly DOWNLOADS_DIR="$1" # root directory for downloads
-readonly FILES_DIR="${DOWNLOADS_DIR}/files"
-readonly PACKAGES_DIR="${DOWNLOADS_DIR}/packages"
-readonly IMAGES_DIR="${DOWNLOADS_DIR}/images"
-readonly REPO_PREREQ_PACKAGES_DIR="${PACKAGES_DIR}/repo-prereqs"
-readonly SCRIPT_DIR="$(dirname $(readlink -f $0))" # want absolute path
-
-# files
-readonly SCRIPT_FILE_NAME=$(basename "$0")
-readonly LOG_FILE_NAME="${SCRIPT_FILE_NAME}.log"
-readonly LOG_FILE_PATH="${SCRIPT_DIR}/${LOG_FILE_NAME}"
-readonly YUM_CONFIG_BACKUP_FILE_PATH="${SCRIPT_DIR}/${SCRIPT_FILE_NAME}-yum-repos-backup-tmp-do-not-remove.tar"
-readonly CRANE_BIN="${SCRIPT_DIR}/crane"
-readonly INSTALLED_PACKAGES_FILE_PATH="${SCRIPT_DIR}/${SCRIPT_FILE_NAME}-installed-packages-list-do-not-remove.tmp"
-readonly PID_FILE_PATH="/var/run/${SCRIPT_FILE_NAME}.pid"
-readonly ADD_MULTIARCH_REPOSITORIES_SCRIPT="${SCRIPT_DIR}/add-repositories.multiarch.sh"
-
-# arch
-readonly ARCH=$(uname -m)
-echol "Detected arch: ${ARCH}"
-readonly REQUIREMENTS_FILE_PATH="${SCRIPT_DIR}/requirements.${ARCH}.txt"
-readonly ADD_ARCH_REPOSITORIES_SCRIPT="${SCRIPT_DIR}/add-repositories.${ARCH}.sh"
-case $ARCH in
-x86_64)
-  readonly DOCKER_PLATFORM="linux/amd64"
-  ;;
-
-aarch64)
-  readonly DOCKER_PLATFORM="linux/arm64"
-  ;;
-
-*)
-  exit_with_error "Arch ${ARCH} unsupported"
-  ;;
-esac
-echol "Docker platform: ${DOCKER_PLATFORM}"
-
-# --- Checks ---
-
-[ $EUID -eq 0 ] || { echo "You have to run this script as root" && exit 1; }
-
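The run_cmd_with_retries pattern above (try, sleep one second, retry, give up after N attempts) is the same retry envelope the new Python code carries around as config.retries. A generic stdlib sketch of that envelope, offered as an illustration rather than the PR's actual helper:

import time
from typing import Callable

def with_retries(func: Callable, *args, retries: int = 3, delay: float = 1.0):
    # Equivalent of run_cmd_with_retries: try, sleep, retry, then give up.
    for attempt in range(retries + 1):
        try:
            return func(*args)
        except Exception:
            if attempt == retries:
                raise
            time.sleep(delay)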
exit_with_error "File not found: $REQUIREMENTS_FILE_PATH" - -# --- Want to have only one instance for Ansible --- - -if [ -f $PID_FILE_PATH ]; then - readonly PID_FROM_FILE=$(cat $PID_FILE_PATH 2> /dev/null) - if [[ -n $PID_FROM_FILE ]] && kill -0 $PID_FROM_FILE > /dev/null 2>&1; then - echol "Found running process with pid: $PID_FROM_FILE, cmd: $(ps -p $PID_FROM_FILE -o cmd=)" - if ps -p $PID_FROM_FILE -o cmd= | grep --quiet $SCRIPT_FILE_NAME; then - echol "Killing old instance using SIGTERM" - kill -s SIGTERM $PID_FROM_FILE # try gracefully - if sleep 3 && kill -0 $PID_FROM_FILE > /dev/null 2>&1; then - echol "Still running, killing old instance using SIGKILL" - kill -s SIGKILL $PID_FROM_FILE # forcefully - fi - else - remove_file $PID_FILE_PATH - exit_with_error "Process with pid: $PID_FILE_PATH seems to be not an instance of this script" - fi - else - echol "Process with pid: $PID_FROM_FILE not found" - fi - remove_file $PID_FILE_PATH -fi - -echol "PID is: $$, creating file: $PID_FILE_PATH" -echo $$ > $PID_FILE_PATH || exit_with_error "Command failed: echo $$ > $PID_FILE_PATH" - -# --- Parse requirements file --- - -# Requirements are grouped using sections: [packages-repo-prereqs], [packages], [files], [images] -get_requirements_from_group 'REPO_PREREQ_PACKAGES' 'packages-repo-prereqs' "$REQUIREMENTS_FILE_PATH" -get_requirements_from_group 'CRANE' 'crane' "$REQUIREMENTS_FILE_PATH" -get_requirements_from_group 'PACKAGES' 'packages' "$REQUIREMENTS_FILE_PATH" -get_requirements_from_group 'FILES' 'files' "$REQUIREMENTS_FILE_PATH" -get_requirements_from_group 'IMAGES' 'images' "$REQUIREMENTS_FILE_PATH" - -# === Packages === - -# --- Backup yum repositories --- - -check_connection yum $(yum repolist --quiet | tail -n +2 | cut -d' ' -f1 | cut -d'/' -f1 | sed 's/^!//') - -if [ -f "$YUM_CONFIG_BACKUP_FILE_PATH" ]; then - echol "Backup aleady exists: $YUM_CONFIG_BACKUP_FILE_PATH" -else - echol "Backuping /etc/yum.repos.d/ to $YUM_CONFIG_BACKUP_FILE_PATH" - if backup_files "$YUM_CONFIG_BACKUP_FILE_PATH" '/etc/yum.repos.d/'; then - echol "Backup done" - else - if [ -f "$YUM_CONFIG_BACKUP_FILE_PATH" ]; then - remove_file "$YUM_CONFIG_BACKUP_FILE_PATH" - fi - exit_with_error "Backup of yum repositories failed" - fi -fi - -# --- Restore system repositories in case epirepo is enabled - -enable_system_repos_script="/var/tmp/epi-repository-setup-scripts/enable-system-repos.sh" -disable_epirepo_client_script="/var/tmp/epi-repository-setup-scripts/disable-epirepo-client.sh" - -if [[ -f /etc/yum.repos.d/epirepo.repo ]]; then - if [[ -f /var/tmp/enabled-system-repos.txt && -f $enable_system_repos_script ]]; then - echol "OS repositories seems missing, restoring..." - $enable_system_repos_script || exit_with_error "Could not restore system repositories" - $disable_epirepo_client_script || exit_with_error "Could not disable epirepo" - else - echol "/var/tmp/enabled-system-repos.txt or $enable_system_repos_script seems missing, you either know what you're doing or you need to fix your repositories" - fi -fi - -# Fix for RHUI client certificate expiration [#2318] -if is_repo_enabled "rhui-microsoft-azure-rhel"; then - run_cmd_with_retries yum update -y --disablerepo='*' --enablerepo='rhui-microsoft-azure-rhel*' 3 -fi - -# --- Install required packages unless present --- - -# repos can be enabled or disabled using the yum-config-manager command, which is provided by yum-utils package -for package in 'yum-utils' 'wget' 'curl' 'tar'; do - if ! 
is_package_installed "$package"; then - install_package "$package" - fi -done - -# --- Download and setup Crane for downloading images --- - -if [[ -z "${CRANE}" ]] || [ $(wc -l <<< "${CRANE}") -ne 1 ] ; then - exit_with_error "Crane binary download path undefined or more than one download path defined" -else - if [[ -x $CRANE_BIN ]]; then - echol "Crane binary already exists" - else - file_url=$(head -n 1 <<< "${CRANE}") - - check_connection wget $file_url - - echol "Downloading crane from: ${file_url}" - download_file "${file_url}" "${SCRIPT_DIR}" - tar_path="${SCRIPT_DIR}/${file_url##*/}" - echol "Unpacking crane from ${tar_path} to ${CRANE_BIN}" - run_cmd tar -xzf "${tar_path}" --directory "${SCRIPT_DIR}" "crane" --overwrite - [[ -x "${CRANE_BIN}" ]] || run_cmd chmod +x "${CRANE_BIN}" - remove_file "${tar_path}" - fi -fi - -# --- Enable RHEL repos --- - -# -> rhel-7-server-extras-rpms # for container-selinux package, this repo has different id names on clouds -# About rhel-7-server-extras-rpms: https://access.redhat.com/solutions/3418891 - -ON_PREM_REPO_ID='rhel-7-server-extras-rpms' -REPO_ID_PATTERN="$ON_PREM_REPO_ID|rhui-rhel-7-server-rhui-extras-rpms|rhui-REGION-rhel-server-extras|rhel-7-server-rhui-extras-rpms" # on-prem|Azure|AWS7.8|AWS7.9 -find_rhel_repo_id 'REPO_ID' "$ON_PREM_REPO_ID" "$REPO_ID_PATTERN" -enable_repo "$REPO_ID" - -# -> rhel-server-rhscl-7-rpms # for Red Hat Software Collections (RHSCL), this repo has different id names on clouds -# About rhel-server-rhscl-7-rpms: https://access.redhat.com/solutions/472793 - -ON_PREM_REPO_ID='rhel-server-rhscl-7-rpms' -REPO_ID_PATTERN="$ON_PREM_REPO_ID|rhui-rhel-server-rhui-rhscl-7-rpms|rhui-REGION-rhel-server-rhscl|rhel-server-rhui-rhscl-7-rpms" # on-prem|Azure|AWS7.8|AWS7.9 -find_rhel_repo_id 'REPO_ID' "$ON_PREM_REPO_ID" "$REPO_ID_PATTERN" -enable_repo "$REPO_ID" - -# --- Add repos --- - -# noarch repositories -. ${ADD_MULTIARCH_REPOSITORIES_SCRIPT} - -# arch specific repositories -. ${ADD_ARCH_REPOSITORIES_SCRIPT} - -# some packages are from EPEL repo -if ! is_package_installed 'epel-release'; then - install_package 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm' 'epel-release' -fi - -# clean metadata for upgrades (when the same package can be downloaded from changed repo) -run_cmd remove_yum_cache_for_untracked_repos - -run_cmd_with_retries yum -y makecache fast 3 - -# --- Download packages --- - -# 1) packages required to create repository - -create_directory "$REPO_PREREQ_PACKAGES_DIR" - -# prepare lists -PREREQ_PACKAGES=() -for package in $REPO_PREREQ_PACKAGES; do - echol "Processing package: $package" - get_package_with_version_arch 'QUERY_OUTPUT' "$package" - PREREQ_PACKAGES+=("$QUERY_OUTPUT") -done - -# download requirements (fixed versions) -if [[ ${#PREREQ_PACKAGES[@]} -gt 0 ]]; then - echol "Downloading repository prerequisite packages (${#PREREQ_PACKAGES[@]})..." 
- download_packages "$REPO_PREREQ_PACKAGES_DIR" "${PREREQ_PACKAGES[@]}" -fi - -# 2) non-prerequisite packages - -create_directory "$PACKAGES_DIR" - -# prepare lists -NON_PREREQ_PACKAGES=() -DEPENDENCIES_OF_NON_PREREQ_PACKAGES=() -for package in $PACKAGES; do - echol "Processing package: $package" - get_package_with_version_arch 'QUERY_OUTPUT' "$package" - NON_PREREQ_PACKAGES+=("$QUERY_OUTPUT") - get_package_dependencies_with_arch 'DEPENDENCIES' "$package" - if [[ ${#DEPENDENCIES[@]} -gt 0 ]]; then - for dependency in "${DEPENDENCIES[@]}"; do - DEPENDENCIES_OF_NON_PREREQ_PACKAGES+=("$dependency") - done - fi -done - -if [[ ${#NON_PREREQ_PACKAGES[@]} -gt 0 ]]; then - # download requirements (fixed versions) - echol "Downloading packages (${#NON_PREREQ_PACKAGES[@]})..." - download_packages "$PACKAGES_DIR" "${NON_PREREQ_PACKAGES[@]}" - # download dependencies (latest versions) - get_unique_array 'DEPENDENCIES' "${DEPENDENCIES_OF_NON_PREREQ_PACKAGES[@]}" - get_packages_with_version_arch 'DEPENDENCIES' "${DEPENDENCIES[@]}" - echol "Downloading dependencies of packages (${#DEPENDENCIES[@]})..." - download_packages "$PACKAGES_DIR" "${DEPENDENCIES[@]}" -fi - -# --- Clean up yum repos --- - -remove_added_repos "$YUM_CONFIG_BACKUP_FILE_PATH" - -# --- Restore yum repos --- - -echol "Restoring /etc/yum.repos.d/*.repo from: $YUM_CONFIG_BACKUP_FILE_PATH" -echol "Executing: tar --extract --verbose --file $YUM_CONFIG_BACKUP_FILE_PATH" -if tar --extract --verbose --file "$YUM_CONFIG_BACKUP_FILE_PATH" --directory /etc/yum.repos.d \ - --strip-components=2 'etc/yum.repos.d/*.repo'; then - echol "Restored: yum repositories" -else - exit_with_error "Extracting tar failed: $YUM_CONFIG_BACKUP_FILE_PATH" -fi - -# === Files === - -create_directory "$FILES_DIR" - -check_connection wget $FILES - -if [[ -z "$FILES" ]]; then - echol "No files to download" -else - - # list of all files that will be downloaded - echol "Files to be downloaded:" - cat -n <<< "${FILES}" - - printf "\n" - - while IFS=' ' read -r url new_filename; do - # download files, check if new filename is provided - if [[ -z $new_filename ]]; then - download_file "$url" "$FILES_DIR" - elif [[ $new_filename = *" "* ]]; then - exit_with_error "wrong new filename for file: $url" - else - download_file "$url" "$FILES_DIR" "$new_filename" - fi - done <<< "$FILES" -fi - -# === Images === - -check_connection crane $(for image in $IMAGES; do splitted=(${image//:/ }); echo "${splitted[0]}"; done) - -create_directory "$IMAGES_DIR" - -for image in $IMAGES; do - download_image "$image" "$IMAGES_DIR" -done - -# --- Clean up --- - -remove_installed_packages "$INSTALLED_PACKAGES_FILE_PATH" - -remove_file "$YUM_CONFIG_BACKUP_FILE_PATH" - -remove_file "$PID_FILE_PATH" - -readonly END_TIME=$(date +%s) - -echol "$SCRIPT_FILE_NAME finished, execution time: $(date -u -d @$((END_TIME-START_TIME)) +'%Hh:%Mm:%Ss')" diff --git a/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/requirements.x86_64.txt b/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/requirements.x86_64.txt deleted file mode 100644 index 0d76c63999..0000000000 --- a/ansible/playbooks/roles/repository/files/download-requirements/redhat-7/requirements.x86_64.txt +++ /dev/null @@ -1,254 +0,0 @@ -# Put this file in the same directory as download script - -[packages-repo-prereqs] -apr # for httpd -apr-util # for httpd -createrepo -deltarpm # for createrepo -httpd -httpd-tools # for httpd -mailcap # for httpd -mod_ssl # for httpd -python-chardet # for createrepo 
-python-deltarpm # for createrepo -python-kitchen # for createrepo -redhat-logos # for httpd -yum-utils - -[crane] -https://github.com/google/go-containerregistry/releases/download/v0.4.1/go-containerregistry_Linux_x86_64.tar.gz - -[packages] -audit # for docker-ce -bash-completion -ca-certificates -cifs-utils -conntrack-tools # for kubelet -containerd.io-1.4.12 -container-selinux -cri-tools-1.13.0 -curl -dejavu-sans-fonts # for grafana -docker-ce-20.10.8 -docker-ce-cli-20.10.8 -docker-ce-rootless-extras-20.10.8 -ebtables -elasticsearch-curator-5.8.3 -elasticsearch-oss-7.10.2 # for opendistroforelasticsearch & logging roles -ethtool -filebeat-7.9.2 -firewalld -fontconfig # for grafana -fping -fuse-overlayfs # for docker-ce-rootless-extras -gnutls # for cifs-utils -gssproxy # for nfs-utils -htop -iftop -ipset # for firewalld -java-1.8.0-openjdk-headless -javapackages-tools # for java-1.8.0-openjdk-headless -jq -libini_config # for nfs-utils -libselinux-python -libsemanage-python -libX11 # for grafana -libxcb # for grafana -libXcursor # for grafana -libXt # for grafana -logrotate -net-tools -nfs-utils -nmap-ncat -# Open Distro for Elasticsearch plugins are installed individually to not download them twice in different versions (as dependencies of opendistroforelasticsearch package) -opendistro-alerting-1.13.1.* -opendistro-index-management-1.13.1.* -opendistro-job-scheduler-1.13.0.* -opendistro-performance-analyzer-1.13.0.* -opendistro-security-1.13.1.* -opendistro-sql-1.13.0.* -opendistroforelasticsearch-kibana-1.13.1 # kibana has shorter version -openssl -perl # for vim -perl-Getopt-Long # for vim -perl-libs # for vim -perl-Pod-Perldoc # for vim -perl-Pod-Simple # for vim -perl-Pod-Usage # for vim -pgaudit15_13-1.5.0 -policycoreutils-python # for container-selinux -pyldb # for cifs-utils -python-cffi # for python2-cryptography -python-firewall # for firewalld -python-kitchen # for yum-utils -python-lxml # for java-1.8.0-openjdk-headless -python-psycopg2 -python-pycparser # for python2-cryptography -python-setuptools -python-slip-dbus # for firewalld -python2-cryptography # for Ansible (certificate modules) -python3-3.6.8 -quota # for nfs-utils -rabbitmq-server-3.8.9 -rh-haproxy18 -rh-haproxy18-haproxy-syspaths -postgresql13-server -repmgr10-5.2.1 # used to upgrade repmgr first -repmgr13-5.2.1 -samba-client -samba-client-libs # for samba-client -samba-common -samba-libs # for cifs-utils -sysstat -tar -telnet -tmux -urw-base35-fonts # for grafana -unzip -vim-common # for vim -vim-enhanced -wget -xorg-x11-font-utils # for grafana -xorg-x11-server-utils # for grafana -yum-plugin-versionlock -yum-utils - -# to make remote-to-remote "synchronize" work in ansible -rsync - -# K8s v1.18.6 (Epiphany >= v0.7.1) -kubeadm-1.18.6 -kubectl-1.18.6 -kubelet-1.18.6 - -# K8s v1.19.15 (Epiphany >= v1.3, transitional version) -kubeadm-1.19.15 -kubectl-1.19.15 -kubelet-1.19.15 - -# K8s v1.20.12 (Epiphany >= v1.3, transitional version) -kubeadm-1.20.12 -kubectl-1.20.12 -kubelet-1.20.12 - -# K8s v1.21.7 (Epiphany >= v1.3, transitional version) -kubeadm-1.21.7 -kubectl-1.21.7 -kubelet-1.21.7 - -# K8s v1.22.4 -kubeadm-1.22.4 -kubectl-1.22.4 -kubelet-1.22.4 - -# Kubernetes Generic -kubernetes-cni-0.8.6-0 # since K8s v1.18.6 -kubernetes-cni-0.8.7-0 # since K8s v1.19.15 - -[files] -# --- Packages --- -# Github repository for erlang rpm is used since packagecloud repository is limited to a certain number of versions and erlang package from erlang-solutions repository is much more complex and bigger 
-https://github.com/rabbitmq/erlang-rpm/releases/download/v23.1.5/erlang-23.1.5-1.el7.x86_64.rpm -# Grafana package is not downloaded from repository since it was not reliable (issue #2449) -https://dl.grafana.com/oss/release/grafana-8.3.2-1.x86_64.rpm -# --- Exporters --- -https://github.com/danielqsj/kafka_exporter/releases/download/v1.4.0/kafka_exporter-1.4.0.linux-amd64.tar.gz -https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.16.1/jmx_prometheus_javaagent-0.16.1.jar -https://github.com/prometheus/node_exporter/releases/download/v1.3.1/node_exporter-1.3.1.linux-amd64.tar.gz -https://github.com/prometheus-community/postgres_exporter/releases/download/v0.10.0/postgres_exporter-0.10.0.linux-amd64.tar.gz -# --- Misc --- -https://archive.apache.org/dist/kafka/2.6.0/kafka_2.12-2.6.0.tgz -https://github.com/prometheus/prometheus/releases/download/v2.31.1/prometheus-2.31.1.linux-amd64.tar.gz -https://github.com/prometheus/alertmanager/releases/download/v0.23.0/alertmanager-0.23.0.linux-amd64.tar.gz -https://archive.apache.org/dist/zookeeper/zookeeper-3.5.8/apache-zookeeper-3.5.8-bin.tar.gz -https://get.helm.sh/helm-v3.2.0-linux-amd64.tar.gz -https://archive.apache.org/dist/logging/log4j/2.17.1/apache-log4j-2.17.1-bin.tar.gz -# --- Helm charts --- -https://helm.elastic.co/helm/filebeat/filebeat-7.9.2.tgz -https://charts.bitnami.com/bitnami/node-exporter-2.3.17.tgz -# --- Grafana Dashboards --- -# Kubernetes Cluster -https://grafana.com/api/dashboards/7249/revisions/1/download grafana_dashboard_7249.json -# Kubernetes cluster monitoring (via Prometheus) -https://grafana.com/api/dashboards/315/revisions/3/download grafana_dashboard_315.json -# Node Exporter for Prometheus -https://grafana.com/api/dashboards/11074/revisions/9/download grafana_dashboard_11074.json -# Node Exporter Server Metrics -https://grafana.com/api/dashboards/405/revisions/8/download grafana_dashboard_405.json -# Postgres Overview -https://grafana.com/api/dashboards/455/revisions/2/download grafana_dashboard_455.json -# PostgreSQL Database -https://grafana.com/api/dashboards/9628/revisions/7/download grafana_dashboard_9628.json -# RabbitMQ Monitoring -https://grafana.com/api/dashboards/4279/revisions/4/download grafana_dashboard_4279.json -# Node Exporter Full -https://grafana.com/api/dashboards/1860/revisions/23/download grafana_dashboard_1860.json -# Kafka Exporter Overview -https://grafana.com/api/dashboards/7589/revisions/5/download grafana_dashboard_7589.json -# HaProxy backend (or frontend/servers) -https://grafana.com/api/dashboards/789/revisions/1/download grafana_dashboard_789.json -# Docker and Host Monitoring w/ Prometheus -https://grafana.com/api/dashboards/179/revisions/7/download grafana_dashboard_179.json -# Kubernetes pod and cluster monitoring (via Prometheus) -https://grafana.com/api/dashboards/6663/revisions/1/download grafana_dashboard_6663.json -# RabbitMQ cluster monitoring (via Prometheus) -https://grafana.com/api/dashboards/10991/revisions/11/download grafana_dashboard_10991.json - -[images] -haproxy:2.2.2-alpine -kubernetesui/dashboard:v2.3.1 -kubernetesui/metrics-scraper:v1.0.7 -registry:2 -# applications -bitnami/pgpool:4.2.4 -bitnami/pgbouncer:1.16.0 -epiphanyplatform/keycloak:14.0.0 -rabbitmq:3.8.9 -# K8s -## v1.18.6 -k8s.gcr.io/kube-apiserver:v1.18.6 -k8s.gcr.io/kube-controller-manager:v1.18.6 -k8s.gcr.io/kube-scheduler:v1.18.6 -k8s.gcr.io/kube-proxy:v1.18.6 -k8s.gcr.io/coredns:1.6.7 -k8s.gcr.io/etcd:3.4.3-0 -quay.io/coreos/flannel:v0.12.0-amd64 
-quay.io/coreos/flannel:v0.12.0 -calico/cni:v3.15.0 -calico/kube-controllers:v3.15.0 -calico/node:v3.15.0 -calico/pod2daemon-flexvol:v3.15.0 -## v1.19.15 -k8s.gcr.io/kube-apiserver:v1.19.15 -k8s.gcr.io/kube-controller-manager:v1.19.15 -k8s.gcr.io/kube-scheduler:v1.19.15 -k8s.gcr.io/kube-proxy:v1.19.15 -## v1.20.12 -k8s.gcr.io/kube-apiserver:v1.20.12 -k8s.gcr.io/kube-controller-manager:v1.20.12 -k8s.gcr.io/kube-scheduler:v1.20.12 -k8s.gcr.io/kube-proxy:v1.20.12 -k8s.gcr.io/coredns:1.7.0 -k8s.gcr.io/pause:3.2 -## v1.21.7 -k8s.gcr.io/kube-apiserver:v1.21.7 -k8s.gcr.io/kube-controller-manager:v1.21.7 -k8s.gcr.io/kube-scheduler:v1.21.7 -k8s.gcr.io/kube-proxy:v1.21.7 -k8s.gcr.io/coredns/coredns:v1.8.0 -k8s.gcr.io/etcd:3.4.13-0 -k8s.gcr.io/pause:3.4.1 -## v1.22.4 -k8s.gcr.io/kube-apiserver:v1.22.4 -k8s.gcr.io/kube-controller-manager:v1.22.4 -k8s.gcr.io/kube-scheduler:v1.22.4 -k8s.gcr.io/kube-proxy:v1.22.4 -k8s.gcr.io/coredns/coredns:v1.8.4 -k8s.gcr.io/etcd:3.5.0-0 -k8s.gcr.io/pause:3.5 -quay.io/coreos/flannel:v0.14.0-amd64 -quay.io/coreos/flannel:v0.14.0 -calico/cni:v3.20.3 -calico/kube-controllers:v3.20.3 -calico/node:v3.20.3 -calico/pod2daemon-flexvol:v3.20.3 diff --git a/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/redhat-7.yml b/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/redhat-7.yml new file mode 100644 index 0000000000..91009d92f0 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/redhat-7.yml @@ -0,0 +1,92 @@ +--- +repositories: + docker-ce-stable-patched: + data: | + [docker-ce-stable-patched] + name=Docker CE Stable - patched centos/7/$basearch/stable + baseurl=https://download.docker.com/linux/centos/7/$basearch/stable + enabled=1 + gpgcheck=1 + gpg_keys: + - https://download.docker.com/linux/centos/gpg + + elastic-6: + data: | + [elastic-6] + name=Elastic repository for 6.x packages + baseurl=https://artifacts.elastic.co/packages/oss-6.x/yum + gpgcheck=1 + enabled=1 + autorefresh=1 + type=rpm-md + gpg_keys: + - https://artifacts.elastic.co/GPG-KEY-elasticsearch + + elasticsearch-7: + data: | + [elasticsearch-7.x] + name=Elasticsearch repository for 7.x packages + baseurl=https://artifacts.elastic.co/packages/oss-7.x/yum + gpgcheck=1 + enabled=1 + autorefresh=1 + type=rpm-md + gpg_keys: + - https://artifacts.elastic.co/GPG-KEY-elasticsearch + + elasticsearch-curator-5: + data: | + [curator-5] + name=CentOS/RHEL 7 repository for Elasticsearch Curator 5.x packages + baseurl=https://packages.elastic.co/curator/5/centos/7 + gpgcheck=1 + enabled=1 + gpg_keys: + - https://packages.elastic.co/GPG-KEY-elasticsearch + + kubernetes: + data: | + [kubernetes] + name=Kubernetes + baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-$basearch + enabled=1 + gpgcheck=1 + repo_gpgcheck=1 + gpg_keys: + - https://packages.cloud.google.com/yum/doc/yum-key.gpg + - https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg + + opendistroforelasticsearch: + data: | + [opendistroforelasticsearch-artifacts-repo] + name=Release RPM artifacts of OpenDistroForElasticsearch + baseurl=https://d3g5vo6xdbdb9a.cloudfront.net/yum/noarch/ + enabled=1 + gpgcheck=1 + repo_gpgcheck=1 + autorefresh=1 + type=rpm-md + gpg_keys: + - https://d3g5vo6xdbdb9a.cloudfront.net/GPG-KEY-opendistroforelasticsearch + + postgresql-13: + data: | + [pgdg13] + name=PostgreSQL 13 for RHEL/CentOS $releasever - $basearch + 
baseurl=https://download.postgresql.org/pub/repos/yum/13/redhat/rhel-$releasever-$basearch + enabled=1 + gpgcheck=1 + gpg_keys: + - https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG + + rabbitmq: + data: | + [rabbitmq-server] + name=rabbitmq-rpm + baseurl=https://packagecloud.io/rabbitmq/rabbitmq-server/el/7/$basearch + gpgcheck=1 + repo_gpgcheck=1 + sslcacert=/etc/pki/tls/certs/ca-bundle.crt + enabled=1 + gpg_keys: + - https://packagecloud.io/rabbitmq/rabbitmq-server/gpgkey diff --git a/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/ubuntu-20.04.yml b/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/ubuntu-20.04.yml new file mode 100644 index 0000000000..aaa42da37d --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/repositories/x86_64/ubuntu-20.04.yml @@ -0,0 +1,44 @@ +--- +repositories: + elastic-6.x: + content: 'deb https://artifacts.elastic.co/packages/oss-6.x/apt stable main' + key: 'https://artifacts.elastic.co/GPG-KEY-elasticsearch' + + kubernetes: + content: 'deb http://apt.kubernetes.io/ kubernetes-xenial main' + key: 'https://packages.cloud.google.com/apt/doc/apt-key.gpg' + + erlang-23.x: + content: 'deb https://packages.erlang-solutions.com/ubuntu focal contrib' + key: 'https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc' + + rabbitmq: + content: 'deb https://packagecloud.io/rabbitmq/rabbitmq-server/ubuntu bionic main' + key: 'https://packagecloud.io/rabbitmq/rabbitmq-server/gpgkey' + + docker-ce: + content: 'deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable' + key: 'https://download.docker.com/linux/ubuntu/gpg' + + elastic-7.x: + content: 'deb https://artifacts.elastic.co/packages/oss-7.x/apt stable main' + key: 'https://artifacts.elastic.co/GPG-KEY-elasticsearch' + + opendistroforelasticsearch: + content: 'deb https://d3g5vo6xdbdb9a.cloudfront.net/apt stable main' + key: 'https://d3g5vo6xdbdb9a.cloudfront.net/GPG-KEY-opendistroforelasticsearch' + + # postgresql + pgdg: + content: 'deb http://apt.postgresql.org/pub/repos/apt focal-pgdg main' + key: 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' + + # postgresql historical packages from apt.postgresql.org + pgdg-archive: + content: 'deb http://apt-archive.postgresql.org/pub/repos/apt focal-pgdg-archive main' + key: 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' + + # Provides repmgr + 2ndquadrant-dl-default-release: + content: 'deb https://dl.2ndquadrant.com/default/release/apt focal-2ndquadrant main' + key: 'https://dl.2ndquadrant.com/gpg-key.asc' diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/grafana-dashboards.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/grafana-dashboards.yml new file mode 100644 index 0000000000..853cc0ff56 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/grafana-dashboards.yml @@ -0,0 +1,66 @@ +--- +grafana-dashboards: + # Kubernetes Cluster + grafana_dashboard_7249: + url: 'https://grafana.com/api/dashboards/7249/revisions/1/download' + sha256: 41cc2794b1cc9fc537baf045fee12d086d23632b4c8b2e88985274bb9862e731 + + # Kubernetes cluster monitoring (via Prometheus) + grafana_dashboard_315: + url: 'https://grafana.com/api/dashboards/315/revisions/3/download' + sha256: ee46dd6e68a9950aa78e8c88ae5e565c8ebde6cbdbe08972a70f06c5486618fb + + # Node Exporter for Prometheus + grafana_dashboard_11074: + url: 
'https://grafana.com/api/dashboards/11074/revisions/9/download' + sha256: 151b23305da46eab84930e99175e1c07e375af73dbbb4b8f501ca25f5ac62785 + + # Node Exporter Server Metrics + grafana_dashboard_405: + url: 'https://grafana.com/api/dashboards/405/revisions/8/download' + sha256: 97675027cbd5b7241e93a2b598654c4b466bc909eeb6358ba123d500094d913c + + # Postgres Overview + grafana_dashboard_455: + url: 'https://grafana.com/api/dashboards/455/revisions/2/download' + sha256: c66b91ab8d258b0dc005d3ee4dac3a5634a627c79cc8053875f76ab1e369a362 + + # PostgreSQL Database + grafana_dashboard_9628: + url: 'https://grafana.com/api/dashboards/9628/revisions/7/download' + sha256: c64cc38ad9ebd7af09551ee83e669a38f62a76e7c80929af5668a5852732b376 + + # RabbitMQ Monitoring + grafana_dashboard_4279: + url: 'https://grafana.com/api/dashboards/4279/revisions/4/download' + sha256: 74d47be868da52c145240ab5586d91ace9e9218ca775af988f9d60e501907a25 + + # Node Exporter Full + grafana_dashboard_1860: + url: 'https://grafana.com/api/dashboards/1860/revisions/23/download' + sha256: 225faab8bf35c1723af14d4c069882ccb92b455d1941c6b1cf3d95a1576c13d7 + + # Kafka Exporter Overview + grafana_dashboard_7589: + url: 'https://grafana.com/api/dashboards/7589/revisions/5/download' + sha256: cf020e14465626360418e8b5746818c80d77c0301422f3060879fddc099c2151 + + # HaProxy backend (or frontend/servers) + grafana_dashboard_789: + url: 'https://grafana.com/api/dashboards/789/revisions/1/download' + sha256: 6a9b4bdc386062287af4f7d56781103a2e45a51813596a65f03c1ae1d4d3e919 + + # Docker and Host Monitoring w/ Prometheus + grafana_dashboard_179: + url: 'https://grafana.com/api/dashboards/179/revisions/7/download' + sha256: 8d67350ff74e715fb1463f2406f24a73377357d90344f8200dad9d1b2a8133c2 + + # Kubernetes pod and cluster monitoring (via Prometheus) + grafana_dashboard_6663: + url: 'https://grafana.com/api/dashboards/6663/revisions/1/download' + sha256: d544d88069e1b793ff3d8f6970df641ad9a66217e69b629621e1ecbb2f06aa05 + + # RabbitMQ cluster monitoring (via Prometheus) + grafana_dashboard_10991: + url: 'https://grafana.com/api/dashboards/10991/revisions/11/download' + sha256: 66340fa3256d432287cba75ab5177eb058c77afa7d521a75d58099f95b1bff50 diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/cranes.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/cranes.yml new file mode 100644 index 0000000000..f249ac6581 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/cranes.yml @@ -0,0 +1,4 @@ +--- +cranes: + 'https://github.com/google/go-containerregistry/releases/download/v0.4.1/go-containerregistry_Linux_x86_64.tar.gz': + sha256: def1364f9483d133ccc6b1c4876f59a653d024c8866d96ecda026561d38c349b diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml new file mode 100644 index 0000000000..27734b80ec --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/files.yml @@ -0,0 +1,40 @@ +--- +files: + # --- Exporters --- + 'https://github.com/danielqsj/kafka_exporter/releases/download/v1.4.0/kafka_exporter-1.4.0.linux-amd64.tar.gz': + sha256: ffda682e82daede726da8719257a088f8e23dcaa4e2ac8b2b2748a129aea85f0 + + 'https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.16.1/jmx_prometheus_javaagent-0.16.1.jar': + sha256: 
0ddc6834f854c03d5795305193c1d33132a24fbd406b4b52828602f5bc30777e + + 'https://github.com/prometheus/node_exporter/releases/download/v1.3.1/node_exporter-1.3.1.linux-amd64.tar.gz': + sha256: 68f3802c2dd3980667e4ba65ea2e1fb03f4a4ba026cca375f15a0390ff850949 + + 'https://github.com/prometheus-community/postgres_exporter/releases/download/v0.10.0/postgres_exporter-0.10.0.linux-amd64.tar.gz': + sha256: 1d1a008c5e29673b404a9ce119b7516fa59974aeda2f47d4a0446d102abce8a1 + + # --- Misc --- + 'https://archive.apache.org/dist/kafka/2.6.0/kafka_2.12-2.6.0.tgz': + sha256: 086bf9ca1fcbe2abe5c62e73d6f172adb1ee5a5b42732e153fb4d4ec82dab69f + + 'https://archive.apache.org/dist/zookeeper/zookeeper-3.5.8/apache-zookeeper-3.5.8-bin.tar.gz': + sha256: c35ed6786d59b73920243f1a324d24c2ddfafb379041d7a350cc9a341c52caf3 + + 'https://github.com/prometheus/alertmanager/releases/download/v0.23.0/alertmanager-0.23.0.linux-amd64.tar.gz': + sha256: 77793c4d9bb92be98f7525f8bc50cb8adb8c5de2e944d5500e90ab13918771fc + + 'https://github.com/prometheus/prometheus/releases/download/v2.31.1/prometheus-2.31.1.linux-amd64.tar.gz': + sha256: 7852dc11cfaa039577c1804fe6f082a07c5eb06be50babcffe29214aedf318b3 + + 'https://get.helm.sh/helm-v3.2.0-linux-amd64.tar.gz': + sha256: 4c3fd562e64005786ac8f18e7334054a24da34ec04bbd769c206b03b8ed6e457 + + 'https://archive.apache.org/dist/logging/log4j/2.17.1/apache-log4j-2.17.1-bin.tar.gz': + sha256: b876c20c9d318d77a39c0c2e095897b2bb1cd100c7859643f8c7c8b0fc6d5961 + + # --- Helm charts --- + 'https://charts.bitnami.com/bitnami/node-exporter-2.3.17.tgz': + sha256: ec586fabb775a4f05510386899cf348391523c89ff5a1d4097b0592e675ade7f + + 'https://helm.elastic.co/helm/filebeat/filebeat-7.9.2.tgz': + sha256: 5140b4c4473ca33a0af4c3f70545dcc89735c0a179d974ebc150f1f28ac229ab diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/images.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/images.yml new file mode 100644 index 0000000000..7a7756cd61 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/images.yml @@ -0,0 +1,158 @@ +--- +images: + 'haproxy:2.2.2-alpine': + sha1: dff8993b065b7f7846adb553548bcdcfcd1b6e8e + + 'kubernetesui/dashboard:v2.3.1': + sha1: 8c8a4ac7a643f9c5dd9e5d22876c434187312db8 + + 'kubernetesui/metrics-scraper:v1.0.7': + sha1: 5a0052e2afd3eef3ae638be21938b29b1d608ebe + + 'registry:2': + sha1: 6afd2766e6880c62cb80d1ea85dbcc20d6ef16a4 + + # applications + 'bitnami/pgpool:4.2.4': + sha1: 66741f3cf4a508bd1f80e2965b0086a4c0fc3580 + + 'bitnami/pgbouncer:1.16.0': + sha1: f2e37eecbf9aed44d5566f06dcc101c1ba9edff9 + + 'epiphanyplatform/keycloak:14.0.0': + sha1: b59d75a967cedd3a4cf5867eced2fb5dff52f60e + + 'rabbitmq:3.8.9': + sha1: c64408bf5bb522f47d5323652dd5e60560dcb5bc + + # K8s + # v1.18.6 + 'k8s.gcr.io/kube-apiserver:v1.18.6': + sha1: 164968226f4617abaa31e6108ed9034a1e302f4f + + 'k8s.gcr.io/kube-controller-manager:v1.18.6': + sha1: ebea3fecab9e5693d31438fa37dc4d02c6914d67 + + 'k8s.gcr.io/kube-scheduler:v1.18.6': + sha1: 183d29c4fdcfda7478d08240934fdb6845e2e3ec + + 'k8s.gcr.io/kube-proxy:v1.18.6': + sha1: 62da886e36efff0c03a16e19c1442a1c3040fbf1 + + 'k8s.gcr.io/coredns:1.6.7': + sha1: 76615ffabb22fd4fb3d562cb6ebcd243f8826e48 + + 'k8s.gcr.io/etcd:3.4.3-0': + sha1: 6ee82ddb1bbc7f1831c42046612b8bcfbb171b45 + + 'quay.io/coreos/flannel:v0.12.0-amd64': + sha1: 3516522e779373983992095e61eb6615edd50d1f + + 'quay.io/coreos/flannel:v0.12.0': + sha1: 
2cb6ce8f1361886225526767c4a0422c039453c8 + + 'calico/cni:v3.15.0': + sha1: aa59f624c223bc398a42c7ba9e628e8143718e58 + + 'calico/kube-controllers:v3.15.0': + sha1: f8921f5d67ee7db1c619aa9fdb74114569684ceb + + 'calico/node:v3.15.0': + sha1: b15308e1aa8b9c56253c142e4361e47125bb4ac5 + + 'calico/pod2daemon-flexvol:v3.15.0': + sha1: dd1a6525bde05937a28e3d9176b826162ae489af + + # v1.19.15 + 'k8s.gcr.io/kube-apiserver:v1.19.15': + sha1: e01c8d778e4e693a0ea09cdbbe041a65cf070c6f + + 'k8s.gcr.io/kube-controller-manager:v1.19.15': + sha1: d1f5cc6a861b2259861fb78b2b83e9a07b788e31 + + 'k8s.gcr.io/kube-scheduler:v1.19.15': + sha1: b07fdd17205bc071ab108851d245689642244f92 + + 'k8s.gcr.io/kube-proxy:v1.19.15': + sha1: 9e2e7a8d40840bbade3a1f2dc743b9226491b6c2 + + # v1.20.12 + 'k8s.gcr.io/kube-apiserver:v1.20.12': + sha1: bbb037b9452db326aaf09988cee080940f3c418a + + 'k8s.gcr.io/kube-controller-manager:v1.20.12': + sha1: 4a902578a0c548edec93e0f4afea8b601fa54b93 + + 'k8s.gcr.io/kube-scheduler:v1.20.12': + sha1: ed5ceb21d0f5bc350db69550fb7feac7a6f1e50b + + 'k8s.gcr.io/kube-proxy:v1.20.12': + sha1: f937aba709f52be88360361230840e7bca756b2e + + 'k8s.gcr.io/coredns:1.7.0': + sha1: 5aa15f4cb942885879955b98a0a824833d9f66eb + + 'k8s.gcr.io/pause:3.2': + sha1: ae4799e1a1ec9cd0dda8ab643b6e50c9fe505fef + + # v1.21.7 + 'k8s.gcr.io/kube-apiserver:v1.21.7': + sha1: edb26859b3485808716982deccd90ca420828649 + + 'k8s.gcr.io/kube-controller-manager:v1.21.7': + sha1: 9abf1841da5b113b377c1471880198259ec2d246 + + 'k8s.gcr.io/kube-scheduler:v1.21.7': + sha1: 996d25351afc96a10e9008c04418db07a99c76b7 + + 'k8s.gcr.io/kube-proxy:v1.21.7': + sha1: 450af22a892ffef276d4d58332b7817a1dde34e7 + + 'k8s.gcr.io/coredns/coredns:v1.8.0': + sha1: 03114a98137e7cc2dcf4983b919e6b93ac8d1189 + + 'k8s.gcr.io/etcd:3.4.13-0': + sha1: d37a2efafcc4aa86e6dc497e87e80b5d7f326115 + + 'k8s.gcr.io/pause:3.4.1': + sha1: 7f57ae28d733f99c0aab8f4e27d4b0c034cd0c04 + + # v1.22.4 + 'k8s.gcr.io/kube-apiserver:v1.22.4': + sha1: 2bf4ddb2e1f1530cf55ebaf8e8d0c56ad378b9ec + + 'k8s.gcr.io/kube-controller-manager:v1.22.4': + sha1: 241924fa3dc4671fe6644402f7beb60028c02c71 + + 'k8s.gcr.io/kube-scheduler:v1.22.4': + sha1: 373e2939072b03cf5b1e115820b7fb6b749b0ebb + + 'k8s.gcr.io/kube-proxy:v1.22.4': + sha1: fecfb88509a430c29267a99b83f60f4a7c333583 + + 'k8s.gcr.io/coredns/coredns:v1.8.4': + sha1: 69c8e14ac3941fd5551ff22180be5f4ea2742d7f + + 'k8s.gcr.io/etcd:3.5.0-0': + sha1: 9d9ee2df54a201dcc9c7a10ea763b9a5dce875f1 + + 'k8s.gcr.io/pause:3.5': + sha1: bf3e3420df62f093f94c41d2b7a62b874dcbfc28 + + 'quay.io/coreos/flannel:v0.14.0-amd64': + sha1: cff47465996a51de4632b53abf1fca873f147027 + + 'quay.io/coreos/flannel:v0.14.0': + sha1: a487a36f7b31677e50e74b96b944f27fbce5ac13 + + 'calico/cni:v3.20.3': + sha1: 95e4cf79e92715b13e500a0efcfdb65590de1e04 + + 'calico/kube-controllers:v3.20.3': + sha1: 5769bae60830abcb3c5d97eb86b8f9938a587b2d + + 'calico/node:v3.20.3': + sha1: cc3c8727ad30b4850e8d0042681342a4f2351eff + + 'calico/pod2daemon-flexvol:v3.20.3': + sha1: 97c1b7ac90aa5a0f5c52e7f137549e598ff80f3e diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat-7/files.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat-7/files.yml new file mode 100644 index 0000000000..bacfe05f06 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat-7/files.yml @@ -0,0 +1,8 @@ +--- +files: + # Github repository for erlang rpm is used since packagecloud repository is 
limited to a certain number of versions and erlang package from erlang-solutions repository is much more complex and bigger + 'https://github.com/rabbitmq/erlang-rpm/releases/download/v23.1.5/erlang-23.1.5-1.el7.x86_64.rpm': + sha256: c336ef007a027aebb6975cadcb134e541fb6f34b17699f067186d180246d1a51 + # Grafana package is not downloaded from repository since it was not reliable (issue #2449) + 'https://dl.grafana.com/oss/release/grafana-8.3.2-1.x86_64.rpm': + sha256: 5ad3cc7137bb8599f21eca40d08d671f3c508d856aea01b5a29576fda7f3c827 diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat-7/packages.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat-7/packages.yml new file mode 100644 index 0000000000..9f6ddb80e9 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/redhat-7/packages.yml @@ -0,0 +1,144 @@ +--- +prereq-packages: + # packages-repo-prereqs + - 'apr' # for httpd + - 'apr-util' # for httpd + - 'createrepo' + - 'deltarpm' # for createrepo + - 'httpd' + - 'httpd-tools' # for httpd + - 'mailcap' # for httpd + - 'mod_ssl' # for httpd + - 'python-chardet' # for createrepo + - 'python-deltarpm' # for createrepo + - 'python-kitchen' # for createrepo + - 'redhat-logos' # for httpd + - 'yum-utils' + +packages: + - 'audit' # for docker-ce + - 'bash-completion' + - 'ca-certificates' + - 'cifs-utils' + - 'conntrack-tools' # for kubelet + - 'containerd.io-1.4.12' + - 'container-selinux' + - 'cri-tools-1.13.0' + - 'curl' + - 'dejavu-sans-fonts' # for grafana + - 'docker-ce-20.10.8' + - 'docker-ce-cli-20.10.8' + - 'docker-ce-rootless-extras-20.10.8' + - 'ebtables' + - 'elasticsearch-curator-5.8.3' + - 'elasticsearch-oss-7.10.2' # for opendistroforelasticsearch & logging roles + - 'ethtool' + - 'filebeat-7.9.2' + - 'firewalld' + - 'fontconfig' # for grafana + - 'fping' + - 'fuse-overlayfs' # for docker-ce-rootless-extras + - 'gnutls' # for cifs-utils + - 'gssproxy' # for nfs-utils + - 'htop' + - 'iftop' + - 'ipset' # for firewalld + - 'java-1.8.0-openjdk-headless' + - 'javapackages-tools' # for java-1.8.0-openjdk-headless + - 'jq' + - 'libini_config' # for nfs-utils + - 'libselinux-python' + - 'libsemanage-python' + - 'libX11' # for grafana + - 'libxcb' # for grafana + - 'libXcursor' # for grafana + - 'libXt' # for grafana + - 'logrotate' + - 'net-tools' + - 'nfs-utils' + - 'nmap-ncat' + + # Open Distro for Elasticsearch plugins are installed individually to not download them twice in different versions (as dependencies of opendistroforelasticsearch package) + - 'opendistro-alerting-1.13.1.*' + - 'opendistro-index-management-1.13.1.*' + - 'opendistro-job-scheduler-1.13.0.*' + - 'opendistro-performance-analyzer-1.13.0.*' + - 'opendistro-security-1.13.1.*' + - 'opendistro-sql-1.13.0.*' + - 'opendistroforelasticsearch-kibana-1.13.1' # kibana has shorter version + - 'openssl' + - 'perl' # for vim + - 'perl-Getopt-Long' # for vim + - 'perl-libs' # for vim + - 'perl-Pod-Perldoc' # for vim + - 'perl-Pod-Simple' # for vim + - 'perl-Pod-Usage' # for vim + - 'pgaudit15_13-1.5.0' + - 'policycoreutils-python' # for container-selinux + - 'pyldb' # for cifs-utils + - 'python-cffi' # for python2-cryptography + - 'python-firewall' # for firewalld + - 'python-kitchen' # for yum-utils + - 'python-lxml' # for java-1.8.0-openjdk-headless + - 'python-psycopg2' + - 'python-pycparser' # for python2-cryptography + - 'python-setuptools' + - 'python-slip-dbus' # for firewalld + - 
'python2-cryptography' # for Ansible (certificate modules) + - 'python3-3.6.8' + - 'quota' # for nfs-utils + - 'rabbitmq-server-3.8.9' + - 'rh-haproxy18' + - 'rh-haproxy18-haproxy-syspaths' + - 'postgresql13-server' + - 'repmgr10-5.2.1' # used to upgrade repmgr first + - 'repmgr13-5.2.1' + - 'samba-client' + - 'samba-client-libs' # for samba-client + - 'samba-common' + - 'samba-libs' # for cifs-utils + - 'sysstat' + - 'tar' + - 'telnet' + - 'tmux' + - 'urw-base35-fonts' # for grafana + - 'unzip' + - 'vim-common' # for vim + - 'vim-enhanced' + - 'wget' + - 'xorg-x11-font-utils' # for grafana + - 'xorg-x11-server-utils' # for grafana + - 'yum-plugin-versionlock' + - 'yum-utils' + + # to make remote-to-remote "synchronize" work in ansible + - 'rsync' + + # K8s v1.18.6 (Epiphany >= v0.7.1) + - 'kubeadm-1.18.6' + - 'kubectl-1.18.6' + - 'kubelet-1.18.6' + + # K8s v1.19.15 (Epiphany >= v1.3 transitional version) + - 'kubeadm-1.19.15' + - 'kubectl-1.19.15' + - 'kubelet-1.19.15' + + # K8s v1.20.12 + - 'kubeadm-1.20.12' + - 'kubectl-1.20.12' + - 'kubelet-1.20.12' + + # K8s v1.21.7 (Epiphany >= v1.3, transitional version) + - 'kubeadm-1.21.7' + - 'kubectl-1.21.7' + - 'kubelet-1.21.7' + + # K8s v1.22.4 + - 'kubeadm-1.22.4' + - 'kubectl-1.22.4' + - 'kubelet-1.22.4' + + # Kubernetes Generic + - 'kubernetes-cni-0.8.6-0' # since K8s v1.18.6 + - 'kubernetes-cni-0.8.7-0' # since K8s v1.19.15 diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/ubuntu-20.04/files.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/ubuntu-20.04/files.yml new file mode 100644 index 0000000000..1eb03346ee --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/ubuntu-20.04/files.yml @@ -0,0 +1,8 @@ +--- +files: + # Switched from APT repo because there was only one (the latest) version available (issue #2262) + 'https://packages.elastic.co/curator/5/debian9/pool/main/e/elasticsearch-curator/elasticsearch-curator_5.8.3_amd64.deb': + sha256: 575a41184899678d9769a8ea120134ec329c41967c94586c1aa6439aa68d4829 + # Grafana package is not downloaded from repository since it was not reliable (issue #2449) + 'https://dl.grafana.com/oss/release/grafana_8.3.2_amd64.deb': + sha256: 3f5ecf5726223314aa3147a24c732cc9ccede86b7d703d4835a6fc69d0fffff8 diff --git a/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/ubuntu-20.04/packages.yml b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/ubuntu-20.04/packages.yml new file mode 100644 index 0000000000..7c59894d4f --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/requirements/x86_64/ubuntu-20.04/packages.yml @@ -0,0 +1,176 @@ +--- +packages: + - 'adduser' + - 'apt-transport-https' + - 'auditd' + - 'bash-completion' + - 'ca-certificates' + - 'cifs-utils' + - 'containerd.io=1.4.12*' + - 'cri-tools=1.13.0*' + - 'curl' + - 'docker-ce=5:20.10.8*' + - 'docker-ce-cli=5:20.10.8*' + - 'docker-ce-rootless-extras=5:20.10.8*' + - 'ebtables' + # for opendistroforelasticsearch & logging roles + - 'elasticsearch-oss=7.10.2*' + + # Erlang packages must be compatible with RabbitMQ version. + # Metapackages such as erlang and erlang-nox must only be used + # with apt version pinning. They do not pin their dependency versions. 
+ # List based on: https://www.rabbitmq.com/install-debian.html#installing-erlang-package + - 'erlang-asn1=1:23.1.5*' + - 'erlang-base=1:23.1.5*' + - 'erlang-crypto=1:23.1.5*' + - 'erlang-eldap=1:23.1.5*' + - 'erlang-ftp=1:23.1.5*' + - 'erlang-inets=1:23.1.5*' + - 'erlang-mnesia=1:23.1.5*' + - 'erlang-os-mon=1:23.1.5*' + - 'erlang-parsetools=1:23.1.5*' + - 'erlang-public-key=1:23.1.5*' + - 'erlang-runtime-tools=1:23.1.5*' + - 'erlang-snmp=1:23.1.5*' + - 'erlang-ssl=1:23.1.5*' + - 'erlang-syntax-tools=1:23.1.5*' + - 'erlang-tftp=1:23.1.5*' + - 'erlang-tools=1:23.1.5*' + - 'erlang-xmerl=1:23.1.5*' + - 'ethtool' + - 'filebeat=7.9.2*' + - 'firewalld' + - 'fping' + - 'gnupg2' + - 'htop' + - 'iftop' + - 'jq' + + # for dpkg-scanpackages + - 'libdpkg-perl' + - 'libfontconfig1' + - 'logrotate' + - 'netcat' + - 'net-tools' + - 'nfs-common' + + # for nfs-common + - 'libtirpc3' + - 'opendistro-alerting=1.13.1*' + - 'opendistro-index-management=1.13.1*' + - 'opendistro-job-scheduler=1.13.0*' + - 'opendistro-performance-analyzer=1.13.0*' + - 'opendistro-security=1.13.1*' + - 'opendistro-sql=1.13.0*' + - 'opendistroforelasticsearch-kibana=1.13.1*' + - 'openjdk-8-jre-headless' + - 'openssl' + - 'postgresql-13' + - 'python3-psycopg2' + - 'python3-selinux' + - 'rabbitmq-server=3.8.9*' + - 'smbclient' + - 'samba-common' + - 'software-properties-common' + - 'sshpass' + - 'sysstat' + + # for sysstat + - 'libsensors5' + - 'tar' + - 'telnet' + - 'tmux' + - 'unzip' + - 'vim' + + # to make remote-to-remote "synchronize" work in ansible + - 'rsync' + + # for curl, issue #869 + - 'libcurl4' + + # for openjdk-8-jre-headless + - 'libnss3' + - 'libcups2' + - 'libavahi-client3' + - 'libavahi-common3' + - 'libjpeg8' + - 'libxtst6' + - 'fontconfig-config' + + # for rabbit/erlang + - 'libodbc1' + + # for air-gap repo installation + - 'apache2' + - 'apache2-bin' + - 'apache2-utils' + + # for jq + - 'libjq1' + + # for gnupg2 + - 'gnupg' + - 'gpg' + - 'gpg-agent' + - 'samba-libs' + - 'libsmbclient' + + # postgres related packages + # if version is not specified, it's not related to postgres version and the latest is used + - 'pgdg-keyring' + - 'postgresql-13-pgaudit=1.5.0*' + - 'postgresql-10-repmgr=5.2.1*' + - 'postgresql-13-repmgr=5.2.1*' + - 'postgresql-client-13' + - 'postgresql-client-common' + - 'postgresql-common' + - 'repmgr-common=5.2.1*' + + # for firewalld + - 'ipset' + - 'python3-decorator' + - 'python3-slip' + - 'python3-slip-dbus' + + # for ansible module postgresql_query in role postgres-exporter + - 'libpq5' + - 'python3-jmespath' + + # for Ansible (certificate modules) + - 'python3-cryptography' + + # for python3-cryptography + - 'python3-cffi-backend' + + # K8s v1.18.6 (Epiphany >= v0.7.1) + - 'kubeadm=1.18.6*' + - 'kubectl=1.18.6*' + - 'kubelet=1.18.6*' + + # K8s v1.19.15 (Epiphany >= v1.3, transitional version) + - 'kubeadm=1.19.15*' + - 'kubectl=1.19.15*' + - 'kubelet=1.19.15*' + + # K8s v1.20.12 (Epiphany >= v1.3, transitional version) + - 'kubeadm=1.20.12*' + - 'kubectl=1.20.12*' + - 'kubelet=1.20.12*' + + # K8s v1.21.7 (Epiphany >= v1.3, transitional version) + - 'kubeadm=1.21.7*' + - 'kubectl=1.21.7*' + - 'kubelet=1.21.7*' + + # K8s v1.22.4 + - 'kubeadm=1.22.4*' + - 'kubectl=1.22.4*' + - 'kubelet=1.22.4*' + + # Kubernetes Generic + # kubernetes-cni-0.8.6 since K8s v1.18.6 + - 'kubernetes-cni=0.8.6-00*' + + # kubernetes-cni-0.8.7 since K8s v1.19.15 + - 'kubernetes-cni=0.8.7-00*' diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/command/apt.py 
b/ansible/playbooks/roles/repository/files/download-requirements/src/command/apt.py new file mode 100644 index 0000000000..746eb27e13 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/apt.py @@ -0,0 +1,64 @@ +from typing import List + +from src.command.command import Command + + +class Apt(Command): + """ + Interface for `apt` tool. + """ + + def __init__(self, retries: int): + super().__init__('apt', retries) + + def update(self): + """ + Interface for `apt-get update` + """ + self.run(['update']) + + def download(self, package: str): + """ + Interface for `apt download package` + + :param package: package to be downloaded + """ + self.run(['download', package]) + + def install(self, package: str, assume_yes: bool = True): + """ + Interface for `apt install package` + + :param package: package to be installed + :param assume_yes: if set to True `-y` flag will be added + """ + no_ask: str = '-y' if assume_yes else '' + self.run(['install', no_ask, package]) + + def remove(self, package: str, assume_yes: bool = True): + """ + Interface for `apt remove package` + + :param package: package to be removed + :param assume_yes: if set to True `-y` flag will be added + """ + no_ask: str = '-y' if assume_yes else '' + self.run(['remove', no_ask, package]) + + def list_installed_packages(self) -> List[str]: + """ + List all installed packages on the current OS. + + :returns: packages installed on the machine + """ + args: List[str] = ['list', + '--installed'] + + raw_output = self.run(args).stdout + + packages: List[str] = [] + for line in raw_output.split('\n'): + if line: + packages.append(line.split('/')[0]) + + return packages diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/command/apt_cache.py b/ansible/playbooks/roles/repository/files/download-requirements/src/command/apt_cache.py new file mode 100644 index 0000000000..3db702fb23 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/apt_cache.py @@ -0,0 +1,117 @@ +from typing import Dict, List + +from src.command.command import Command +from src.error import CriticalError + + +class AptCache(Command): + """ + Interface for `apt-cache` tool. + """ + def __init__(self, retries: int): + super().__init__('apt-cache', retries) + + def __get_package_candidate_version(self, package: str, version: str = '') -> str: + """ + Use cache to find out `package` candidate version number. + + :param package: for which candidate version to return + :param version: optional argument to use specific `package`'s version + :raises: + :class:`CriticalError`: can be raised when package candidate was not found + :returns: candidate version number + """ + policy_args: List[str] = ['policy', package] + policy_output = self.run(policy_args).stdout + + output_lines: List[str] = policy_output.split('\n') + if version: # confirm that the wanted version is available + for line in output_lines: + if version in line: + return version + else: + for line in output_lines: # get candidate version + if 'Candidate' in line: + return line.split(': ')[-1] + + raise CriticalError(f'Candidate for {package} not found.') + + def get_package_info(self, package: str, version: str = '') -> Dict[str, str]: + """ + Get cached data for `package` and return it in a formatted form. 
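A minimal usage sketch (the package and field values are illustrative; the dict keys mirror `apt-cache show` output fields such as Package, Version and SHA256):

    cache = AptCache(retries=3)
    info = cache.get_package_info('curl')    # parsed `apt-cache show curl`
    print(info['Version'], info['SHA256'])   # e.g. '7.68.0-1ubuntu2', 'ab12...'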
+ + :param package: for which data to return + :param version: optional argument to use specific `package`'s version + :returns: structured cached `package` info + """ + show_args: List[str] = ['show', package] + show_output = self.run(show_args).stdout + + version_info: str = '' + candidate_version: str = self.__get_package_candidate_version(package, version) + for ver_info in show_output.split('\n\n'): + if candidate_version in ver_info: + version_info = ver_info + break + + info: Dict[str, str] = {} + for line in version_info.split('\n'): + if line: + try: + key, value = line.split(': ') + info[key] = value + except ValueError: + pass + + return info + + def get_package_dependencies(self, package: str) -> List[str]: + """ + Interface for `apt-cache depends` + + :param package: for which dependencies will be gathered + :returns: all required dependencies for `package` + """ + args: List[str] = ['depends', + '--no-recommends', + '--no-suggests', + '--no-conflicts', + '--no-breaks', + '--no-replaces', + '--no-enhances', + '--no-pre-depends', + package] + + raw_output = self.run(args).stdout + + virt_pkg: bool = False # True - virtual package detected, False - otherwise + virt_pkgs: List[str] = [] # cached virtual package options + deps: List[str] = [] + for dep in raw_output.split('\n'): + if not dep: # skip empty lines + continue + + dep = dep.replace(' ', '') # remove whitespace + + if virt_pkg: + virt_pkgs.append(dep) # cache virtual package option + + if '<' in dep and '>' in dep: # virtual package, more than one dependency to choose + virt_pkg = True + continue + + if 'Depends:' in dep: # new dependency found + virt_pkg = False + + if virt_pkgs: # previous choices cached + # avoid conflicts by choosing only non-cached dependency: + if not any(map(lambda elem: elem in deps, virt_pkgs)): + deps.append(virt_pkgs[0].split('Depends:')[-1]) # pick first from the list + virt_pkgs.clear() + + dep = dep.split('Depends:')[-1] # remove the "Depends:" prefix + + if not virt_pkg and dep != package: # avoid adding package itself + deps.append(dep) + + return list(set(deps)) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/command/apt_key.py b/ansible/playbooks/roles/repository/files/download-requirements/src/command/apt_key.py new file mode 100644 index 0000000000..bfc079da68 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/apt_key.py @@ -0,0 +1,20 @@ +from pathlib import Path + +from src.command.command import Command + + +class AptKey(Command): + """ + Interface for `apt-key` tool.
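A short sketch of the intended call pattern (the key path is illustrative; DebianFamilyMode further below downloads repo keys to /tmp before adding them):

    key_file = Path('/tmp/docker')     # a previously downloaded GPG key
    AptKey(retries=3).add(key_file)    # runs: apt-key add /tmp/docker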
+ """ + + def __init__(self, retries: int): + super().__init__('apt-key', retries) + + def add(self, key: Path): + """ + Interface for `apt-key add` + + :key: key as file to be added + """ + self.run(['add', str(key)]) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/command/command.py b/ansible/playbooks/roles/repository/files/download-requirements/src/command/command.py new file mode 100644 index 0000000000..bfb7eb6b6b --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/command.py @@ -0,0 +1,97 @@ +import logging +import subprocess +from typing import List + +from src.error import CriticalError + + +class Command: + """ + Interface for running subprocesses + """ + + def __init__(self, process_name: str, retries: int, pipe_args: List[str] = None): + self.__proc_name: str = process_name + self.__retries: int = retries + self.__pipe_args: List[str] = pipe_args # used for __or__ + + def name(self) -> str: + return self.__proc_name + + def pipe_args(self) -> List[str]: + return self.__pipe_args or [] + + def run(self, args: List[str], + capture_output: bool = True, + accept_nonzero_returncode: bool = False) -> subprocess.CompletedProcess: + """ + Run subprocess with provided arguments + + :param args: additional args which will be used with __proc_name + :capture_output: save stdout/stderr to completed process object + :raises: :class:`CriticalError`: when number of retries exceeded + :returns: completed process object + """ + process_args = [self.__proc_name] + process_args.extend(args) + + additional_args = {'encoding': 'utf-8'} + if capture_output: + additional_args['stdout'] = subprocess.PIPE + additional_args['stderr'] = subprocess.PIPE + + for count in range(self.__retries): + logging.debug(f'[{count + 1}/{self.__retries}] Running: {self.__proc_name} {" ".join(args)} ') + + process = subprocess.run(process_args, **additional_args) + + if accept_nonzero_returncode: + return process + + if process.returncode == 0: + return process + + logging.warn(process.stderr) + + raise CriticalError('Retries count reached maximum!') + + def __or__(self, command) -> str: + """ + Run two subprocesses by piping output from the first process to the second process. + + :param command: process onto which output from the first process will be passed + :raises: :class:`CriticalError`: when number of retries exceeded + :returns: final stdout + """ + lproc_name = f'{self.__proc_name} {" ".join(self.__pipe_args)}' + rproc_name = f'{command.name()} {" ".join(command.pipe_args())}' + whole_process_name = f'{lproc_name} | {rproc_name}' + + for count in range(self.__retries): + logging.debug(f'[{count + 1}/{self.__retries}] Running: {whole_process_name}') + + lproc = subprocess.Popen([self.__proc_name] + self.__pipe_args, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + rproc = subprocess.Popen([command.name()] + command.pipe_args(), stdin=lproc.stdout, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + lproc.stdout.close() # Allow proc1 to receive a SIGPIPE if proc2 exits. 
+ + output = rproc.communicate()[0].decode() + if rproc.returncode == 0: + return output + + logging.warn(lproc.stderr if lproc.returncode != 0 else rproc.stderr) + + raise CriticalError('Retries count reached maximum!') + + def _run_and_filter(self, args: List[str]) -> List[str]: + """ + Run subprocess and return list of filtered stdout lines + + :param args: run subprocess with these args + :returns: filtered output lines from the subprocess stdout + """ + raw_output = self.run(args).stdout + + elems: List[str] = raw_output.split('\n') + return list(filter(lambda elem: elem != '', elems)) # filter empty lines diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/command/crane.py b/ansible/playbooks/roles/repository/files/download-requirements/src/command/crane.py new file mode 100644 index 0000000000..80b824e10b --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/crane.py @@ -0,0 +1,52 @@ +from os import chmod, close +from pathlib import Path +from shutil import move +from tempfile import mkstemp +from typing import List + +from src.command.command import Command + + +class Crane(Command): + """ + Interface for Crane + """ + + def __init__(self, retries: int): + super().__init__('crane', retries) + + def pull(self, image_name: str, + destination: Path, + platform: str, + legacy_format: bool = True, + insecure: bool = True): + """ + Download target image file + + :param image_name: address to the image + :param destination: where to store the downloaded image + :param platform: for which platform file will be downloaded + :param legacy_format: use legacy format + :param insecure: allow image references to be fetched without TLS + """ + crane_params: List[str] = ['pull'] + + if insecure: + crane_params.append('--insecure') + + crane_params.append(f'--platform={platform}') + + if legacy_format: + crane_params.append('--format=legacy') + + crane_params.append(image_name) + + fd, tmp_path = mkstemp() + close(fd) # only the path is needed, crane writes the file itself + + crane_params.append(tmp_path) + + self.run(crane_params) + + chmod(tmp_path, 0o0644) + + move(tmp_path, str(destination)) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/command/pip.py b/ansible/playbooks/roles/repository/files/download-requirements/src/command/pip.py new file mode 100644 index 0000000000..3ccfddae0a --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/pip.py @@ -0,0 +1,61 @@ +from typing import List + +from src.command.command import Command + + +class Pip(Command): + """ + Interface for `pip` tool.
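A hedged usage sketch; note that `version`, when provided, is concatenated verbatim onto the package name, so it has to carry its own specifier:

    pip = Pip(retries=3)
    pip.install('poyo', version='==0.5.0')   # runs: pip3 install poyo==0.5.0
    pip.uninstall('poyo')                    # runs: pip3 uninstall -y poyo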
+ """ + + def __init__(self, retries: int): + super().__init__('pip3', retries) + + def install(self, package: str, + version: str = '', + user: bool = False) -> bool: + """ + Interface for `pip install` + + :param package: to install + :param version: in which version `package` to install + :param user: install in user's directory + :returns: True - package had to be installed, False - package already installed + """ + args: List[str] = ['install'] + + if version: + package = f'{package}{version}' + + args.append(package) + + if user: + args.append('--user') + + output = self.run(args).stdout + + if f'Requirement already satisfied: {package}' in output: + return False + + return True + + def uninstall(self, package: str, + version: str = '', + ensure_yes: bool = True): + """ + Interface for `pip uninstall` + + :param package: to uninstall + :param version: in which version `package` to uninstall + """ + args: List[str] = ['uninstall'] + + if version: + package = f'{package}{version}' + + if ensure_yes: + args.append('-y') + + args.append(package) + + self.run(args) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/command/repoquery.py b/ansible/playbooks/roles/repository/files/download-requirements/src/command/repoquery.py new file mode 100644 index 0000000000..ed7c65b28c --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/repoquery.py @@ -0,0 +1,97 @@ +from typing import Callable, List + +from src.command.command import Command +from src.error import CriticalError, PackageNotfound + + +class Repoquery(Command): + """ + Interface for `repoquery` + """ + + def __init__(self, retries: int): + super().__init__('repoquery', retries) + + def __query(self, package: str, + queryformat: str, + arch: str, + requires: bool, + resolve: bool, + output_handler: Callable) -> List[str]: + """ + Run generic query using `repoquery` tool. + + :param package: data will be returned for this `package` + :param queryformat: specify custom query output format + :param arch: limit query output to this architecture + :param requires: list groups required by group + :param resolve: resolve dependencies for `package` + :param output_handler: different queries produce different outputs, use specific output handler + :raises: + :class:`CriticalError`: can be raised on exceeding retries or when error occurred + :class:`PackageNotfound`: when query did not return any package info + :returns: query result + """ + args: List[str] = [] + + if requires: + args.append('--requires') + + if resolve: + args.append('--resolve') + + args.extend(['--queryformat', queryformat]) + args.append(f'--archlist={arch},noarch') + args.append(package) + + output = self.run(args).stdout + # yumdownloader doesn't set error code if repoquery returns empty output + output_handler(output) + + packages: List[str] = [] + for line in output.split('\n'): + if line: + packages.append(line) + + return packages + + def query(self, package: str, queryformat: str, arch: str) -> List[str]: + """ + Generic query to yum database. 
+ + :param package: data will be returned for this `package` + :param queryformat: specify custom query output format + :param arch: limit query output to this architecture + :raises: + :class:`CriticalError`: can be raised on exceeding retries or when error occurred + :class:`PackageNotfound`: when query did not return any package info + :returns: query result + """ + + def output_handler(output: str): + """ In addition to errors, handle missing packages """ + if not output: + raise PackageNotfound(f'repoquery failed for package `{package}`, reason: package not found') + elif 'error' in output: + raise CriticalError(f'repoquery failed for package `{package}`, reason: `{output}`') + + return self.__query(package, queryformat, arch, False, False, output_handler) + + def get_dependencies(self, package: str, queryformat: str, arch: str) -> List[str]: + """ + Get all dependencies for `package`. + + :param package: data will be returned for this `package` + :param queryformat: specify custom query output format + :param arch: limit query output to this architecture + :raises: + :class:`CriticalError`: can be raised on exceeding retries or when error occurred + :returns: query result + """ + + def output_handler(output: str): + """ Handle errors """ + if 'error' in output: + raise CriticalError(f'repoquery failed for package `{package}`, reason: `{output}`') + + return self.__query(package, queryformat, arch, True, True, output_handler) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/command/rpm.py b/ansible/playbooks/roles/repository/files/download-requirements/src/command/rpm.py new file mode 100644 index 0000000000..3e9a6a8123 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/rpm.py @@ -0,0 +1,48 @@ +from typing import List + +from src.command.command import Command + + +class Rpm(Command): + """ + Interface for `rpm` + """ + + def __init__(self, retries: int): + super().__init__('rpm', retries) + + def is_package_installed(self, package: str) -> bool: + """ + Check if `package` is installed on the OS. 
+ + :param package: to be checked if installed + :returns: True - package installed, False - otherwise + """ + args: List[str] = ['--query', + '--quiet', + package] + + if self.run(args, accept_nonzero_returncode=True).returncode == 0: + return True + + return False + + def import_key(self, key: str): + """ + Import PGP key using `rpm` + + :param key: key to be added + """ + self.run(['--import', key]) + + def get_package_capabilities(self, filename: str) -> List[str]: + args: List[str] = ['-q', + '--provides', + filename] + return self._run_and_filter(args) + + def which_packages_provides_file(self, filename: str) -> List[str]: + args: List[str] = ['-q', + '--whatprovides', + filename] + return self._run_and_filter(args) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/command/tar.py b/ansible/playbooks/roles/repository/files/download-requirements/src/command/tar.py new file mode 100644 index 0000000000..5e4be35a67 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/tar.py @@ -0,0 +1,99 @@ +from pathlib import Path +from typing import List + +from src.command.command import Command + + +class Tar(Command): + """ + Interface for `tar` + """ + + def __init__(self): + super().__init__('tar', 1) + + def pack(self, filename: Path, + target: str, + directory: Path = None, + verbose: bool = False, + compress: bool = False, + verify: bool = False): + """ + Create a tar archive + + :param filename: name for the archive to be created + :param target: files to be archived + :param directory: change directory before doing any actions + :param verbose: use verbose mode + :param compress: use gzip compression + :param verify: check file integrity + """ + short_flags: List[str] = ['-c'] # -czvf flags + tar_params: List[str] = [str(filename)] # all the other params + + if compress: + short_flags.append('z') + + if verbose: + short_flags.append('v') + + short_flags.append('f') + + if verify: + tar_params.append('--verify') + + if directory is not None: + tar_params.extend(['--directory', str(directory)]) + + if target: + tar_params.append(target) + + self.run([''.join(short_flags)] + tar_params) + + def unpack(self, filename: Path, + target: str = '', + absolute_names: bool = False, + directory: Path = None, + overwrite: bool = True, + strip_components: int = 0, + uncompress: bool = True, + verbose: bool = False): + """ + Unpack a tar archive + + :param filename: file to be extracted + :param target: archive member(s) to be extracted (all when empty) + :param absolute_names: use abs path names + :param directory: change directory before doing any actions + :param overwrite: overwrite existing files when extracting + :param strip_components: strip leading components from file names on extraction + :param uncompress: use gzip decompression + :param verbose: use verbose mode + """ + short_flags: List[str] = ['-x'] # -xzvf flags + tar_params: List[str] = [str(filename)] # all the other params + + if uncompress: + short_flags.append('z') + + if verbose: + short_flags.append('v') + + short_flags.append('f') + + if absolute_names: + tar_params.append('--absolute-names') + + if directory is not None: + tar_params.extend(['--directory', str(directory)]) + + if strip_components: + tar_params.append(f'--strip-components={str(strip_components)}') + + if target: + tar_params.append(target) + + if overwrite: + tar_params.append('--overwrite') + + self.run([''.join(short_flags)] + tar_params) diff --git 
a/ansible/playbooks/roles/repository/files/download-requirements/src/command/toolchain.py b/ansible/playbooks/roles/repository/files/download-requirements/src/command/toolchain.py new file mode 100644 index 0000000000..4c759abdff --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/toolchain.py @@ -0,0 +1,102 @@ +import logging +from typing import Dict, Type + +from src.command.apt import Apt +from src.command.apt_cache import AptCache +from src.command.apt_key import AptKey +from src.command.command import Command +from src.command.crane import Crane +from src.command.pip import Pip +from src.command.repoquery import Repoquery +from src.command.rpm import Rpm +from src.command.tar import Tar +from src.command.wget import Wget +from src.command.yum import Yum +from src.command.yum_config_manager import YumConfigManager +from src.command.yumdownloader import Yumdownloader +from src.config import OSType + + +class Toolchain: + """ + Common tools used across all distributions + """ + + def __init__(self, retries: int): + self.crane = Crane(retries) + self.tar = Tar() + self.wget = Wget(retries) + self.pip = Pip(retries) + + def install_pip(self): + """ + Used for offline mode, install pip package + """ + raise NotImplementedError + + def uninstall_pip(self): + """ + Used for offline mode, uninstall pip package + """ + raise NotImplementedError + + def ensure_pip(self) -> bool: + """ + Used for offline mode to ensure that pip is installed on target OS + :returns: True - pip had to be installed, False - pip already installed + """ + try: # check if pip is installed + import pip + return False + + except ModuleNotFoundError: # pip missing + logging.info('pip3 not installed, try installing...') + self.install_pip() + logging.info('Done.') + return True + + +class RedHatFamilyToolchain(Toolchain): + """ + Specific tools used by RedHat-based distributions + """ + + def __init__(self, retries: int): + super().__init__(retries) + + self.repoquery = Repoquery(retries) + self.rpm = Rpm(retries) + self.yum = Yum(retries) + self.yum_config_manager = YumConfigManager(retries) + self.yumdownloader = Yumdownloader(retries) + + def install_pip(self): + self.yum.install('python3-pip') + + def uninstall_pip(self): + self.yum.remove('python3-pip') + + +class DebianFamilyToolchain(Toolchain): + """ + Specific tools used by Debian-based distributions + """ + + def __init__(self, retries: int): + super().__init__(retries) + + self.apt = Apt(retries) + self.apt_cache = AptCache(retries) + self.apt_key = AptKey(retries) + + def install_pip(self): + self.apt.install('python3-pip') + + def uninstall_pip(self): + self.apt.remove('python3-pip') + + +TOOLCHAINS: Dict[OSType, Type[Toolchain]] = { + OSType.RedHat: RedHatFamilyToolchain, + OSType.Ubuntu: DebianFamilyToolchain +} diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/command/wget.py b/ansible/playbooks/roles/repository/files/download-requirements/src/command/wget.py new file mode 100644 index 0000000000..59316c1bb8 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/wget.py @@ -0,0 +1,51 @@ +from enum import Enum +from pathlib import Path +from typing import List + +from src.command.command import Command + + +class IPFamily(Enum): + IPV4 = 'IPv4' + IPV6 = 'IPv6' + + +class Wget(Command): + """ + Interface for `wget` + """ + + def __init__(self, retries: int): + super().__init__('wget', retries) + self.__download_params: List[str] = [ +
'--no-use-server-timestamps', + '--continue', + '--show-progress' + ] + + def download(self, url: str, + output_document: Path = None, + directory_prefix: Path = None, + ip_family: IPFamily = IPFamily.IPV4, + additional_params: bool = True): + """ + Download target file + + :param url: file to be downloaded + :param output_document: downloaded file will be stored under this path + :param directory_prefix: downloaded file will be stored under this path, keep original filename + :param ip_family: which IP version to be used + :param additional_params: if set to True, the default download parameters will be used + """ + output_params: List[str] = [] + if additional_params: + output_params.extend(self.__download_params) + + if output_document is not None: + output_params.append(f'--output-document={str(output_document)}') + + if directory_prefix is not None: + output_params.append(f'--directory-prefix={str(directory_prefix)}') + + output_params.append(f'--prefer-family={ip_family.value}') + + self.run(output_params + [url]) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/command/yum.py b/ansible/playbooks/roles/repository/files/download-requirements/src/command/yum.py new file mode 100644 index 0000000000..be3baf27f8 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/yum.py @@ -0,0 +1,110 @@ +from typing import List + +from src.command.command import Command + + +class Yum(Command): + """ + Interface for `yum` + """ + + def __init__(self, retries: int): + super().__init__('yum', retries) + + def update(self, enablerepo: str, + package: str = None, + disablerepo: str = '*', + assume_yes: bool = True): + """ + Interface for `yum update` + + :param enablerepo: repository to be enabled during the update + :param package: optional package to be updated, all packages when omitted + :param disablerepo: repository to be disabled during the update + :param assume_yes: if set to True, -y flag will be used + """ + update_parameters: List[str] = ['update'] + + update_parameters.append('-y' if assume_yes else '') + + if package is not None: + update_parameters.append(package) + + update_parameters.append(f'--disablerepo={disablerepo}') + update_parameters.append(f'--enablerepo={enablerepo}') + + self.run(update_parameters) + + def install(self, package: str, + assume_yes: bool = True): + """ + Interface for `yum install -y` + + :param package: package to be installed + :param assume_yes: if set to True, -y flag will be used + """ + no_ask: str = '-y' if assume_yes else '' + self.run(['install', no_ask, package]) + + def remove(self, package: str, + assume_yes: bool = True): + """ + Interface for `yum remove -y` + + :param package: package to be removed + :param assume_yes: if set to True, -y flag will be used + """ + no_ask: str = '-y' if assume_yes else '' + self.run(['remove', no_ask, package]) + + def is_repo_enabled(self, repo: str) -> bool: + output = self.run(['repolist', + 'enabled']).stdout + if repo in output: + return True + + return False + + def find_rhel_repo_id(self, patterns: List[str]) -> List[str]: + output = self.run(['repolist', + 'all']).stdout + + repos: List[str] = [] + for line in output.split('\n'): + for pattern in patterns: + if pattern in line: + repos.append(pattern) + + return repos + + def accept_keys(self): + # to accept import of repo's GPG key (for repo_gpgcheck=1) + self.run(['-y', 'repolist']) + + def is_repo_available(self, repo: str) -> bool: + retval = self.run(['-q', + '--disablerepo=*', + f'--enablerepo={repo}', + 'repoinfo']).returncode + + if retval == 0: + return True + + return False + + def makecache(self, fast: bool = True, + assume_yes: bool = True): + args: List[str] = ['makecache'] + + args.append('-y' if
assume_yes else '') + + if fast: + args.append('fast') + + self.run(args) + + def list_all_repo_info(self) -> List[str]: + args: List[str] = ['repolist', + '-v', + 'all'] + return self._run_and_filter(args) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/command/yum_config_manager.py b/ansible/playbooks/roles/repository/files/download-requirements/src/command/yum_config_manager.py new file mode 100644 index 0000000000..1815c79ec6 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/yum_config_manager.py @@ -0,0 +1,19 @@ +from src.command.command import Command + + +class YumConfigManager(Command): + """ + Interface for `yum-config-manager` + """ + + def __init__(self, retries: int): + super().__init__('yum-config-manager', retries) + + def enable_repo(self, repo: str): + self.run(['--enable', repo]) + + def add_repo(self, repo: str): + self.run(['--add-repo', repo]) + + def disable_repo(self, repo: str): + self.run(['--disable', repo]) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/command/yumdownloader.py b/ansible/playbooks/roles/repository/files/download-requirements/src/command/yumdownloader.py new file mode 100644 index 0000000000..632bcc15d6 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/command/yumdownloader.py @@ -0,0 +1,33 @@ +from pathlib import Path +from typing import List + +from src.command.command import Command + + +class Yumdownloader(Command): + """ + Interface for `yumdownloader` + """ + + def __init__(self, retries: int): + super().__init__('yumdownloader', retries) + + def download_packages(self, packages: List[str], + arch: str, + destdir: Path, + exclude: str = '', + quiet: bool = True): + args: List[str] = [] + + if quiet: + args.append('--quiet') + + args.append(f'--archlist={arch}') + + if exclude: + args.append(f'--exclude={exclude}') + + args.append(f'--destdir={str(destdir)}') + args.extend(packages) + + self.run(args) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/config.py b/ansible/playbooks/roles/repository/files/download-requirements/src/config.py new file mode 100644 index 0000000000..7f4540be42 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/config.py @@ -0,0 +1,221 @@ +import logging +import os +import sys +from argparse import ArgumentParser, RawTextHelpFormatter, SUPPRESS +from enum import Enum +from pathlib import Path +from typing import List + +from src.error import CriticalError + + +class OSType(Enum): + """ + Supported distribution types. + Values are lists of possible distro names. + Subdirs match always with value[0] name. 
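For example, `rhel-7` in a parsed os-release resolves to OSType.RedHat, and on an x86_64 host the per-distro requirement files are then read from requirements/x86_64/redhat-7/ (always value[0]), matching how Config composes the path below:

    distro_subdir = Path(f'{os_arch.value}/{os_type.value[0]}')   # e.g. x86_64/redhat-7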
+ """ + Ubuntu = ['ubuntu-20.04'] + RedHat = ['redhat-7', 'rhel-7'] + + +class OSArch(Enum): + """ Supported architecture types """ + X86_64 = 'x86_64' + ARM64 = 'arm64' + + +class Config: + def __init__(self, argv: List[str]): + self.dest_crane_symlink: Path = None + self.dest_dir: Path + self.dest_files: Path + self.dest_grafana_dashboards: Path + self.dest_images: Path + self.dest_packages: Path + self.distro_subdir: Path + self.enable_backup: bool + self.is_log_file_enabled: bool + self.log_file: Path + self.os_arch: OSArch + self.os_type: OSType + self.pip_installed: bool = False + self.poyo_installed: bool = False + self.repo_path: Path + self.repos_backup_file: Path + self.reqs_path: Path + self.rerun: bool + self.retries: int + self.script_path: Path + + self.__add_args(argv) + + if not self.rerun: + self.__log_info_summary() + + def __log_info_summary(self): + """ + Helper function for printing all parsed arguments + """ + + lines: List[str] = ['Info summary:'] + LINE_SIZE: int = 50 + lines.append('-' * LINE_SIZE) + + lines.append(f'OS Arch: {self.os_arch.value}') + lines.append(f'OS Type: {self.os_type.value[0]}') + lines.append(f'Script location: {str(self.script_path.absolute())}') + lines.append('Directories used:') + lines.append(f'- files: {str(self.dest_files)}') + lines.append(f'- grafana dashboards: {str(self.dest_grafana_dashboards)}') + lines.append(f'- images: {str(self.dest_images)}') + lines.append(f'- packages: {str(self.dest_packages)}') + + lines.append(f'Enable repos backup: {"Yes" if self.enable_backup else "No"}') + if self.enable_backup: + lines.append(f'Repos backup file: {str(self.repos_backup_file)}') + + if self.is_log_file_enabled: + lines.append(f'Log file location: {str(self.log_file.absolute())}') + + lines.append(f'Retries count: {self.retries}') + + lines.append('-' * LINE_SIZE) + + logging.info('\n'.join(lines)) + + def __create_parser(self) -> ArgumentParser: + parser = ArgumentParser(description='Download Requirements', formatter_class=RawTextHelpFormatter) + + # required arguments: + parser.add_argument('destination_dir', metavar='DEST_DIR', type=Path, action='store', nargs='+', + help='requirements will be downloaded to this directory') + + supported_os: str = "|".join([os.value[0] for os in list(OSType)]) + parser.add_argument('os_type', metavar='OS_TYPE', type=str, action='store', nargs='+', + help=f'which of the supported OS will be used: ({supported_os}|detect)\n' + 'when using `detect`, script will try to find out which OS is being used') + + # optional arguments: + parser.add_argument('--enable-repos-backup', '-b', action='store_true', dest='enable_backup', default=False, + help=('when used, backup archive for packages will be created and used')), + parser.add_argument('--repos-backup-file', metavar='BACKUP_FILE', action='store', + dest='repos_backup_file', default='/var/tmp/enabled-system-repos.tar', + help='path to a backup file') + parser.add_argument('--retries-count', '-r', metavar='COUNT', type=int, action='store', dest='retries', + default=3, help='how many retries before stopping operation') + + parser.add_argument('--log-file', '-l', metavar='LOG_FILE', type=Path, action='store', dest='log_file', + default=Path('./download-requirements.log'), + help='logs will be saved to this file') + parser.add_argument('--log-level', metavar='LOG_LEVEL', type=str, action='store', dest='log_level', + default='info', help='set up log level, available levels: (error|warn|info|debug`)') + parser.add_argument('--no-logfile', action='store_true', 
dest='no_logfile', + help='no logfile will be created') + + # offline mode rerun options: + parser.add_argument('--rerun', action='store_true', dest='rerun', + default=False, help=SUPPRESS) + parser.add_argument('--pip-installed', action='store_true', dest='pip_installed', + default=False, help=SUPPRESS) + parser.add_argument('--poyo-installed', action='store_true', dest='poyo_installed', + default=False, help=SUPPRESS) + + return parser + + def __get_matching_os_type(self, os_type: str) -> OSType: + """ + Check if the parsed OS type fits supported distributions. + + :param os_type: distro type to be checked + :raises: :class:`CriticalError`: when no supported OS type matches + """ + + for ost in OSType: + for os_name in ost.value: + if os_type.upper() in os_name.upper(): + logging.debug(f'Found Matching OS: `{ost.value[0]}`') + return ost + + raise CriticalError('Could not detect OS type') + + def __detect_os_type(self) -> OSType: + """ + On most modern GNU/Linux OSs info about current distribution + can be found at /etc/os-release. + Check this file to find out on which distro this script is run. + """ + + os_release = Path('/etc/os-release') + + if os_release.exists(): + with open(os_release) as os_release_handler: + for line in os_release_handler.readlines(): + if line.startswith('ID='): # the exact ID field, not ID_LIKE or VERSION_ID + return self.__get_matching_os_type(line.split('=')[1].replace('"', '').strip()) + + raise CriticalError('Could not detect OS type') + + def __setup_logger(self, log_level: str, log_file: Path, no_logfile: bool): + logging.getLogger('poyo').setLevel(logging.WARNING) # remove poyo debugging + + # setup the logger: + log_levels = { + # map input log level to Python's logging library + 'error': logging.ERROR, + 'warn': logging.WARN, + 'info': logging.INFO, + 'debug': logging.DEBUG + } + + log_format = '%(asctime)s [%(levelname)s]: %(message)s' + + # add stdout logger: + logging.basicConfig(stream=sys.stdout, level=log_levels[log_level.lower()], + format=log_format) + + # add log file: + if not no_logfile: + root_logger = logging.getLogger() + file_handler = logging.FileHandler(log_file) + file_handler.setLevel(log_levels[log_level.lower()]) + file_handler.setFormatter(logging.Formatter(fmt=log_format)) + root_logger.addHandler(file_handler) + + def __add_args(self, argv: List[str]): + """ + Run the parser and add all of the arguments to the Config object.
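A sketch of constructing Config the way the entry point presumably does (argv[0] must be the script path, since it anchors script_path below; all values are illustrative):

    argv = ['./download-requirements.py', '/var/tmp/epi-download-requirements', 'detect',
            '--log-level', 'debug', '--enable-repos-backup']
    cfg = Config(argv)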
+ + :param argv: input arguments to be parsed + """ + + self.script_path = Path(argv[0]).absolute().parents[0] + self.repo_path = self.script_path / 'repositories' + self.reqs_path = self.script_path / 'requirements' + + args = self.__create_parser().parse_args(argv[1:]).__dict__ + + self.log_file = args['log_file'] + self.__setup_logger(args['log_level'], self.log_file, args['no_logfile']) + + # add required arguments: + self.os_type = self.__detect_os_type() if args['os_type'][0] == 'detect' else self.__get_matching_os_type(args['os_type'][0]) + self.dest_dir = args['destination_dir'][0].absolute() + self.dest_grafana_dashboards = self.dest_dir / 'grafana_dashboards' + self.dest_files = self.dest_dir / 'files' + self.dest_images = self.dest_dir / 'images' + self.dest_packages = self.dest_dir / 'packages' + + # add optional arguments + self.enable_backup = args['enable_backup'] + self.os_arch = OSArch(os.uname().machine) + self.repos_backup_file = Path(args['repos_backup_file']) + self.retries = args['retries'] + self.is_log_file_enabled = False if args['no_logfile'] else True + + self.distro_subdir = Path(f'{self.os_arch.value}/{self.os_type.value[0]}') + + # offline mode + self.rerun = args['rerun'] + self.pip_installed = args['pip_installed'] + self.poyo_installed = args['poyo_installed'] diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/crypt.py b/ansible/playbooks/roles/repository/files/download-requirements/src/crypt.py new file mode 100644 index 0000000000..5ca8a27172 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/crypt.py @@ -0,0 +1,29 @@ +from hashlib import sha1, sha256 +from pathlib import Path +from typing import Callable + + +def get_hash(req_path: Path, algorithm: Callable) -> str: + """ + Calculate hash value for `req_path` file using `algorithm`. + + :param req_path: of which file to calculate hash + :param algorithm: hash algorithm to be used + :returns: calculated hash value, "-1" if file not found + """ + try: + with open(req_path, mode='rb') as req_file: + hashgen = algorithm() + hashgen.update(req_file.read()) + return hashgen.hexdigest() + except FileNotFoundError: + return "-1" + + +def get_sha256(req_path: Path) -> str: + return get_hash(req_path, sha256) + + +def get_sha1(req_path: Path) -> str: + """ For larger files sha1 algorithm is significantly faster than sha256 """ + return get_hash(req_path, sha1) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/error.py b/ansible/playbooks/roles/repository/files/download-requirements/src/error.py new file mode 100644 index 0000000000..efdd414814 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/error.py @@ -0,0 +1,21 @@ +import logging + + +class CriticalError(Exception): + """ + Raised when there was an error that could not be fixed by + download-requirements script. + """ + + def __init__(self, msg: str): + super().__init__() + logging.error(msg) + + +class PackageNotfound(CriticalError): + """ + Raised when there was no package found by the query tool. 
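Since PackageNotfound subclasses CriticalError, callers can catch the specific or the general case; a minimal sketch (reusing the `rq` wrapper from the repoquery sketch above):

    try:
        packages = rq.query('no-such-package', '%{name}', 'x86_64')
    except PackageNotfound:
        pass   # query returned nothing
    except CriticalError:
        raise  # retries exhausted or another fatal error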
+ """ + + def __init__(self, msg: str): + super().__init__(msg) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/mode/base_mode.py b/ansible/playbooks/roles/repository/files/download-requirements/src/mode/base_mode.py new file mode 100644 index 0000000000..611b1e89a4 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/mode/base_mode.py @@ -0,0 +1,261 @@ +import logging +from collections import defaultdict +from os import chmod +from pathlib import Path +from typing import Any, Dict + +from poyo import parse_string, PoyoException + +from src.command.toolchain import Toolchain, TOOLCHAINS +from src.config import Config +from src.crypt import get_sha1, get_sha256 +from src.error import CriticalError + + +def load_yaml_file(filename: Path) -> Any: + try: + with open(filename, encoding="utf-8") as req_handler: + return parse_string(req_handler.read()) + except PoyoException as exc: + logging.error(exc) + except Exception: + logging.error(f'Failed loading: {filename}') + + +class BaseMode: + """ + An abstract class for running specific operations on target OS. + Main running method is :func:`~base_mode.BaseMode.run` + """ + + def __init__(self, config: Config): + self._cfg = config + + self._repositories: Dict[str, Dict] = self.__parse_repositories() + self._requirements: Dict[str, Any] = self.__parse_requirements() + self._tools: Toolchain = TOOLCHAINS[self._cfg.os_type](self._cfg.retries) + + def __parse_repositories(self) -> Dict[str, Dict]: + """ + Load repositories for target architecture/distro from a yaml file. + + :returns: parsed repositories data + """ + return load_yaml_file(self._cfg.repo_path / f'{self._cfg.distro_subdir}.yml')['repositories'] + + def __parse_requirements(self) -> Dict[str, Any]: + """ + Load requirements for target architecture/distro from a yaml file. + + :returns: parsed requirements data + """ + reqs: Dict = defaultdict(dict) + + content = load_yaml_file(self._cfg.reqs_path / f'{self._cfg.distro_subdir}/packages.yml') + reqs['packages'] = content['packages'] + + try: + reqs['prereq-packages'] = content['prereq-packages'] + except KeyError: + pass # prereq packages are only for some distros + + content = load_yaml_file(self._cfg.reqs_path / f'{self._cfg.distro_subdir}/files.yml') + reqs['files'].update(content['files']) + + for common_reqs in ['cranes', 'files', 'images']: + content = load_yaml_file(self._cfg.reqs_path / f'{self._cfg.os_arch.value}/{common_reqs}.yml') + reqs[common_reqs].update(content[common_reqs]) + + content = load_yaml_file(self._cfg.reqs_path / 'grafana-dashboards.yml') + reqs['grafana-dashboards'].update(content['grafana-dashboards']) + + return reqs + + def _use_backup_repositories(self): + """ + Check if there were any critical issues and if so, try to restore the state using backup + """ + raise NotImplementedError + + def _add_third_party_repositories(self): + """ + Add third party repositories for target OS's package manager + """ + raise NotImplementedError + + def _install_base_packages(self): + """ + Ensure that packages for file downloading are installed on the OS. + """ + raise NotImplementedError + + def _download_packages(self): + """ + Download packages under `self._requirements['packages']` using target OS's package manager + """ + raise NotImplementedError + + def _download_file(self, file: str): + """ + Run command for downloading `file` on target OS. 
+ + :param file: to be downloaded + """ + raise NotImplementedError + + def _download_grafana_dashboard(self, dashboard: str, output_file: Path): + """ + Run command for downloading `grafana dashboard` on target OS. + + :param dashboard: to be downloaded + :param output_file: under which filename dashboard will be saved + """ + raise NotImplementedError + + def _download_crane_binary(self, url: str, dest: Path): + """ + Run command for downloading `crane` on target OS. + + :param url: to be downloaded + :param dest: under which filename the binary will be saved + """ + raise NotImplementedError + + def __download_files(self): + """ + Download files under `self._requirements['files']` + """ + files: Dict[str, Dict] = self._requirements['files'] + for file in files: + try: + filepath = self._cfg.dest_files / file.split('/')[-1] + if files[file]['sha256'] == get_sha256(filepath): + logging.debug(f'- {file} - checksum ok, skipped') + continue + + logging.info(f'- {file}') + self._download_file(file) + except CriticalError: + logging.warn(f'Could not download file: {file}') + + def __download_grafana_dashboards(self): + """ + Download grafana dashboards under `self._requirements['grafana-dashboards']` + """ + dashboards: Dict[str, Dict] = self._requirements['grafana-dashboards'] + for dashboard in dashboards: + try: + output_file = self._cfg.dest_grafana_dashboards / f'{dashboard}.json' + + if dashboards[dashboard]['sha256'] == get_sha256(output_file): + logging.debug(f'- {dashboard} - checksum ok, skipped') + continue + + logging.info(f'- {dashboard}') + self._download_grafana_dashboard(dashboards[dashboard]['url'], output_file) + except CriticalError: + logging.warn(f'Could not download grafana dashboard: {dashboard}') + + def __download_crane(self): + """ + Download Crane package if needed and set up its environment + """ + crane_path = self._cfg.dest_dir / 'crane' + crane_package_path = Path(f'{crane_path}.tar.gz') + + cranes = self._requirements['cranes'] + first_crane = next(iter(cranes)) # right now we use only a single crane source + if cranes[first_crane]['sha256'] == get_sha256(crane_package_path): + logging.debug('crane - checksum ok, skipped') + else: + self._download_crane_binary(first_crane, crane_package_path) + self._tools.tar.unpack(crane_package_path, 'crane', directory=self._cfg.dest_dir) + chmod(crane_path, 0o0755) + + # create symlink to the crane file so that it'll be visible in the shell + crane_symlink = Path('/usr/bin/crane') + if not crane_symlink.exists(): + crane_symlink.symlink_to(crane_path) + self._cfg.dest_crane_symlink = crane_symlink + + def _download_images(self): + """ + Download images under `self._requirements['images']` using Crane + """ + platform: str = 'linux/amd64' if self._cfg.os_arch.value == 'x86_64' else 'linux/arm64' + images = self._requirements['images'] + for image in images: + try: + url, version = image.split(':') + filename = Path(f'{url.split("/")[-1]}-{version}.tar') # format: <image>-<version>.tar + + if images[image]['sha1'] == get_sha1(self._cfg.dest_images / filename): + logging.debug(f'- {image} - checksum ok, skipped') + continue + + logging.info(f'- {image}') + self._tools.crane.pull(image, self._cfg.dest_images / filename, platform) + except CriticalError: + logging.warn(f'Could not download image: `{image}`') + + def _cleanup(self): + """ + Optional step for cleanup routines + """ + pass + + def run(self): + """ + Run target mode.
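A hedged sketch of driving a concrete mode (the actual entry point is outside this excerpt; the class names come from the modes defined below):

    import sys
    config = Config(sys.argv)
    mode = DebianFamilyMode(config) if config.os_type == OSType.Ubuntu else RedHatFamilyMode(config)
    mode.run()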
+ + :raises: + :class:`CriticalError`: can be raised on exceeding retries + :class:`Exception`: on OS I/O failures + """ + # add required directories + self._cfg.dest_files.mkdir(exist_ok=True, parents=True) + self._cfg.dest_grafana_dashboards.mkdir(exist_ok=True, parents=True) + self._cfg.dest_images.mkdir(exist_ok=True, parents=True) + self._cfg.dest_packages.mkdir(exist_ok=True, parents=True) + + logging.info('Checking backup repositories...') + self._use_backup_repositories() + logging.info('Done checking backup repositories.') + + logging.info('Installing base packages...') + self._install_base_packages() + logging.info('Done installing base packages.') + + logging.info('Adding third party repositories...') + self._add_third_party_repositories() + logging.info('Done adding third party repositories.') + + logging.info('Downloading packages...') + self._download_packages() + logging.info('Done downloading packages.') + + logging.info('Downloading files...') + self.__download_files() + logging.info('Done downloading files.') + + logging.info('Downloading grafana dashboards...') + self.__download_grafana_dashboards() + logging.info('Done downloading grafana dashboards.') + + logging.info('Downloading Crane...') + self.__download_crane() + logging.info('Done downloading Crane.') + + logging.info('Downloading images...') + self._download_images() + logging.info('Done downloading images.') + + if self._cfg.dest_crane_symlink is not None: + if self._cfg.dest_crane_symlink.exists(): + logging.debug(f'Removing `crane` symlink: {str(self._cfg.dest_crane_symlink)}...') + self._cfg.dest_crane_symlink.unlink() + logging.debug('Done.') + + logging.info('Running cleanup...') + self._cleanup() + logging.info('Done running cleanup.') diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/mode/debian_family_mode.py b/ansible/playbooks/roles/repository/files/download-requirements/src/mode/debian_family_mode.py new file mode 100644 index 0000000000..1dd200362b --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/mode/debian_family_mode.py @@ -0,0 +1,135 @@ +from pathlib import Path +from shutil import move +from typing import Dict, List +import logging +import os + +from src.config import Config +from src.mode.base_mode import BaseMode, get_sha256 + + +class DebianFamilyMode(BaseMode): + """ + Used by distros based on Debian GNU/Linux + """ + + def __init__(self, config: Config): + super().__init__(config) + self.__create_repo_paths() + self.__installed_packages: List[str] = [] + + def __create_repo_paths(self): + for repo in self._repositories.keys(): + self._repositories[repo]['path'] = Path('/etc/apt/sources.list.d') / f'{repo}.list' + + def _use_backup_repositories(self): + sources = Path('/etc/apt/sources.list') + if not sources.exists() or not sources.stat().st_size: + if self._cfg.repos_backup_file.exists() and self._cfg.enable_backup: + logging.warn('OS repositories seem to be missing, restoring...') + self._tools.tar.unpack(filename=self._cfg.repos_backup_file, + directory=Path('/'), + absolute_names=True, + uncompress=False, + verbose=True) + else: + logging.warn(f'{str(sources)} seems to be missing, you either know what you are doing or ' + 'you need to fix your repositories') + + def _install_base_packages(self): + # install prerequisites which might be missing + installed_packages = self._tools.apt.list_installed_packages() + + for package in ['wget', 'gpg', 'curl', 'tar']: + if package not in installed_packages: +
self._tools.apt.install(package, assume_yes=True) + self.__installed_packages.append(package) + logging.info(f'- {package}') + + def _add_third_party_repositories(self): + # back up custom repositories to avoid possible conflicts + for repo_file in Path('/etc/apt/sources.list.d').iterdir(): + if repo_file.name.endswith('.list'): + repo_file.rename(f'{repo_file}.bak') + + # add third party keys + for repo in self._repositories: + data = self._repositories[repo] + key_file = Path(f'/tmp/{repo}') + self._tools.wget.download(data['key'], key_file) + self._tools.apt_key.add(key_file) + + # create repo files + for repo in self._repositories: + data = self._repositories[repo] + with data['path'].open(mode='a') as repo_handler: + repo_handler.write(data['content']) + + self._tools.apt.update() + + def _download_packages(self): + # the working directory needs to be changed since `apt download` does not allow setting a target dir + os.chdir(self._cfg.dest_packages) + + packages: Dict[str, Dict] = self._requirements['packages'] + packages_to_download: List[str] = [] + for package in packages: + version: str = '' + try: + package_base_name, version = package.split('=') # some packages are in the form of `package=version*` + except ValueError: + package_base_name = package + + package_info = self._tools.apt_cache.get_package_info(package_base_name, version.strip('*')) + + # Files downloaded by `apt download` cannot have custom names + # and they always start with the package name plus versioning and other info. + # Check if there is an existing file corresponding to the package name + try: + version = package_info['Version'].split(':')[-1] + found_pkg: Path = [pkg_file for pkg_file in self._cfg.dest_packages.iterdir() if + pkg_file.name.startswith(f'{package_info["Package"]}_') and + version in pkg_file.name][0] + + if get_sha256(found_pkg) == package_info['SHA256']: + logging.debug(f'- {package} - checksum ok, skipped') + continue + + except IndexError: + pass # package not found + + # resolve dependencies for the target package and, if needed, download them first + deps: List[str] = self._tools.apt_cache.get_package_dependencies(package_base_name) + + packages_to_download.extend(deps) + packages_to_download.append(package) + + for package in set(packages_to_download): + logging.info(f'- {package}') + self._tools.apt.download(package) + + os.chdir(self._cfg.script_path) + + def _download_file(self, file: str): + self._tools.wget.download(file, directory_prefix=self._cfg.dest_files) + + def _download_grafana_dashboard(self, dashboard: str, output_file: Path): + self._tools.wget.download(dashboard, output_document=output_file) + + def _download_crane_binary(self, url: str, dest: Path): + self._tools.wget.download(url, dest) + + def _cleanup(self): + # clean up 3rd party repositories + for data in self._repositories.values(): + if data['path'].exists(): + data['path'].unlink() + + # restore masked custom repositories to their original names + for repo_file in Path('/etc/apt/sources.list.d').iterdir(): + if repo_file.name.endswith('.bak'): + move(str(repo_file.absolute()), str(repo_file.with_suffix('').absolute())) + + # remove installed packages + for package in self.__installed_packages: + self._tools.apt.remove(package) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/mode/red_hat_family_mode.py b/ansible/playbooks/roles/repository/files/download-requirements/src/mode/red_hat_family_mode.py new file mode 100644 index 0000000000..4632d3afcb --- /dev/null +++
b/ansible/playbooks/roles/repository/files/download-requirements/src/mode/red_hat_family_mode.py @@ -0,0 +1,213 @@ +import logging +import shutil +from pathlib import Path +from typing import List, Set + +from src.command.command import Command +from src.config import Config +from src.error import PackageNotfound +from src.mode.base_mode import BaseMode + + +class RedHatFamilyMode(BaseMode): + """ + Used by distros based on RedHat GNU/Linux + """ + + def __init__(self, config: Config): + super().__init__(config) + self.__base_packages: List[str] = ['yum-utils', 'wget', 'curl', 'tar'] + self.__installed_packages: List[str] = [] + + def _use_backup_repositories(self): + sources = Path('/etc/yum.repos.d/epirepo.repo') + if sources.exists() and sources.stat().st_size: + if self._cfg.repos_backup_file.exists() and self._cfg.enable_backup: + logging.warning('OS repositories seem to be missing, restoring...') + self._tools.tar.unpack(filename=self._cfg.repos_backup_file, + directory=Path('/'), + absolute_names=True, + uncompress=False, + verbose=True) + else: + logging.warning(f'{str(sources)} exists but the repositories backup is unavailable; either you know ' + 'what you are doing or you need to fix your repositories') + + def _install_base_packages(self): + # some packages are from the EPEL repo + if not self._tools.rpm.is_package_installed('epel-release'): + self._tools.yum.install('https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm') + self.__installed_packages.append('epel-release') + + self.__remove_yum_cache_for_untracked_repos() + self._tools.yum.makecache(True) + + for package in self.__base_packages: + if not self._tools.rpm.is_package_installed(package): + self._tools.yum.install(package) + self.__installed_packages.append(package) + + def __enable_repos(self, repo_id_patterns: List[str]): + """ + :param repo_id_patterns: patterns of repository IDs to enable + """ + for repo in self._tools.yum.find_rhel_repo_id(repo_id_patterns): + if not self._tools.yum.is_repo_enabled(repo): + self._tools.yum_config_manager.enable_repo(repo) + + def _add_third_party_repositories(self): + # back up custom repositories to avoid possible conflicts + for repo_file in Path('/etc/yum.repos.d/').iterdir(): + if repo_file.name.endswith('.repo'): + shutil.copy(str(repo_file), f'{repo_file}.bak') + + # Fix for RHUI client certificate expiration [#2318] + if self._tools.yum.is_repo_enabled('rhui-microsoft-azure-rhel'): + self._tools.yum.update('rhui-microsoft-azure-rhel*') + + # -> rhel-7-server-extras-rpms # for the container-selinux package; this repo has different ID names on clouds + # About rhel-7-server-extras-rpms: https://access.redhat.com/solutions/3418891 + repo_id_patterns = ['rhel-7-server-extras-rpms', + 'rhui-rhel-7-server-rhui-extras-rpms', + 'rhui-REGION-rhel-server-extras', + 'rhel-7-server-rhui-extras-rpms'] # on-prem|Azure|AWS7.8|AWS7.9 + self.__enable_repos(repo_id_patterns) + + # -> rhel-server-rhscl-7-rpms # for Red Hat Software Collections (RHSCL); this repo has different ID names on clouds + # About rhel-server-rhscl-7-rpms: https://access.redhat.com/solutions/472793 + repo_id_patterns = ['rhel-server-rhscl-7-rpms', + 'rhui-rhel-server-rhui-rhscl-7-rpms', + 'rhui-REGION-rhel-server-rhscl', + 'rhel-server-rhui-rhscl-7-rpms'] # on-prem|Azure|AWS7.8|AWS7.9 + self.__enable_repos(repo_id_patterns) + + for repo in self._repositories: + repo_filepath = Path('/etc/yum.repos.d') / f'{repo}.repo' + content = self._repositories[repo]['data'] + content = content + f'\ngpgkey={" ".join(self._repositories[repo]["gpg_keys"])}' + + if not
self._tools.yum.is_repo_enabled(repo): + with open(repo_filepath, mode='w') as repo_handler: + repo_handler.write(content) + + for key in self._repositories[repo]['gpg_keys']: + self._tools.rpm.import_key(key) + + self._tools.yum.accept_keys() + + # The official Docker CE repository, added with https://download.docker.com/linux/centos/docker-ce.repo, + # has had a broken URL (https://download.docker.com/linux/centos/7Server/x86_64/stable) for a long time, + # so the direct (patched) link is used first if available. + if self._tools.yum.is_repo_available('docker-ce-stable-patched'): + self._tools.yum_config_manager.disable_repo('docker-ce-stable-patched') + + if not self._tools.yum.is_repo_enabled('docker-ce'): + self._tools.yum_config_manager.add_repo('https://download.docker.com/linux/centos/docker-ce.repo') + self._tools.yum.accept_keys() + + for repo in ['https://dl.2ndquadrant.com/default/release/get/10/rpm', # for repmgr + 'https://dl.2ndquadrant.com/default/release/get/13/rpm']: + Command('curl', self._cfg.retries, [repo]) | Command('bash', self._cfg.retries) # curl {repo} | bash + + # the script adds 2 repositories; only 1 is required + for repo in ['2ndquadrant-dl-default-release-pg10-debug', + '2ndquadrant-dl-default-release-pg13-debug']: + self._tools.yum_config_manager.disable_repo(repo) + + def __remove_yum_cache_for_untracked_repos(self): + # clean metadata for upgrades (when the same package can be downloaded from a changed repo) + + whatprovides: List[str] = self._tools.rpm.which_packages_provides_file('system-release(releasever)') + capabilities: List[str] = self._tools.rpm.get_package_capabilities(whatprovides[0]) + releasever: str = '' + for cap in capabilities: + if 'system-release(releasever)' in cap: + releasever = cap.split('=')[-1].replace(' ', '') + break + + cachedir: str = '' + with open('/etc/yum.conf') as yum_conf: + for line in yum_conf.readlines(): + if 'cachedir' in line: + cachedir = line.split('=')[-1].replace('\n', '') + break + + cachedir = cachedir.replace('$basearch', self._cfg.os_arch.value) + cachedir = cachedir.replace('$releasever', releasever) + + cachedirs = [cdir for cdir in Path(cachedir).iterdir() if cdir.is_dir()] + repoinfo: List[str] = self._tools.yum.list_all_repo_info() + repoinfo = list(filter(lambda elem: 'Repo-id' in elem, repoinfo)) + repoinfo = [repo.split(':')[-1].replace(' ', '').split('/')[0] for repo in repoinfo] + + for cdir in cachedirs: + if cdir.name in repoinfo: + shutil.rmtree(str(cdir)) + + def __download_prereq_packages(self) -> Set[str]: + # download requirements (fixed versions) + prereqs_dir = self._cfg.dest_packages / 'repo-prereqs' + prereqs_dir.mkdir(exist_ok=True, parents=True) + + collected_prereqs: List[str] = [] + prereq_packages: List[str] = self._requirements['prereq-packages'] + for prereq_pkg in prereq_packages: + collected_prereqs.extend(self._tools.repoquery.query(prereq_pkg, + queryformat='%{ui_nevra}', + arch=self._cfg.os_arch.value)) + + unique_collected_prereqs: Set = set(collected_prereqs) + for prereq in unique_collected_prereqs: + self._tools.yumdownloader.download_packages([prereq], + arch=self._cfg.os_arch.value, + exclude='*i686', + destdir=prereqs_dir) + logging.info(f'- {prereq}') + + return unique_collected_prereqs + + def _download_packages(self): + downloaded_prereqs: Set = self.__download_prereq_packages() + + packages: List[str] = self._requirements['packages'] + packages_to_download: List[str] = [] + for package in packages: + # package itself + package_name = self._tools.repoquery.query(package,
queryformat='%{ui_nevra}', + arch=self._cfg.os_arch.value)[0] + + packages_to_download.append(package_name) + + # dependencies + packages_to_download.extend(self._tools.repoquery.get_dependencies(package, + queryformat='%{name}.%{arch}', + arch=self._cfg.os_arch.value)) + + for package in set(packages_to_download): + if package not in downloaded_prereqs: + logging.info(f'- {package}') + self._tools.yumdownloader.download_packages([package], + arch=self._cfg.os_arch.value, + exclude='*i686', + destdir=self._cfg.dest_packages) + + def _download_file(self, file: str): + self._tools.wget.download(file, directory_prefix=self._cfg.dest_files, additional_params=False) + + def _download_grafana_dashboard(self, dashboard: str, output_file: Path): + self._tools.wget.download(dashboard, output_document=output_file, additional_params=False) + + def _download_crane_binary(self, url: str, dest: Path): + self._tools.wget.download(url, dest, additional_params=False) + + def _cleanup(self): + # restore repo files + for repo_file in Path('/etc/yum.repos.d').iterdir(): + if repo_file.name.endswith('.bak'): + shutil.move(str(repo_file.absolute()), str(repo_file.with_suffix('').absolute())) + + # remove installed packages + for package in self.__installed_packages: + if self._tools.rpm.is_package_installed(package): + self._tools.yum.remove(package) diff --git a/ansible/playbooks/roles/repository/files/download-requirements/src/run.py b/ansible/playbooks/roles/repository/files/download-requirements/src/run.py new file mode 100644 index 0000000000..630cf1b60c --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/src/run.py @@ -0,0 +1,17 @@ +from typing import Dict, Type + +from src.config import Config, OSType +from src.error import CriticalError +from src.mode.base_mode import BaseMode +from src.mode.debian_family_mode import DebianFamilyMode +from src.mode.red_hat_family_mode import RedHatFamilyMode + + +MODES: Dict[OSType, Type[BaseMode]] = { + OSType.Ubuntu: DebianFamilyMode, + OSType.RedHat: RedHatFamilyMode, +} + + +def run(config: Config): + MODES[config.os_type](config).run() diff --git a/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_apt.py b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_apt.py new file mode 100644 index 0000000000..fa91bb4e44 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_apt.py @@ -0,0 +1,33 @@ +from tests.mocks.command_run_mock import CommandRunMock + +from src.command.apt import Apt + + +def test_interface_update(mocker): + ''' Check argument construction for `apt update` ''' + with CommandRunMock(mocker, Apt(1).update) as call_args: + assert call_args == ['apt', 'update'] + + +def test_interface_download(mocker): + ''' Check argument construction for `apt download package` ''' + with CommandRunMock(mocker, Apt(1).download, {'package': 'vim'}) as call_args: + assert call_args == ['apt', 'download', 'vim'] + + +def test_interface_install(mocker): + ''' Check argument construction for `apt install -y package` ''' + with CommandRunMock(mocker, Apt(1).install, {'package': 'vim', 'assume_yes': True}) as call_args: + assert call_args == ['apt', 'install', '-y', 'vim'] + + +def test_interface_remove(mocker): + ''' Check argument construction for `apt remove -y package` ''' + with CommandRunMock(mocker, Apt(1).remove, {'package': 'vim', 'assume_yes': True}) as call_args: + assert call_args == ['apt', 'remove', '-y', 'vim'] + + +def
test_interface_list_installed_packages(mocker): + ''' Check argument construction for `apt list` ''' + with CommandRunMock(mocker, Apt(1).list_installed_packages) as call_args: + assert call_args == ['apt', 'list', '--installed'] diff --git a/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_apt_cache.py b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_apt_cache.py new file mode 100644 index 0000000000..4a7008f476 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_apt_cache.py @@ -0,0 +1,18 @@ +from tests.mocks.command_run_mock import CommandRunMock + +from src.command.apt_cache import AptCache + + +def test_interface_get_package_dependencies(mocker): + ''' Check argument construction for `apt-cache depends` ''' + with CommandRunMock(mocker, AptCache(1).get_package_dependencies, {'package': 'vim'}) as call_args: + assert call_args == ['apt-cache', + 'depends', + '--no-recommends', + '--no-suggests', + '--no-conflicts', + '--no-breaks', + '--no-replaces', + '--no-enhances', + '--no-pre-depends', + 'vim'] diff --git a/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_apt_key.py b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_apt_key.py new file mode 100644 index 0000000000..980398399d --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_apt_key.py @@ -0,0 +1,11 @@ +from pathlib import Path + +from tests.mocks.command_run_mock import CommandRunMock + +from src.command.apt_key import AptKey + + +def test_interface_add(mocker): + ''' Check argument construction for `apt-key add` ''' + with CommandRunMock(mocker, AptKey(1).add, {'key': Path('/path/to/some/key')}) as call_args: + assert call_args == ['apt-key', 'add', '/path/to/some/key'] diff --git a/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_crane.py b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_crane.py new file mode 100644 index 0000000000..4722563aa8 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_crane.py @@ -0,0 +1,19 @@ +from pathlib import Path +from tests.mocks.command_run_mock import CommandRunMock + +from src.command.crane import Crane + + +def test_interface_pull(mocker): + ''' Check argument construction for crane pull ''' + mocker.patch('src.command.crane.chmod', return_value=None) + mocker.patch('src.command.crane.mkstemp', return_value=[None, '/tmp/tmpfile']) + mocker.patch('src.command.crane.move', return_value=None) + + with CommandRunMock(mocker, Crane(1).pull, {'image_name': 'image', + 'destination': Path('/some/place'), + 'platform': 'platform', + 'legacy_format': True, + 'insecure': True}) as call_args: + assert call_args == ['crane', 'pull', '--insecure', '--platform=platform', '--format=legacy', + 'image', '/tmp/tmpfile'] diff --git a/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_pip.py b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_pip.py new file mode 100644 index 0000000000..1765a7958f --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_pip.py @@ -0,0 +1,11 @@ +from tests.mocks.command_run_mock import CommandRunMock + +from src.command.pip import Pip + + +def test_interface_install(mocker): + ''' Check argument construction for `pip 
install` ''' + with CommandRunMock(mocker, Pip(1).install, {'package': 'poyo', + 'version': '=0.5.0', + 'user': True}) as call_args: + assert call_args == ['pip3', 'install', 'poyo=0.5.0', '--user'] diff --git a/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_repoquery.py b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_repoquery.py new file mode 100644 index 0000000000..3190dbf7e5 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_repoquery.py @@ -0,0 +1,20 @@ +from tests.mocks.command_run_mock import CommandRunMock + +from src.command.repoquery import Repoquery + + +def test_interface_query(mocker): + ''' Check argument construction for `repoquery` ''' + with CommandRunMock(mocker, Repoquery(1).query, {'package': 'vim', + 'queryformat': 'some_format', + 'arch': 'some_arch', + 'requires': True, + 'resolve': True}) as call_args: + assert call_args == ['repoquery', + '--requires', + '--resolve', + '--queryformat', + 'some_format', + '--archlist=some_arch,noarch', + 'vim' + ] diff --git a/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_rpm.py b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_rpm.py new file mode 100644 index 0000000000..57baf41eb7 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_rpm.py @@ -0,0 +1,27 @@ +from tests.mocks.command_run_mock import CommandRunMock + +from src.command.rpm import Rpm + + +def test_interface_is_package_installed(mocker): + ''' Check argument construction for `rpm --query` ''' + with CommandRunMock(mocker, Rpm(1).is_package_installed, {'package': 'vim'}) as call_args: + assert call_args == ['rpm', '--query', '--quiet', 'vim'] + + +def test_interface_import_key(mocker): + ''' Check argument construction for `rpm --import` ''' + with CommandRunMock(mocker, Rpm(1).import_key, {'key': 'some_key'}) as call_args: + assert call_args == ['rpm', '--import', 'some_key'] + + +def test_interface_get_package_capabilities(mocker): + ''' Check argument construction for `rpm -q --provides` ''' + with CommandRunMock(mocker, Rpm(1).get_package_capabilities, {'filename': 'some_file'}) as call_args: + assert call_args == ['rpm', '-q', '--provides', 'some_file'] + + +def test_interface_which_packages_provides_file(mocker): + ''' Check argument construction for `rpm -q --whatprovides` ''' + with CommandRunMock(mocker, Rpm(1).which_packages_provides_file, {'filename': 'some_file'}) as call_args: + assert call_args == ['rpm', '-q', '--whatprovides', 'some_file'] diff --git a/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_tar.py b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_tar.py new file mode 100644 index 0000000000..6187aa0c5b --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_tar.py @@ -0,0 +1,29 @@ +from tests.mocks.command_run_mock import CommandRunMock + +from src.command.tar import Tar + + +def test_interface_pack(mocker): + ''' Check argument construction for `tar -cf` ''' + with CommandRunMock(mocker, Tar().pack, {'filename': '/tmp/package.tar.gz', + 'target': '*', + 'directory': '/some/directory', + 'verbose': True, + 'compress': True, + 'verify': True}) as call_args: + assert call_args == ['tar', '-czvf', '/tmp/package.tar.gz', '--verify', '--directory', '/some/directory', '*'] + + +def 
test_interface_unpack(mocker): + ''' Check argument construction for `tar -xf` ''' + with CommandRunMock(mocker, Tar().unpack, {'filename': '/tmp/package.tar.gz', + 'target': 'some_target', + 'absolute_names': True, + 'directory': '/some/directory', + 'overwrite': True, + 'verbose': True, + 'uncompress': True, + 'strip_components': 2}) as call_args: + assert call_args == ['tar', '-xzvf', '/tmp/package.tar.gz', '--absolute-names', '--directory', '/some/directory', + '--strip-components=2', 'some_target', '--overwrite'] + diff --git a/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_wget.py b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_wget.py new file mode 100644 index 0000000000..bf76314048 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_wget.py @@ -0,0 +1,15 @@ +from pathlib import Path + +from src.command.wget import IPFamily, Wget +from tests.mocks.command_run_mock import CommandRunMock + + +def test_builder_download(mocker): + ''' Check argument construction for `wget` ''' + with CommandRunMock(mocker, Wget(1).download, {'url': 'http://some.url.com', + 'output_document': Path('/var/log/output_name'), + 'directory_prefix': Path('/custom/prefix'), + 'ip_family': IPFamily.IPV4}) as call_args: + assert call_args == ['wget', '--no-use-server-timestamps', '--continue', '--show-progress', + '--output-document=/var/log/output_name', '--directory-prefix=/custom/prefix', + '--prefer-family=IPv4', 'http://some.url.com'] diff --git a/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_yum.py b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_yum.py new file mode 100644 index 0000000000..0aee600da3 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_yum.py @@ -0,0 +1,36 @@ +from tests.mocks.command_run_mock import CommandRunMock + +from src.command.yum import Yum + + +def test_interface_update(mocker): + ''' Check argument construction for `yum update` ''' + with CommandRunMock(mocker, Yum(1).update, {'enablerepo': 'some_repo', + 'package': 'some_package', + 'disablerepo': 'other_repo', + 'assume_yes': True}) as call_args: + assert call_args == ['yum', 'update', '-y', 'some_package', '--disablerepo=other_repo', '--enablerepo=some_repo'] + + +def test_interface_install(mocker): + ''' Check argument construction for `yum install` ''' + with CommandRunMock(mocker, Yum(1).install, {'package': 'vim'}) as call_args: + assert call_args == ['yum', 'install', '-y', 'vim'] + + +def test_interface_remove(mocker): + ''' Check argument construction for `yum remove` ''' + with CommandRunMock(mocker, Yum(1).remove, {'package': 'vim'}) as call_args: + assert call_args == ['yum', 'remove', '-y', 'vim'] + + +def test_interface_is_repo_enabled(mocker): + ''' Check argument construction for `yum repolist enabled` ''' + with CommandRunMock(mocker, Yum(1).is_repo_enabled, {'repo': 'some_repo'}) as call_args: + assert call_args == ['yum', 'repolist', 'enabled'] + + +def test_interface_find_rhel_repo_id(mocker): + ''' Check argument construction for `yum repolist all` ''' + with CommandRunMock(mocker, Yum(1).find_rhel_repo_id, {'patterns': ['pat1', 'pat2']}) as call_args: + assert call_args == ['yum', 'repolist', 'all'] diff --git a/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_yum_config_manager.py 
b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_yum_config_manager.py new file mode 100644 index 0000000000..8ce396d22f --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_yum_config_manager.py @@ -0,0 +1,21 @@ +from tests.mocks.command_run_mock import CommandRunMock + +from src.command.yum_config_manager import YumConfigManager + + +def test_interface_enable_repo(mocker): + ''' Check argument construction for `yum-config-manager --enable` ''' + with CommandRunMock(mocker, YumConfigManager(1).enable_repo, {'repo': 'some_repo'}) as call_args: + assert call_args == ['yum-config-manager', '--enable', 'some_repo'] + + +def test_interface_add_repo(mocker): + ''' Check argument construction for `yum-config-manager --add-repo` ''' + with CommandRunMock(mocker, YumConfigManager(1).add_repo, {'repo': 'some_repo'}) as call_args: + assert call_args == ['yum-config-manager', '--add-repo', 'some_repo'] + + +def test_interface_disable_repo(mocker): + ''' Check argument construction for `yum-config-manager --disable` ''' + with CommandRunMock(mocker, YumConfigManager(1).disable_repo, {'repo': 'some_repo'}) as call_args: + assert call_args == ['yum-config-manager', '--disable', 'some_repo'] diff --git a/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_yumdownloader.py b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_yumdownloader.py new file mode 100644 index 0000000000..13c4d9e6f2 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/tests/command/test_yumdownloader.py @@ -0,0 +1,14 @@ +from pathlib import Path + +from tests.mocks.command_run_mock import CommandRunMock + +from src.command.yumdownloader import Yumdownloader + + +def test_interface_download_packages(mocker): + ''' Check argument construction for `yumdownloader` ''' + with CommandRunMock(mocker, Yumdownloader(1).download_packages, {'packages': [], + 'arch': 'some_arch', + 'destdir': Path('/some/path'), + 'exclude': '*'}) as call_args: + assert call_args == ['yumdownloader', '--quiet', '--archlist=some_arch', '--exclude=*', '--destdir=/some/path'] diff --git a/ansible/playbooks/roles/repository/files/download-requirements/tests/mocks/command_run_mock.py b/ansible/playbooks/roles/repository/files/download-requirements/tests/mocks/command_run_mock.py new file mode 100644 index 0000000000..7922980232 --- /dev/null +++ b/ansible/playbooks/roles/repository/files/download-requirements/tests/mocks/command_run_mock.py @@ -0,0 +1,48 @@ +import subprocess +from typing import Any, Callable, Dict, List +from unittest.mock import Mock + +from pytest_mock.plugin import MockerFixture + + +class CommandRunMock: + """ + Mock class for Command.run() calls. + Usage: + + with CommandRunMock(mocker, function_to_test, function_args) as call_args: + assert call_args == [expected_arg1, ...] 
+ """ + def __init__(self, mocker: MockerFixture, func: Callable, args: Dict[str, Any] = None): + """ + :param mocker: mocker object provided by pytest + :param func: function which will be tested + :param args: parameters that will be passed to `__func` + """ + self.__mocker = mocker + self.__func = func + self.__args = args + + def __enter__(self) -> List[str]: + """ + :return: list of arguments passed to the subprocess.run() function + """ + mock = Mock() + mock.returncode = 0 + + self.__mocker.patch('src.command.command.subprocess.run', side_effect=lambda args, encoding, stdout, stderr: mock) + + spy = self.__mocker.spy(subprocess, 'run') + + try: + if self.__args: + self.__func(**self.__args) + else: + self.__func() + except Exception: + pass + + return spy.call_args[0][0] + + def __exit__(self, *args): + pass diff --git a/ansible/playbooks/roles/repository/files/download-requirements/ubuntu-20.04/add-repositories.sh b/ansible/playbooks/roles/repository/files/download-requirements/ubuntu-20.04/add-repositories.sh deleted file mode 100644 index 48b788a6cb..0000000000 --- a/ansible/playbooks/roles/repository/files/download-requirements/ubuntu-20.04/add-repositories.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -eu - -wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | apt-key add - -echo "deb https://artifacts.elastic.co/packages/oss-6.x/apt stable main" | tee /etc/apt/sources.list.d/elastic-6.x.list - -wget -qO - https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - -echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list - -wget -qO - https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc | apt-key add - -echo "deb https://packages.erlang-solutions.com/ubuntu focal contrib" | tee /etc/apt/sources.list.d/erlang-23.x.list - -# bionic is used since focal contains versions 3.8.11+ but we use 3.8.9 -wget -qO - https://packagecloud.io/rabbitmq/rabbitmq-server/gpgkey | apt-key add - -echo "deb https://packagecloud.io/rabbitmq/rabbitmq-server/ubuntu bionic main" | tee /etc/apt/sources.list.d/rabbitmq.list - -wget -qO - https://download.docker.com/linux/ubuntu/gpg | apt-key add - -echo "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable" | tee /etc/apt/sources.list.d/docker-ce.list - -wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | apt-key add - -echo "deb https://artifacts.elastic.co/packages/oss-7.x/apt stable main" | tee /etc/apt/sources.list.d/elastic-7.x.list - -wget -qO - https://d3g5vo6xdbdb9a.cloudfront.net/GPG-KEY-opendistroforelasticsearch | apt-key add - -echo "deb https://d3g5vo6xdbdb9a.cloudfront.net/apt stable main" | tee -a /etc/apt/sources.list.d/opendistroforelasticsearch.list - -wget -qO - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - -echo "deb http://apt.postgresql.org/pub/repos/apt focal-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list - -# Historical packages from apt.postgresql.org -wget -qO - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - -echo "deb http://apt-archive.postgresql.org/pub/repos/apt focal-pgdg-archive main" | tee /etc/apt/sources.list.d/pgdg-archive.list - -# Provides repmgr -wget -qO - https://dl.2ndquadrant.com/gpg-key.asc | apt-key add - -echo "deb https://dl.2ndquadrant.com/default/release/apt focal-2ndquadrant main" | tee -a /etc/apt/sources.list.d/2ndquadrant-dl-default-release.list diff --git 
a/ansible/playbooks/roles/repository/files/download-requirements/ubuntu-20.04/common.sh b/ansible/playbooks/roles/repository/files/download-requirements/ubuntu-20.04/common.sh deleted file mode 100644 index c21d72a7fc..0000000000 --- a/ansible/playbooks/roles/repository/files/download-requirements/ubuntu-20.04/common.sh +++ /dev/null @@ -1,137 +0,0 @@ -#!/bin/bash -# this file is just a bunch of functions meant to be called from other scripts - - -usage() { - echo "usage: ./$(basename "$0") " - echo "example: ./$(basename "$0") /tmp/downloads" - exit 1 -} - -# params: -remove_file() { - local file_path="$1" - - echol "Removing file: $file_path" - rm -f "$file_path" || exit_with_error "Command failed: rm -f \"$file_path\"" -} - -# params: -create_directory() { - local dir_path="$1" - - if [[ ! -d "$dir_path" ]]; then - mkdir -p $dir_path - fi -} - -# params: -download_image() { - local image_name="$1" - local dest_dir="$2" - - local splited_image=(${image_name//:/ }) - local repository=${splited_image[0]} - local tag=${splited_image[1]} - local repo_basename=$(basename -- "$repository") - local dst_image="${dest_dir}/${repo_basename}-${tag}.tar" - local retries=3 - - if [[ -f ${dst_image} ]]; then - echo "Image: "${dst_image}" already exists. Skipping..." - else - local tmp_file=$(mktemp) - echo "Downloading image: $1" - echo "Crane command is: ${CRANE_BIN} pull --insecure --format=legacy ${image_name} ${dst_image}" - # use temporary file for downloading to be safe from sudden interruptions (network, ctrl+c) - run_cmd_with_retries $retries ${CRANE_BIN} pull --insecure --platform=${docker_platform} --format=legacy ${image_name} ${tmp_file} && chmod 644 ${tmp_file} && mv ${tmp_file} ${dst_image} - fi -} - -# params: [new_filename] -download_file() { - local file_url="$1" - local dest_dir="$2" - if [[ ${3-} ]]; then - local new_filename="$3" - fi - - local file_name - file_name=$(basename "$file_url") - local dest_path="${dest_dir}/${file_name}" - local retries=3 - - # wget with --timestamping sometimes failes on AWS with ERROR 403: Forbidden - # so we remove existing file to overwrite it - - # remove old files to force redownload after a while - # just a precaution so --continue won't append and corrupt files localy if file is updated on server without name change - if [[ -f $dest_path && $(find "$dest_path" -mmin +60 -print) ]]; then - echol "File $dest_path older than 1h, redownloading..." - remove_file "$dest_path" - fi - - # --no-use-server-timestamps - we don't use --timestamping and we need to expire files somehow - # --continue - don't download the same file multiple times, gracefully skip if file is fully downloaded - if [[ ${new_filename-} ]]; then - echol "Downloading file: $file_url as $new_filename" - run_cmd_with_retries $retries wget --no-use-server-timestamps --continue --show-progress --prefer-family=IPv4 "${file_url}" -O "${dest_dir}/${new_filename}" - else - echol "Downloading file: $file_url" - run_cmd_with_retries $retries wget --no-use-server-timestamps --continue --show-progress --prefer-family=IPv4 --directory-prefix="${dest_dir}" "${file_url}" - fi -} - -# to download everything, add "--recurse" flag but then you will get much more packages (e.g. 
596 vs 319) -deplist_cmd() { - apt-cache depends --no-recommends --no-suggests --no-conflicts --no-breaks --no-replaces --no-enhances --no-pre-depends $1 -} - -get_shell_escaped_array() { - if (( $# > 0 )); then - printf '%q\n' "$@" - fi -} - -print_array_as_shell_escaped_string() { - local output - output=$(get_shell_escaped_array "$@") - local -a escaped=() - if [ -n "$output" ]; then - readarray -t escaped <<< "$output" - fi - if (( ${#escaped[@]} > 0 )); then - printf '%s\n' "${escaped[*]}" - fi -} - -run_cmd() { - local -a cmd_arr=("$@") - local output - output=$(print_array_as_shell_escaped_string "${cmd_arr[@]}") - echo "Running command:" "$output" - "${cmd_arr[@]}" -} - -run_cmd_with_retries() { - local retries=${1} - shift - local -a cmd_arr=("$@") - ( # sub-shell is used to limit scope for 'set +e' - set +e - trap - ERR # disable global trap locally - for ((i=0; i <= retries; i++)); do - run_cmd "${cmd_arr[@]}" - return_code=$? - if (( return_code == 0 )); then - break - elif (( i < retries )); then - sleep 1 - echo "retrying ($(( i+1 ))/${retries})" - else - echo "ERROR: all attempts failed" - fi - done - return $return_code - ) -} diff --git a/ansible/playbooks/roles/repository/files/download-requirements/ubuntu-20.04/download-requirements.sh b/ansible/playbooks/roles/repository/files/download-requirements/ubuntu-20.04/download-requirements.sh deleted file mode 100644 index 22e6b597f7..0000000000 --- a/ansible/playbooks/roles/repository/files/download-requirements/ubuntu-20.04/download-requirements.sh +++ /dev/null @@ -1,248 +0,0 @@ -#!/bin/bash - -set -euo pipefail -export DEBIAN_FRONTEND=noninteractive - -script_path="$( cd "$(dirname "$0")" ; pwd -P )" - -# source common functions -. "${script_path}/common.sh" - -internet_access_checks_enabled="yes" -CREATE_LOGFILE="yes" -LOG_FILE_PATH="${script_path}/log" - -. "${script_path}/common/common_functions.sh" - -if [[ $# -lt 1 ]]; then - usage -fi - -readonly START_TIME=$(date +%s) - -script_file_name=$(basename "$0") -dst_dir=$(readlink -m "$1") # beautify input path - remove double slashes if occurs -dst_dir_packages="${dst_dir}/packages" -dst_dir_files="${dst_dir}/files" -dst_dir_images="${dst_dir}/images" -deplist="${script_path}/.dependencies" -retries="3" -download_cmd="run_cmd_with_retries $retries apt-get download" -add_repos="${script_path}/add-repositories.sh" -CRANE_BIN="${script_path}/crane" - -# arch -arch=$(uname -m) -echol "Detected arch: $arch" -input_file="${script_path}/requirements.${arch}.txt" -case $arch in -x86_64) - docker_platform="linux/amd64" - ;; - -*) - exit_with_error "Arch $arch unsupported" - ;; -esac -echol "Docker platform: $docker_platform" - -# checks - -[[ $EUID -eq 0 ]] || { echo "You have to run as root"; exit 1; } - -[[ -f $input_file ]] || exit_with_error "File not found: $input_file" - -# restore system repositories in case they're missing if ansible role gets interrupted - -enable_system_repos_script="/var/tmp/epi-repository-setup-scripts/enable-system-repos.sh" -disable_epirepo_client_script="/var/tmp/epi-repository-setup-scripts/disable-epirepo-client.sh" -apt_sources_list="/etc/apt/sources.list" - -if [[ ! -f $apt_sources_list || ! -s $apt_sources_list ]]; then - if [[ -f /var/tmp/enabled-system-repos.tar && -f $enable_system_repos_script ]]; then - echol "OS repositories seems missing, restoring..." 
- $enable_system_repos_script || exit_with_error "Could not restore system repositories" - $disable_epirepo_client_script || exit_with_error "Could not disable epirepo" - else - echol "$apt_sources_list seems missing or is empty, you either know what you're doing or you need to fix your repositories" - fi -fi - -check_connection apt $apt_sources_list - -# install prerequisites which might be missing -prerequisites=(wget gpg curl tar) -for i in ${prerequisites[@]}; do - dpkg -l | grep "^ii $i\s" &>/dev/null || apt install -y $i -done - -# some quick sanity check -echol "Dependency list: $deplist" -echol "Command used to download packages: $download_cmd" -echol "Destination directory for packages: $dst_dir_packages" - -# make sure destination dir exists -mkdir -p "$dst_dir_packages" -mkdir -p "$dst_dir_files" -mkdir -p "$dst_dir_images" - -# mask custom repositories to avoid possible conflicts -shopt -s nullglob -for i in /etc/apt/sources.list.d/*.list; do - mv "$i" "${i}.bak" -done -shopt -u nullglob - -# add 3rd party repositories -# TODO: See if we need to split this up to support different architectures -. "${add_repos}" - -check_connection apt $(ls /etc/apt/sources.list.d) -apt update - -# parse the input file, separete by tags: [crane], [packages], [files], [images] -crane=$(awk '/^$/ || /^#/ {next}; /\[crane\]/ {f=1; next}; /^\[/ {f=0}; f {print $0}' "${input_file}") -packages=$(awk '/^$/ || /^#/ {next}; /\[packages\]/ {f=1; next}; /^\[/ {f=0}; f {print $0}' "${input_file}") -files=$(awk '/^$/ || /^#/ {next}; /\[files\]/ {f=1; f=2; next}; /^\[/ {f=0}; f {print $0}' "${input_file}") -images=$(awk '/^$/ || /^#/ {next}; /\[images\]/ {f=1; next}; /^\[/ {f=0}; f {print $0}' "${input_file}") - -printf "\n" - -if [[ -e $deplist ]]; then - # clear list of cached dependencies if .dependencies is older than 15 minutes - find "$script_path" -type f -wholename "$deplist" -mmin +15 -exec rm "$deplist" \; - # clear list of cached dependencies if requirements.txt was recently edited - find "$script_path" -type f -wholename "$input_file" -mmin -1 -exec rm "$deplist" \; -fi - -# CRANE -if [[ -z "${crane}" || $(wc -l <<< "${crane}") -ne 1 ]] ; then - exit_with_error "Crane binary download path undefined or more than one download path defined" -else - if [[ -x $CRANE_BIN ]]; then - echol "Crane binary already exists" - else - file_url=$(head -n 1 <<< "${crane}") - - check_connection wget $file_url - - echol "Downloading crane from: $file_url" - download_file "$file_url" "$script_path" - tar_path="${script_path}/${file_url##*/}" - echol "Unpacking crane from $tar_path to $CRANE_BIN" - tar -xzf "$tar_path" --directory "$script_path" "crane" --overwrite - chmod +x "$CRANE_BIN" - remove_file "$tar_path" - [[ -f $CRANE_BIN ]] || exit_with_error "File not found: $CRANE_BIN" - [[ -x $CRANE_BIN ]] || exit_with_error "$CRANE_BIN has to be executable" - fi -fi - -printf "\n" - -check_connection crane $(for image in $images; do splitted=(${image//:/ }); echo "${splitted[0]}"; done) - -# PACKAGES -# if dependency list doesn't exist or is zero size then resolve dependency and store them in a deplist file -if [[ ! -f $deplist || ! -s $deplist ]]; then - # clean dependency list if process gets interrupted - trap "rm -f $deplist; echol 'Dependency resolution interrupted, cleaning cache file'" SIGINT SIGTERM - echo Resolving dependencies to download. 
This might take a while and will be cached in $deplist - while IFS= read -r package; do - echol "Package read from requirements file: $package" - # if package has a specified version e.g. "name 1.0" store it as "name=1.0*" for compatibility with "apt-get download" - package=$(echo "$package" | awk '{if($2 != "") {print $1 "=" $2 "*"} else {print $1}}') - echol "Package to download: $package" - # store package itself in the list of dependencies... - echol "$package" >> "$deplist" - # .. and create depency list for the package - # (names only for dependencies, no version check here, not necessary as most dependencies are backward-compatible) - dependencies=$(deplist_cmd "$package" | awk '/Depends/ && !/= v0.7.1) -kubeadm 1.18.6 -kubectl 1.18.6 -kubelet 1.18.6 - -# K8s v1.19.15 (Epiphany >= v1.3, transitional version) -kubeadm 1.19.15 -kubectl 1.19.15 -kubelet 1.19.15 - -# K8s v1.20.12 (Epiphany >= v1.3, transitional version) -kubeadm 1.20.12 -kubectl 1.20.12 -kubelet 1.20.12 - -# K8s v1.21.7 (Epiphany >= v1.3, transitional version) -kubeadm 1.21.7 -kubectl 1.21.7 -kubelet 1.21.7 - -# K8s v1.22.4 -kubeadm 1.22.4 -kubectl 1.22.4 -kubelet 1.22.4 - -# Kubernetes Generic -# kubernetes-cni-0.8.6 since K8s v1.18.6 -kubernetes-cni 0.8.6-00 -# kubernetes-cni-0.8.7 since K8s v1.19.15 -kubernetes-cni 0.8.7-00 - -[files] -# --- Packages --- -# Switched from APT repo because there was only one (the latest) version available (issue #2262) -https://packages.elastic.co/curator/5/debian9/pool/main/e/elasticsearch-curator/elasticsearch-curator_5.8.3_amd64.deb -# Grafana package is not downloaded from repository since it was not reliable (issue #2449) -https://dl.grafana.com/oss/release/grafana_8.3.2_amd64.deb -# --- Exporters --- -https://github.com/danielqsj/kafka_exporter/releases/download/v1.4.0/kafka_exporter-1.4.0.linux-amd64.tar.gz -https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.16.1/jmx_prometheus_javaagent-0.16.1.jar -https://github.com/prometheus/node_exporter/releases/download/v1.3.1/node_exporter-1.3.1.linux-amd64.tar.gz -https://github.com/prometheus-community/postgres_exporter/releases/download/v0.10.0/postgres_exporter-0.10.0.linux-amd64.tar.gz -# --- Misc --- -https://archive.apache.org/dist/kafka/2.6.0/kafka_2.12-2.6.0.tgz -https://archive.apache.org/dist/zookeeper/zookeeper-3.5.8/apache-zookeeper-3.5.8-bin.tar.gz -https://github.com/prometheus/alertmanager/releases/download/v0.23.0/alertmanager-0.23.0.linux-amd64.tar.gz -https://github.com/prometheus/prometheus/releases/download/v2.31.1/prometheus-2.31.1.linux-amd64.tar.gz -https://get.helm.sh/helm-v3.2.0-linux-amd64.tar.gz -https://archive.apache.org/dist/logging/log4j/2.17.1/apache-log4j-2.17.1-bin.tar.gz -# --- Helm charts --- -https://charts.bitnami.com/bitnami/node-exporter-2.3.17.tgz -https://helm.elastic.co/helm/filebeat/filebeat-7.9.2.tgz -# --- Grafana Dashboards --- -# Kubernetes Cluster -https://grafana.com/api/dashboards/7249/revisions/1/download grafana_dashboard_7249.json -# Kubernetes cluster monitoring (via Prometheus) -https://grafana.com/api/dashboards/315/revisions/3/download grafana_dashboard_315.json -# Node Exporter for Prometheus -https://grafana.com/api/dashboards/11074/revisions/9/download grafana_dashboard_11074.json -# Node Exporter Server Metrics -https://grafana.com/api/dashboards/405/revisions/8/download grafana_dashboard_405.json -# Postgres Overview -https://grafana.com/api/dashboards/455/revisions/2/download grafana_dashboard_455.json -# PostgreSQL Database 
-https://grafana.com/api/dashboards/9628/revisions/7/download grafana_dashboard_9628.json -# RabbitMQ Monitoring -https://grafana.com/api/dashboards/4279/revisions/4/download grafana_dashboard_4279.json -# Node Exporter Full -https://grafana.com/api/dashboards/1860/revisions/23/download grafana_dashboard_1860.json -# Kafka Exporter Overview -https://grafana.com/api/dashboards/7589/revisions/5/download grafana_dashboard_7589.json -# HaProxy backend (or frontend/servers) -https://grafana.com/api/dashboards/789/revisions/1/download grafana_dashboard_789.json -# Docker and Host Monitoring w/ Prometheus -https://grafana.com/api/dashboards/179/revisions/7/download grafana_dashboard_179.json -# Kubernetes pod and cluster monitoring (via Prometheus) -https://grafana.com/api/dashboards/6663/revisions/1/download grafana_dashboard_6663.json -# RabbitMQ cluster monitoring (via Prometheus) -https://grafana.com/api/dashboards/10991/revisions/11/download grafana_dashboard_10991.json - -[images] -haproxy:2.2.2-alpine -kubernetesui/dashboard:v2.3.1 -kubernetesui/metrics-scraper:v1.0.7 -registry:2 -# applications -bitnami/pgpool:4.2.4 -bitnami/pgbouncer:1.16.0 - -epiphanyplatform/keycloak:14.0.0 -rabbitmq:3.8.9 -# K8s -## v1.18.6 -k8s.gcr.io/kube-apiserver:v1.18.6 -k8s.gcr.io/kube-controller-manager:v1.18.6 -k8s.gcr.io/kube-scheduler:v1.18.6 -k8s.gcr.io/kube-proxy:v1.18.6 -k8s.gcr.io/coredns:1.6.7 -k8s.gcr.io/etcd:3.4.3-0 -quay.io/coreos/flannel:v0.12.0-amd64 -quay.io/coreos/flannel:v0.12.0 -calico/cni:v3.15.0 -calico/kube-controllers:v3.15.0 -calico/node:v3.15.0 -calico/pod2daemon-flexvol:v3.15.0 -## v1.19.15 -k8s.gcr.io/kube-apiserver:v1.19.15 -k8s.gcr.io/kube-controller-manager:v1.19.15 -k8s.gcr.io/kube-scheduler:v1.19.15 -k8s.gcr.io/kube-proxy:v1.19.15 -## v1.20.12 -k8s.gcr.io/kube-apiserver:v1.20.12 -k8s.gcr.io/kube-controller-manager:v1.20.12 -k8s.gcr.io/kube-scheduler:v1.20.12 -k8s.gcr.io/kube-proxy:v1.20.12 -k8s.gcr.io/coredns:1.7.0 -k8s.gcr.io/pause:3.2 -## v1.21.7 -k8s.gcr.io/kube-apiserver:v1.21.7 -k8s.gcr.io/kube-controller-manager:v1.21.7 -k8s.gcr.io/kube-scheduler:v1.21.7 -k8s.gcr.io/kube-proxy:v1.21.7 -k8s.gcr.io/coredns/coredns:v1.8.0 -k8s.gcr.io/etcd:3.4.13-0 -k8s.gcr.io/pause:3.4.1 -## v1.22.4 -k8s.gcr.io/kube-apiserver:v1.22.4 -k8s.gcr.io/kube-controller-manager:v1.22.4 -k8s.gcr.io/kube-scheduler:v1.22.4 -k8s.gcr.io/kube-proxy:v1.22.4 -k8s.gcr.io/coredns/coredns:v1.8.4 -k8s.gcr.io/etcd:3.5.0-0 -k8s.gcr.io/pause:3.5 -quay.io/coreos/flannel:v0.14.0-amd64 -quay.io/coreos/flannel:v0.14.0 -calico/cni:v3.20.3 -calico/kube-controllers:v3.20.3 -calico/node:v3.20.3 -calico/pod2daemon-flexvol:v3.20.3 diff --git a/ansible/playbooks/roles/repository/tasks/Debian/install-packages.yml b/ansible/playbooks/roles/repository/tasks/Debian/install-packages.yml new file mode 100644 index 0000000000..f996249247 --- /dev/null +++ b/ansible/playbooks/roles/repository/tasks/Debian/install-packages.yml @@ -0,0 +1,11 @@ +--- +- name: Install Debian family packages for repository to work + apt: + name: + - python3-pip + state: present + update_cache: true + register: result + retries: 3 + delay: 1 + until: result is succeeded diff --git a/ansible/playbooks/roles/repository/tasks/RedHat/install-packages.yml b/ansible/playbooks/roles/repository/tasks/RedHat/install-packages.yml new file mode 100644 index 0000000000..1c95abbb62 --- /dev/null +++ b/ansible/playbooks/roles/repository/tasks/RedHat/install-packages.yml @@ -0,0 +1,11 @@ +--- +- name: Install RedHat family packages for repository to work + yum: + name: + - 
python3 # python3.6 by default on RHEL7 + - python3-pip + state: present + register: result + retries: 3 + delay: 1 + until: result is succeeded diff --git a/ansible/playbooks/roles/repository/tasks/check-whether-to-run-download.yml b/ansible/playbooks/roles/repository/tasks/check-whether-to-run-download.yml index 7e5f504a64..f0d1fc4452 100644 --- a/ansible/playbooks/roles/repository/tasks/check-whether-to-run-download.yml +++ b/ansible/playbooks/roles/repository/tasks/check-whether-to-run-download.yml @@ -4,12 +4,12 @@ - name: Check if flag file exists stat: - path: /var/tmp/epi-download-requirements/download-requirements-done.flag + path: "{{ download_requirements_flag }}" register: stat_flag_file - name: Remove download-requirements-done.flag file if expired file: - path: /var/tmp/epi-download-requirements/download-requirements-done.flag + path: "{{ download_requirements_flag }}" state: absent when: - stat_flag_file.stat.exists @@ -17,5 +17,5 @@ - name: Check whether to run download script stat: - path: /var/tmp/epi-download-requirements/download-requirements-done.flag + path: "{{ download_requirements_flag }}" register: stat_flag_file diff --git a/ansible/playbooks/roles/repository/tasks/clean-up-epirepo.yml b/ansible/playbooks/roles/repository/tasks/clean-up-epirepo.yml index f294a00d33..e9b26fd3ba 100644 --- a/ansible/playbooks/roles/repository/tasks/clean-up-epirepo.yml +++ b/ansible/playbooks/roles/repository/tasks/clean-up-epirepo.yml @@ -25,20 +25,26 @@ register: files_in_epirepo loop: # loop used by purpose to have separated results - "{{ _apache_epirepo_path }}/files" + - "{{ _apache_epirepo_path }}/grafana_dashboards" - "{{ _apache_epirepo_path }}/images" +- name: Create full list of files + set_fact: + all_files: "{{ (_files_content['distro_files']['files'].keys() | list) | union(_files_content['files']['files'].keys() | list) }}" + - name: Remove old files from epirepo file: state: absent path: "{{ _apache_epirepo_path }}/files/{{ item }}" vars: - files_found: "{{ files_in_epirepo.results[0].files | map(attribute='path') | map('basename') }}" - files_to_preserve: "{{ requirements_file_lines | select('match', 'https?://') - | map('regex_replace', '\\s+#.*$', '') - | map('trim') - | map('regex_replace', '^.+[/\\s](?P[\\S]+)$', '\\g') }}" - files_to_remove: "{{ files_found | difference(files_to_preserve) }}" - loop: "{{ files_to_remove }}" + _files_to_preserve: "{{ all_files | select('match', 'https?://') + | map('regex_replace', '\\s+#.*$', '') + | map('trim') + | map('regex_replace', '^.+[/\\s](?P[\\S]+)$', '\\g') }}" + _files_found: "{{ files_in_epirepo.results[0].files | map(attribute='path') + | map('basename') }}" + _files_to_remove: "{{ _files_found | difference(_files_to_preserve) }}" + loop: "{{ _files_to_remove }}" - name: Load variables from "image_registry" role # generated also in upgrade mode include_vars: diff --git a/ansible/playbooks/roles/repository/tasks/download-requirements.yml b/ansible/playbooks/roles/repository/tasks/download-requirements.yml index 61392a5195..3725546fdd 100644 --- a/ansible/playbooks/roles/repository/tasks/download-requirements.yml +++ b/ansible/playbooks/roles/repository/tasks/download-requirements.yml @@ -4,14 +4,20 @@ - name: |- Run download-requirements script, this can take a long time - You can check progress on repository host with: journalctl -f -t download-requirements.sh + You can check progress on repository host with: journalctl -f -t download-requirements.py shell: >- - set -o pipefail && 
/var/tmp/epi-download-requirements/download-requirements.sh /var/www/html/epirepo --no-logfile |& - tee >(systemd-cat --identifier=download-requirements.sh) + set -o pipefail && + "{{ download_requirements_script }}" \ + /var/www/html/epirepo \ + "{{ download_requirements_os_name }}" \ + --enable-repos-backup \ + --repos-backup-file /var/tmp/enabled-system-repos.tar \ + --no-logfile |& + tee >(systemd-cat --identifier=download-requirements.py) args: executable: /bin/bash - name: Create flag file to not re-download requirements next time file: - path: /var/tmp/epi-download-requirements/download-requirements-done.flag + path: "{{ download_requirements_flag }}" state: touch diff --git a/ansible/playbooks/roles/repository/tasks/setup.yml b/ansible/playbooks/roles/repository/tasks/setup.yml index 273e443ae3..f60ef7e99f 100644 --- a/ansible/playbooks/roles/repository/tasks/setup.yml +++ b/ansible/playbooks/roles/repository/tasks/setup.yml @@ -9,35 +9,43 @@ debug: var: offline_mode -- name: Set directory name for download script +- name: Set OS name for download script set_fact: - download_script_subdir: >- + download_requirements_os_name: >- {{ 'centos-7' if (ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7') else 'redhat-7' if (ansible_distribution == 'RedHat' and ansible_distribution_major_version == '7') else 'ubuntu-20.04' if (ansible_distribution == 'Ubuntu' and ansible_distribution_version == '20.04') else None }} -- name: Download Epiphany requirements on repository host # online mode +- name: Download Epiphany requirements on repository host # online mode block: - - name: Check if OS distribution is supported - assert: - that: download_script_subdir | length > 0 - fail_msg: "Your OS distribution is not supported" + - name: Install required packages for repository + include_tasks: "{{ ansible_os_family }}/install-packages.yml" + + - name: Install Poyo library for YAML support + pip: + name: poyo==0.5.0 + executable: pip3 + extra_args: --user + + - name: Create download-requirements directory + file: + path: "{{ download_requirements_dir }}" + state: directory - name: Copy files for downloading requirements to repository host - copy: - src: download-requirements/{{ download_script_subdir }}/ - dest: /var/tmp/epi-download-requirements + synchronize: + src: download-requirements/ + dest: "{{ download_requirements_dir }}" + recursive: true + rsync_opts: + - "--exclude=tests" # tests not needed + - "--exclude=__pycache__" - name: Make download script executable file: - dest: /var/tmp/epi-download-requirements/download-requirements.sh + dest: "{{ download_requirements_script }}" mode: a+x - - name: Copy common files for downloading requirements to repository host - copy: - src: download-requirements/common - dest: /var/tmp/epi-download-requirements - when: - not offline_mode - not custom_repository_url @@ -72,8 +80,20 @@ offline_mode or not stat_flag_file.stat.exists # do not clean up when skipping download vars: - _requirements_file_path: download-requirements/{{ download_script_subdir }}/requirements.{{ ansible_architecture }}.txt - requirements_file_lines: "{{ lookup('file', _requirements_file_path).split('\n') }}" + # general paths: + _reqs_dir: download-requirements/requirements + _arch_dir: "{{ _reqs_dir }}/{{ ansible_architecture }}" + _distro_dir: "{{ _arch_dir }}/{{ download_requirements_os_name }}" + + # file paths: + _files_layout: + distro_files: "{{ _distro_dir }}/files.yml" + files: "{{ _arch_dir }}/files.yml" + + # contents: + _files_content: + 
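+ # parsed requirements content; used by clean-up-epirepo.yml to decide which files in epirepo to preserve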
distro_files: "{{ lookup('file', _files_layout['distro_files']) | from_yaml }}" + files: "{{ lookup('file', _files_layout['files']) | from_yaml }}" - name: |- Copy requirements for offline installation to repository host, this can take a long time diff --git a/cli/epicli.py b/cli/epicli.py index 36de47ac29..f6af5c9fa1 100644 --- a/cli/epicli.py +++ b/cli/epicli.py @@ -9,6 +9,8 @@ import sys import time +from typing import List + from cli.licenses import LICENSES from cli.src.commands.Apply import Apply from cli.src.commands.Backup import Backup @@ -18,7 +20,7 @@ from cli.src.commands.Recovery import Recovery from cli.src.commands.Test import Test from cli.src.commands.Upgrade import Upgrade -from cli.src.Config import Config +from cli.src.Config import Config, SUPPORTED_OS from cli.src.helpers.build_io import get_output_path, save_to_file from cli.src.helpers.cli_helpers import prompt_for_password, query_yes_no from cli.src.helpers.time_helpers import format_time @@ -160,14 +162,19 @@ def prepare_parser(subparsers): optional = sub_parser._action_groups.pop() required = sub_parser.add_argument_group('required arguments') - #required - required.add_argument('--os', type=str, required=True, dest='os', choices=['ubuntu-20.04', 'redhat-7', 'centos-7'], - help='The OS to prepare the offline requirements for: ubuntu-20.04|redhat-7|centos-7') + # required + supported_os: List[str] = list(SUPPORTED_OS.keys()) + required.add_argument('--os', type=str, required=True, dest='os', choices=supported_os, + help=f'The OS to prepare the offline requirements for: {"|".join(supported_os)}') - #optional + supported_arch: List[str] = list(set([arch for archs in SUPPORTED_OS.values() for arch in archs])) + required.add_argument('--arch', type=str, required=True, dest='arch', choices=supported_arch, + help=f'The OS architecture type to be used: {"|".join(supported_arch)}') + + # optional optional.add_argument('-o', '--output_dir', dest='output_dir', type=str, required=False, - help='Output directory for the offline requirement scripts.', - default=None) + help='Output directory for the offline requirement scripts.', + default=None) sub_parser._action_groups.append(optional) def run_prepare(args): diff --git a/cli/src/Config.py b/cli/src/Config.py index 06b4f6bdc9..f13810afaa 100644 --- a/cli/src/Config.py +++ b/cli/src/Config.py @@ -1,11 +1,21 @@ import os from os.path import expanduser +from typing import Dict, List + LOG_TYPES = ['plain', 'json'] + +SUPPORTED_OS: Dict[str, List[str]] = { + 'ubuntu-20.04': ['x86_64'], + 'redhat-7': ['x86_64'] +} + + class InvalidLogTypeException(Exception): pass + class Config: class __ConfigBase: def __init__(self): diff --git a/cli/src/commands/Prepare.py b/cli/src/commands/Prepare.py index 154568a00e..b05c9ba535 100644 --- a/cli/src/commands/Prepare.py +++ b/cli/src/commands/Prepare.py @@ -1,21 +1,23 @@ import os import stat +from pathlib import Path +from shutil import copy, copytree +from typing import Dict -from cli.src.Config import Config -from cli.src.helpers.build_io import copy_files_recursively +from cli.src.Config import Config, SUPPORTED_OS from cli.src.helpers.data_loader import BASE_DIR from cli.src.Step import Step class Prepare(Step): - PREPARE_PATH = f'{BASE_DIR}/ansible/playbooks/roles/repository/files/download-requirements' - COMMON_PATH = f'{PREPARE_PATH}/common' - CHARTS_PATH = f'{BASE_DIR}/ansible/playbooks/roles/helm_charts/files/system' + PREPARE_PATH: Path = Path(f'{BASE_DIR}/ansible/playbooks/roles/repository/files/download-requirements') + 
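+ # source locations shipped with epicli; prepare() copies the required pieces from here into the output directory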
diff --git a/cli/src/commands/Prepare.py b/cli/src/commands/Prepare.py
index 154568a00e..b05c9ba535 100644
--- a/cli/src/commands/Prepare.py
+++ b/cli/src/commands/Prepare.py
@@ -1,21 +1,23 @@
 import os
 import stat

+from pathlib import Path
+from shutil import copy, copytree
+from typing import Dict
+
-from cli.src.Config import Config
-from cli.src.helpers.build_io import copy_files_recursively
+from cli.src.Config import Config, SUPPORTED_OS
 from cli.src.helpers.data_loader import BASE_DIR
 from cli.src.Step import Step


 class Prepare(Step):
-    PREPARE_PATH = f'{BASE_DIR}/ansible/playbooks/roles/repository/files/download-requirements'
-    COMMON_PATH = f'{PREPARE_PATH}/common'
-    CHARTS_PATH = f'{BASE_DIR}/ansible/playbooks/roles/helm_charts/files/system'
+    PREPARE_PATH: Path = Path(f'{BASE_DIR}/ansible/playbooks/roles/repository/files/download-requirements')
+    CHARTS_PATH: Path = Path(f'{BASE_DIR}/ansible/playbooks/roles/helm_charts/files/system')

     def __init__(self, input_data):
         super().__init__(__name__)
-        self.os = input_data.os
-        self.output_dir = input_data.output_dir
+        self.os: str = input_data.os
+        self.arch: str = input_data.arch
+        self.output_dir: str = input_data.output_dir

     def __enter__(self):
         super().__enter__()
@@ -24,32 +26,55 @@
     def __exit__(self, exc_type, exc_value, traceback):
         pass

-    def prepare(self):
-        prepare_src = os.path.join(self.PREPARE_PATH, self.os)
+    def prepare(self) -> int:
+        if self.arch not in SUPPORTED_OS[self.os]:
+            raise Exception(f'Error: chosen arch: {self.arch} is not supported for os: {self.os}')

-        if self.output_dir:
-            prepare_dst = os.path.join(Config().output_dir, self.output_dir)
-        else:
-            prepare_dst = os.path.join(Config().output_dir, 'prepare_scripts_' + self.os.replace('-', '_').replace('.', ''))
+        repositories_path: Path = self.PREPARE_PATH / 'repositories'
+        repositories_arch_path: Path = repositories_path / f'{self.arch}'
+        repositories_file_path: Path = repositories_arch_path / f'{self.os}.yml'

-        charts_dst = os.path.join(prepare_dst, 'charts', 'system')
+        requirements_path: Path = self.PREPARE_PATH / 'requirements'
+        arch_path: Path = requirements_path / self.arch
+        distro_path: Path = arch_path / self.os

-        if not os.path.exists(prepare_src):
-            supported_os = os.listdir(self.PREPARE_PATH)
-            raise Exception(f'Unsupported OS: {self.os}. Currently supported: {supported_os}')
+        dest_path: Path = Path(Config().output_dir)
+        dest_path /= self.output_dir if self.output_dir else 'prepare_scripts'
+
+        charts_path = dest_path / 'charts/system'
+
+        # source : destination
+        download_requirements_paths: Dict[Path, Path] = {
+            arch_path / 'cranes.yml': dest_path / f'requirements/{self.arch}',
+            arch_path / 'files.yml': dest_path / f'requirements/{self.arch}',
+            arch_path / 'images.yml': dest_path / f'requirements/{self.arch}',
+            self.CHARTS_PATH: charts_path,
+            distro_path / 'packages.yml': dest_path / f'requirements/{self.arch}/{self.os}',
+            repositories_file_path: dest_path / f'repositories/{self.arch}',
+            requirements_path / 'grafana-dashboards.yml': dest_path / 'requirements',
+            self.PREPARE_PATH / 'download-requirements.py': dest_path,
+            self.PREPARE_PATH / 'src': dest_path / 'src',
+        }
+
+        distro_files: Path = distro_path / 'files.yml'
+        if distro_files.exists():  # specific files for target distro are optional
+            download_requirements_paths[distro_files] = dest_path / f'requirements/{self.arch}/{self.os}'

         # copy files to output dir
-        copy_files_recursively(prepare_src, prepare_dst)
-        copy_files_recursively(self.COMMON_PATH, os.path.join(prepare_dst, 'common'))
-        copy_files_recursively(self.CHARTS_PATH, charts_dst)
+        for source, destination in download_requirements_paths.items():
+            destination.mkdir(exist_ok=True, parents=True)
+            if source.is_dir():
+                copytree(source, destination, dirs_exist_ok=True)
+            else:
+                copy(source, destination)

         # make sure the scripts are executable
-        self.make_file_executable(os.path.join(prepare_dst, 'download-requirements.sh'))
+        self.make_file_executable(dest_path / 'download-requirements.py')

-        self.logger.info(f'Prepared files for downloading the offline requirements in: {prepare_dst}')
+        self.logger.info(f'Prepared files for downloading the offline requirements in: {dest_path}')
         return 0

     @staticmethod
-    def make_file_executable(file):
+    def make_file_executable(file: Path):
         executable_stat = os.stat(file)
         os.chmod(file, executable_stat.st_mode | stat.S_IEXEC)
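The refactored `prepare()` drives all copying from one source-to-destination mapping instead of three bespoke recursive copies. A reduced, runnable sketch of that loop; the demo paths are placeholders, not the repository's real layout:

```python
# Sketch of the Dict[Path, Path] copy loop from Prepare.prepare() above.
from pathlib import Path
from shutil import copy, copytree
from typing import Dict

def copy_paths(paths: Dict[Path, Path]) -> None:
    for source, destination in paths.items():
        destination.mkdir(exist_ok=True, parents=True)  # ensure target dir exists
        if source.is_dir():
            copytree(source, destination, dirs_exist_ok=True)  # Python 3.8+
        else:
            copy(source, destination)  # copy the file into the directory

if __name__ == '__main__':
    demo = Path('demo.txt')  # placeholder source file
    demo.write_text('hello\n')
    copy_paths({demo: Path('out')})
```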
diff --git a/docs/changelogs/CHANGELOG-2.0.md b/docs/changelogs/CHANGELOG-2.0.md
index cd93b49d0f..cb86d15c4b 100644
--- a/docs/changelogs/CHANGELOG-2.0.md
+++ b/docs/changelogs/CHANGELOG-2.0.md
@@ -10,6 +10,7 @@
 - [#2950](https://github.com/epiphany-platform/epiphany/issues/2950) - CLI refactor to make it more consistant
 - [#2844](https://github.com/epiphany-platform/epiphany/issues/2844) - Refactor K8s upgrade task in order to simplify its flow
 - [#2716](https://github.com/epiphany-platform/epiphany/issues/2716) - Change container runtime to containerd
+- [#805](https://github.com/epiphany-platform/epiphany/issues/805) - Refactor download-requirements script

 ### Fixed
diff --git a/docs/home/COMPONENTS.md b/docs/home/COMPONENTS.md
index 7328ff5628..23f77bb4c0 100644
--- a/docs/home/COMPONENTS.md
+++ b/docs/home/COMPONENTS.md
@@ -213,6 +213,7 @@ Note that versions are default versions and can be changed in certain cases thro
 | websocket-client | 0.56.0 | https://github.com/websocket-client/websocket-client.git | BSD |
 | wrapt | 1.13.3 | https://github.com/GrahamDumpleton/wrapt | [BSD 2-Clause "Simplified" License](https://api.github.com/repos/grahamdumpleton/wrapt/license) |
 | xmltodict | 0.12.0 | https://github.com/martinblech/xmltodict | [MIT License](https://api.github.com/repos/martinblech/xmltodict/license) |
+| poyo | 0.5.0 | https://github.com/hackebrot/poyo | [MIT License](https://github.com/hackebrot/poyo/blob/main/LICENSE) |

 ## Predefined Grafana dashboards
diff --git a/docs/home/howto/CLUSTER.md b/docs/home/howto/CLUSTER.md
index c2f6914fd7..ac4663a1b4 100644
--- a/docs/home/howto/CLUSTER.md
+++ b/docs/home/howto/CLUSTER.md
@@ -209,12 +209,11 @@ or VMs and should meet the following requirements:
 1. The air-gapped cluster machines/VMs are connected by a network or virtual network of some sorts and can communicate with each other.
 2. The air-gapped cluster machines/VMs are running one of the following Linux distributions:
    - RedHat 7.6+ and < 8
-   - CentOS 7.6+ and < 8
    - Ubuntu 20.04
 3. The cluster machines/VMs are accessible through SSH with a set of SSH keys you provide and configure on each machine yourself (key-based authentication).
 4. The user used for SSH connection (`admin_user`) has passwordless root privileges through `sudo`.
 5. A requirements machine that:
-   - Runs the same distribution as the air-gapped cluster machines/VMs (RedHat 7, CentOS 7, Ubuntu 20.04)
+   - Runs the same distribution as the air-gapped cluster machines/VMs (RedHat 7, Ubuntu 20.04)
    - Has access to the internet. If you don't have access to a similar machine/VM with internet access, you can also try to download the requirements with a Docker container. More information [here](./CLUSTER.md#downloading-offline-requirements-with-a-docker-container).
 6. A provisioning machine that:
@@ -228,18 +227,26 @@ To set up the cluster do the following steps:

 1. First we need to get the tooling to prepare the requirements. On the provisioning machine run:

    ```shell
-   epicli prepare --os OS
+   epicli prepare --os OS --arch ARCH
    ```

-   Where OS should be `centos-7`, `redhat-7`, `ubuntu-20.04`. This will create a directory called `prepare_scripts` with the needed files inside.
+   Where:
+   - OS should be `redhat-7`, `ubuntu-20.04`
+   - ARCH should be `x86_64`, `arm64`
+
+   This will create a directory called `prepare_scripts` with the needed files inside.

 2. The scripts in the `prepare_scripts` will be used to download all requirements. To do that copy the `prepare_scripts` folder over to the requirements machine and run the following command:

    ```shell
-   download-requirements.sh /requirementsoutput/
+   download-requirements.py /requirementsoutput/ OS
    ```

-   This will start downloading all requirements and put them in the `/requirementsoutput/` folder. Once run successfully the `/requirementsoutput/` needs to be copied to the provisioning machine to be used later on.
+   Where:
+   - OS should be `redhat-7`, `ubuntu-20.04`, `detect`
+   - /requirementsoutput/ is where to output the downloaded requirements
+
+   This will run the download-requirements script for the target OS type and save the requirements under /requirementsoutput/. Once run successfully the `/requirementsoutput/` needs to be copied to the provisioning machine to be used later on.

 3. Then generate a minimal data yaml file on the provisioning machine:
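The docs above allow passing `detect` in place of an explicit OS name. This diff does not show the script's actual detection code, so the following is only a plausible sketch of such detection via `/etc/os-release`, mapping onto the two supported names:

```python
# Plausible sketch only: how `detect` might resolve the OS name.
# The real download-requirements.py logic is not part of this diff.
from pathlib import Path
from typing import Dict

def detect_os_name() -> str:
    fields: Dict[str, str] = {}
    for line in Path('/etc/os-release').read_text().splitlines():
        if '=' in line:
            key, _, value = line.partition('=')
            fields[key] = value.strip('"')

    distro_id = fields.get('ID', '')
    version = fields.get('VERSION_ID', '')

    if distro_id == 'ubuntu' and version == '20.04':
        return 'ubuntu-20.04'
    if distro_id in ('rhel', 'redhat') and version.startswith('7'):
        return 'redhat-7'
    raise SystemExit(f'Unsupported OS: {distro_id} {version}')

if __name__ == '__main__':
    print(detect_os_name())
```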
@@ -1096,15 +1103,15 @@ docker run -v /shared_folder:/home <--platform linux/amd64 or --platform linux/a

 As the ```ubuntu:20.04``` image is multi-arch you can include ```--platform linux/amd64``` or ```--platform linux/arm64``` to run the container as the specified architecture. The ```/shared_folder``` should be a folder on your local machine containing the required scripts.

-When you are inside the container run the following commands to prepare for the running of the ```download-requirements.sh``` script:
+When you are inside the container run the following commands to prepare for running the ```download-requirements.py``` script:

 ```shell
 apt-get update # update the package manager
-apt-get install sudo # install sudo so we can make the download-requirements.sh executable and run it as root
-sudo chmod +x /home/download-requirements.sh # make the requirements script executable
+apt-get install sudo # install sudo so we can make download-requirements.py executable and run it as root
+sudo chmod +x /home/download-requirements.py # make the requirements script executable
 ```

-After this you should be able to run the ```download-requirements.sh``` from the ```home``` folder.
+After this you should be able to run ```download-requirements.py``` from the ```home``` folder.

 ### RedHat 7.x

@@ -1116,40 +1123,14 @@ docker run -v /shared_folder:/home <--platform linux/amd64 or --platform linux/a

 As the ```registry.access.redhat.com/ubi7/ubi:7.9``` image is multi-arch you can include ```--platform linux/amd64``` or ```--platform linux/arm64``` to run the container as the specified architecture. The ```/shared_folder``` should be a folder on your local machine containing the requirement scripts.

-For running the ```download-requirements.sh``` script you will need a RedHat developer subscription to register the running container and make sure you can access to official Redhat repos for the packages needed. More information on getting this free subscription [here](https://developers.redhat.com/articles/getting-red-hat-developer-subscription-what-rhel-users-need-know).
+For running the ```download-requirements.py``` script you will need a RedHat developer subscription to register the running container and make sure you can access the official RedHat repos for the packages needed. More information on getting this free subscription [here](https://developers.redhat.com/articles/getting-red-hat-developer-subscription-what-rhel-users-need-know).
-When you are inside the container run the following commands to prepare for the running of the ```download-requirements.sh``` script:
+When you are inside the container run the following commands to prepare for running the ```download-requirements.py``` script:

 ```shell
 subscription-manager register # will ask for you credentials of your RedHat developer subscription and setup the container
 subscription-manager attach --auto # will enable the RedHat official repositories
-chmod +x /home/download-requirements.sh # make the requirements script executable
-```
-
-After this you should be able to run the ```download-requirements.sh``` from the ```home``` folder.
-
-### CentOS 7.x
-
-For CentOS, you can use the following command to launch a container:
-
-arm64:
-
-```shell
-docker run -v /shared_folder:/home --platform linux/arm64 --rm -it arm64v8/centos:7.9.2009
-```
-
-x86_64:
-
-```shell
-docker run -v /shared_folder:/home --platform linux/amd64 --rm -it amd64/centos:7.9.2009
-```
-
-The ```/shared_folder``` should be a folder on your local machine containing the requirement scripts.
-
-When you are inside the container run the following commands to prepare for the running of the ```download-requirements.sh``` script:
-
-```shell
-chmod +x /home/download-requirements.sh # make the requirements script executable
+chmod +x /home/download-requirements.py # make the requirements script executable
 ```

-After this you should be able to run the ```download-requirements.sh``` from the ```home``` folder.
+After this you should be able to run ```download-requirements.py``` from the ```home``` folder.
diff --git a/docs/home/howto/REPOSITORY.md b/docs/home/howto/REPOSITORY.md
index a8ef266cac..d984953459 100644
--- a/docs/home/howto/REPOSITORY.md
+++ b/docs/home/howto/REPOSITORY.md
@@ -37,7 +37,7 @@ When you see the following output from epicli, requirements are being downloaded

 ```shell
 INFO cli.src.ansible.AnsibleCommand - TASK [repository : Run download-requirements script, this can take a long time
-INFO cli.src.ansible.AnsibleCommand - You can check progress on repository host with: journalctl -f -t download-requirements.sh] ***
+INFO cli.src.ansible.AnsibleCommand - You can check progress on repository host with: journalctl -f -t download-requirements.py] ***
 ```

 As noted this process can take a long time depending on the connection and as downloading requirements is being done by a shell script, the ```Ansible``` process cannot return any realtime information.
@@ -45,7 +45,7 @@ As noted this process can take a long time depending on the connection and as do
 To view the progress during the downloading (realtime output from the logs), one can SSH into the repository machine and run:

 ```shell
-journalctl -f -t download-requirements.sh
+journalctl -f -t download-requirements.py
 ```

 If for some reason the download-requirements fails you can also always check the log afterwards on the repository machine here:
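The journalctl workflow in REPOSITORY.md works because the run task earlier in this patch tees the script's output into `systemd-cat` under the `download-requirements.py` identifier. A tiny illustrative sketch of the same journal-mirroring idea from Python, runnable on a systemd host; the log line itself is made up:

```python
# Illustrative only: emit a line to the journal under the identifier that
# `journalctl -f -t download-requirements.py` follows. Requires systemd-cat.
import subprocess

journal = subprocess.Popen(
    ['systemd-cat', '--identifier=download-requirements.py'],
    stdin=subprocess.PIPE, text=True)
journal.communicate('downloading requirements...\n')  # hypothetical log line
```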
diff --git a/docs/home/howto/UPGRADE.md b/docs/home/howto/UPGRADE.md
index ca64f5a12e..0086bbb0b1 100644
--- a/docs/home/howto/UPGRADE.md
+++ b/docs/home/howto/UPGRADE.md
@@ -105,21 +105,27 @@ To upgrade the cluster components run the following steps:

 1. First we need to get the tooling to prepare the requirements for the upgrade. On the provisioning machine run:

    ```shell
-   epicli prepare --os OS
+   epicli prepare --os OS --arch ARCH
    ```

-   Where OS should be `centos-7`, `redhat-7`, `ubuntu-20.04`. This will create a directory called `prepare_scripts` with
-   the needed files inside.
+   Where:
+   - OS should be `redhat-7`, `ubuntu-20.04`
+   - ARCH should be `x86_64`, `arm64`
+
+   This will create a directory called `prepare_scripts` with the needed files inside.

 2. The scripts in the `prepare_scripts` will be used to download all requirements. To do that, copy the `prepare_scripts` folder over to the requirements machine and run the following command:

    ```shell
-   download-requirements.sh /requirementsoutput/
+   download-requirements.py /requirementsoutput/ OS
    ```

-   This will start downloading all requirements and put them in the `/requirementsoutput/` folder. Once run successfully
-   the `/requirementsoutput/` needs to be copied to the provisioning machine to be used later on.
+   Where:
+   - OS should be `redhat-7`, `ubuntu-20.04`, `detect`
+   - /requirementsoutput/ is where to output the downloaded requirements
+
+   This will run the download-requirements script for the target OS type and save the requirements under /requirementsoutput/. Once run successfully the `/requirementsoutput/` needs to be copied to the provisioning machine to be used later on.

 3. Finally, start the upgrade with: