From 1c5ae22fc2e4e0b8834765c892a710f93ad9ae24 Mon Sep 17 00:00:00 2001 From: "Qi (Keira) Zhang" Date: Tue, 15 Aug 2023 15:07:14 +0800 Subject: [PATCH] [Linux] Support RHEL CoreOS automation testing (#477) Signed-off-by: Qi Zhang --- README.md | 1 + common/collect_ovf_vm_config.yml | 2 +- common/create_temp_file_dir.yml | 10 +- common/vm_remove_serial_port.yml | 17 +++- common/vm_upgrade_hardware_version.yml | 1 + .../check_inbox_driver/check_inbox_driver.yml | 1 - .../check_quiesce_snapshot_custom_script.yml | 94 ++++++++++--------- .../vss_script_sh.sh | 2 +- linux/deploy_vm/collect_serial_port_log.yml | 23 +++++ linux/deploy_vm/collect_vm_logs.yml | 28 +++--- linux/deploy_vm/deploy_vm_from_iso.yml | 26 +---- linux/deploy_vm/deploy_vm_from_ova.yml | 66 ++++++++++--- .../deploy_vm/flatcar/flatcar_post_config.yml | 38 +++----- .../flatcar/generate_ignition_config.yml | 44 --------- linux/deploy_vm/flatcar/ignition_config.j2 | 39 -------- .../flatcar/reconfigure_flatcar_vm.yml | 53 ----------- linux/deploy_vm/generate_ignition_config.yml | 33 +++++++ linux/deploy_vm/get_ova_guest_os_type.yml | 5 + .../reconfigure_vm_with_cloudinit.yml | 75 +++++---------- .../reconfigure_vm_with_ignition.yml | 46 +++++++++ .../templates/amazon-ova-user-data.j2 | 9 +- linux/deploy_vm/templates/ignition_config.j2 | 65 +++++++++++++ .../templates/photon-ova-user-data.j2 | 11 ++- .../templates/ubuntu-ova-user-data.j2 | 7 +- linux/deploy_vm/upgrade_ova_vm_hwv.yml | 7 +- .../check_gosc_support_status.yml | 14 ++- .../check_pvrdma_support_status.yml | 2 +- .../nvdimm_cold_add_remove.yml | 3 +- linux/open_vm_tools/ovt_verify_install.yml | 4 +- linux/open_vm_tools/ovt_verify_uninstall.yml | 2 +- linux/utils/create_seed_iso.yml | 4 +- linux/utils/get_installed_package_info.yml | 2 +- linux/utils/get_linux_system_info.yml | 7 ++ .../vhba_device_hot_add_remove.yml | 2 +- .../wait_device_list_changed.yml | 7 +- 35 files changed, 404 insertions(+), 346 deletions(-) create mode 100644 linux/deploy_vm/collect_serial_port_log.yml delete mode 100644 linux/deploy_vm/flatcar/generate_ignition_config.yml delete mode 100644 linux/deploy_vm/flatcar/ignition_config.j2 delete mode 100644 linux/deploy_vm/flatcar/reconfigure_flatcar_vm.yml create mode 100644 linux/deploy_vm/generate_ignition_config.yml create mode 100644 linux/deploy_vm/reconfigure_vm_with_ignition.yml create mode 100644 linux/deploy_vm/templates/ignition_config.j2 diff --git a/README.md b/README.md index 94a62befb..d34a471ee 100644 --- a/README.md +++ b/README.md @@ -91,6 +91,7 @@ This project supports below scenarios for end-to-end guest operating system vali | openSUSE Leap 15.3 and later | :heavy_check_mark: | | :heavy_check_mark: | | BCLinux 8.x | :heavy_check_mark: | | :heavy_check_mark: | | BCLinux-for-Euler 21.10 | :heavy_check_mark: | | :heavy_check_mark: | +| Red Hat Enterprise Linux CoreOS (RHCOS) 4.13 and later | | :heavy_check_mark: | :heavy_check_mark: | **Notes** 1. This compatible guest operating systems list is used for this project only. For guest operating system support status on ESXi, please refer to [VMware Compatibility Guide](https://www.vmware.com/resources/compatibility/search.php?deviceCategory=software&testConfig=16). 
diff --git a/common/collect_ovf_vm_config.yml b/common/collect_ovf_vm_config.yml
index be66a72cc..14438ed92 100644
--- a/common/collect_ovf_vm_config.yml
+++ b/common/collect_ovf_vm_config.yml
@@ -191,7 +191,7 @@
     ansible.builtin.set_fact:
       ovf_vm_guest_os: "{{ guest_detailed_data_dict.prettyName | default('') }}"
       ovf_vm_guest_build: "{{ guest_detailed_data_dict.kernelVersion | default('') }}"
-      ovf_vm_guest_bitness: "{{ guest_detailed_data_dict.bitness | default('') }}"
+      ovf_vm_guest_bitness: "{{ guest_detailed_data_dict.bitness | default('') }}"
 
 - name: "Set fact of Linux guest OS distribution"
   block:
diff --git a/common/create_temp_file_dir.yml b/common/create_temp_file_dir.yml
index 95b28a6a6..7a698716a 100644
--- a/common/create_temp_file_dir.yml
+++ b/common/create_temp_file_dir.yml
@@ -3,10 +3,10 @@
 ---
 # Create a temporary file or directory
 # Parameters:
+# tmp_state: file or directory. By default, temporary file will be created.
 # tmp_dir: (Optional) the location where temporary file or diretotry will be created.
-# tmp_state: file or directory. By default, temporary file will be created
-# tmp_prefix: the prefix of temporary file or directory
-# tmp_suffix: the suffix of temporar file or directory
+# tmp_prefix: (Optional) the prefix of temporary file or directory.
+# tmp_suffix: (Optional) the suffix of temporary file or directory.
 # Return:
 # tmp_path: the path to temporary file or directory
 #
@@ -31,11 +31,11 @@
 - name: "Modify the mode of temporary file"
   ansible.builtin.file:
     path: "{{ tmp_path }}"
-    mode: "0644"
+    mode: "0666"
   when: tmp_state == "file"
 
 - name: "Modify the mode of temporary directory"
   ansible.builtin.file:
     path: "{{ tmp_path }}"
-    mode: "0755"
+    mode: "0777"
   when: tmp_state == "directory"
diff --git a/common/vm_remove_serial_port.yml b/common/vm_remove_serial_port.yml
index 9bf5a3980..f2d29a761 100644
--- a/common/vm_remove_serial_port.yml
+++ b/common/vm_remove_serial_port.yml
@@ -4,9 +4,9 @@
 # Remove a serial port to VM by using output file
 # Parameter:
 # vm_serial_port_file_path: The serial port output file on datastore.
- -# Before removing serial port, remove the serial port output file -- include_tasks: esxi_check_delete_datastore_file.yml +# +- name: "Remove the serial port output file before removing serial port" + include_tasks: esxi_check_delete_datastore_file.yml vars: file_in_datastore: "{{ datastore }}" file_in_datastore_path: "{{ vm_serial_port_file_path.split(']')[-1].strip(' ') }}" @@ -28,3 +28,14 @@ - name: "Display result of removing serial port" ansible.builtin.debug: var=remove_serial_port + +- name: "Check serial port is removed" + ansible.builtin.assert: + that: + - remove_serial_port.changed is defined + - remove_serial_port.changed + fail_msg: "Failed to remove serial port from VM" + +- name: "Clean serial port output file path on datastore" + ansible.builtin.set_fact: + vm_serial_port_file_path: "" diff --git a/common/vm_upgrade_hardware_version.yml b/common/vm_upgrade_hardware_version.yml index 9f5e9a9ff..818959e18 100644 --- a/common/vm_upgrade_hardware_version.yml +++ b/common/vm_upgrade_hardware_version.yml @@ -18,6 +18,7 @@ - name: "Update VM hardware version" ansible.builtin.set_fact: vm_hardware_version: "{{ vm_upgrade_hw_result.instance.hw_version }}" + vm_hardware_version_num: "{{ vm_upgrade_hw_result.instance.hw_version.split('-')[1] }}" - ansible.builtin.debug: msg: "VM hardware version is upgraded to: {{ vm_hardware_version }}" diff --git a/linux/check_inbox_driver/check_inbox_driver.yml b/linux/check_inbox_driver/check_inbox_driver.yml index 93b6e3ecb..fd7f8ab26 100644 --- a/linux/check_inbox_driver/check_inbox_driver.yml +++ b/linux/check_inbox_driver/check_inbox_driver.yml @@ -9,7 +9,6 @@ - name: check_inbox_driver hosts: localhost gather_facts: false - vars: tasks: - name: "Initialized inbox drivers' versions dict" ansible.builtin.set_fact: diff --git a/linux/check_quiesce_snapshot_custom_script/check_quiesce_snapshot_custom_script.yml b/linux/check_quiesce_snapshot_custom_script/check_quiesce_snapshot_custom_script.yml index d2ead3a78..c8f19dcb1 100644 --- a/linux/check_quiesce_snapshot_custom_script/check_quiesce_snapshot_custom_script.yml +++ b/linux/check_quiesce_snapshot_custom_script/check_quiesce_snapshot_custom_script.yml @@ -11,7 +11,8 @@ tasks: - name: "Test case block" block: - - include_tasks: ../setup/test_setup.yml + - name: "Test setup" + include_tasks: ../setup/test_setup.yml vars: skip_test_no_vmtools: true @@ -23,16 +24,19 @@ when: guest_os_ansible_distribution == 'FreeBSD' # Take quiesce snapshot - - name: Set quiesce snapshot variables + - name: "Set quiesce snapshot variables" ansible.builtin.set_fact: qs_snapshot_name: "quiesce_snapshot_{{ lookup('pipe', 'date +%Y-%m-%d-%H-%M-%S') }}" backup_script_dir: "/etc/vmware-tools/backupScripts.d" vss_shell_script: "vss_script_sh.sh" vss_bin_script: "vss_script_bin_{{ guest_os_bit | replace('-', '') }}" - vss_log_path: "/vss.log" + vss_log_path: |- + {%- if guest_os_ansible_distribution in ['Flatcar', 'RHCOS'] -%}/var/vss.log + {%- else -%}/vss.log + {%- endif -%} vss_content: "" - # Copy vss_script_sh.sh to /etc/vmware-tools/backupScripts.d/vss_script_sh.sh + # Create vss_script_sh.sh under /etc/vmware-tools/backupScripts.d/ - name: "Create backup script dir {{ backup_script_dir }}" ansible.builtin.file: path: "{{ backup_script_dir }}" @@ -40,15 +44,15 @@ mode: 0755 delegate_to: "{{ vm_guest_ip }}" - - name: "Copy custom script {{ vss_shell_script }}" - ansible.builtin.copy: + - name: "Add custom shell script {{ vss_shell_script }}" + ansible.builtin.template: src: "{{ vss_shell_script }}" dest: "{{ 
backup_script_dir }}/{{ vss_shell_script }}" mode: 0755 delegate_to: "{{ vm_guest_ip }}" # Copy pre-freeze-script and post-thaw-script to /usr/sbin/ - - name: Copy custom script pre-freeze-script and post-thaw-script to /usr/sbin + - name: "Copy custom binary scripts pre-freeze-script and post-thaw-script to /usr/sbin" ansible.builtin.copy: src: "{{ vss_bin_script }}" dest: "/usr/sbin/{{ item }}" @@ -57,26 +61,21 @@ - pre-freeze-script - post-thaw-script delegate_to: "{{ vm_guest_ip }}" - when: guest_os_ansible_distribution != 'Flatcar' + when: guest_os_ansible_distribution not in ['Flatcar', 'RHCOS'] - # Ensure /vss.log is absent - name: "Remove {{ vss_log_path }} if it exists" ansible.builtin.file: path: "{{ vss_log_path }}" state: absent delegate_to: "{{ vm_guest_ip }}" - - include_tasks: ../../common/vm_take_snapshot.yml + - name: "Take quiesce snapshot" + include_tasks: ../../common/vm_take_snapshot.yml vars: snapshot_name: "{{ qs_snapshot_name }}" is_quiesce: "yes" dump_memory: "no" - # After snapshot is taken, check log file /vss.log in guest, which should have below contents: - # /usr/sbin/pre-freeze-script freeze - # /etc/vmware-tools/backupScripts.d/vss_script_sh.sh freeze - # /etc/vmware-tools/backupScripts.d/vss_script_sh.sh thaw - # /usr/sbin/post-thaw-script thaw - name: "Check {{ vss_log_path }} existence" include_tasks: ../utils/get_file_stat_info.yml vars: @@ -87,45 +86,54 @@ msg: "{{ vss_log_path }} doesn't exist after quiesce snapshot" when: not (guest_file_exists | bool) - # Retrieve /vss.log - - include_tasks: ../../common/vm_guest_file_operation.yml + - name: "Retrieve {{ vss_log_path }}" + include_tasks: ../../common/vm_guest_file_operation.yml vars: operation: "fetch_file" src_path: "{{ vss_log_path }}" dest_path: "{{ current_test_log_folder }}{{ vss_log_path }}" - - name: "Get file content in /vss.log" + - name: "Get file content in {{ vss_log_path }}" ansible.builtin.shell: "cat {{ current_test_log_folder }}{{ vss_log_path }}" changed_when: false register: vss_content - # Test failed for non-Flatcar guest - - name: "{{ ansible_play_name }} failed" - ansible.builtin.fail: - msg: "{{ vss_log_path }} has incorrect content" - when: (guest_os_ansible_distribution != 'Flatcar') and - (( not vss_content ) or - ( vss_content.stdout_lines is undefined ) or - ( vss_content.stdout_lines | length != 4 ) or - ( vss_content.stdout_lines[0] != "/usr/sbin/pre-freeze-script freeze" ) or - ( vss_content.stdout_lines[1] != "/etc/vmware-tools/backupScripts.d/vss_script_sh.sh freeze" ) or - ( vss_content.stdout_lines[2] != "/etc/vmware-tools/backupScripts.d/vss_script_sh.sh thaw" ) or - ( vss_content.stdout_lines[3] != "/usr/sbin/post-thaw-script thaw" )) + # Guest OS is not Flatcar or RHCOS + # After quiesce snapshot is taken, guest log file /vss.log should have below contents: + # /usr/sbin/pre-freeze-script freeze + # /etc/vmware-tools/backupScripts.d/vss_script_sh.sh freeze + # /etc/vmware-tools/backupScripts.d/vss_script_sh.sh thaw + # /usr/sbin/post-thaw-script thaw + - name: "Check file content in {{ vss_log_path }}" + ansible.builtin.assert: + that: + - vss_content.stdout_lines is defined + - vss_content.stdout_lines | length == 4 + - vss_content.stdout_lines[0] == "/usr/sbin/pre-freeze-script freeze" + - vss_content.stdout_lines[1] == "/etc/vmware-tools/backupScripts.d/vss_script_sh.sh freeze" + - vss_content.stdout_lines[2] == "/etc/vmware-tools/backupScripts.d/vss_script_sh.sh thaw" + - vss_content.stdout_lines[3] == "/usr/sbin/post-thaw-script thaw" + fail_msg: "{{ 
vss_log_path }} has incorrect content" + when: guest_os_ansible_distribution not in ['Flatcar', 'RHCOS'] - # Test failed for Flatcar guest - - name: "{{ ansible_play_name }} failed" - ansible.builtin.fail: - msg: "{{ vss_log_path }} has incorrect content" - when: (guest_os_ansible_distribution == 'Flatcar') and - (( not vss_content ) or - ( vss_content.stdout_lines is undefined ) or - ( vss_content.stdout_lines | length != 2 ) or - ( vss_content.stdout_lines[0] != "/etc/vmware-tools/backupScripts.d/vss_script_sh.sh freeze") or - ( vss_content.stdout_lines[1] != "/etc/vmware-tools/backupScripts.d/vss_script_sh.sh thaw" )) + # Guest OS is Flatcar or RHCOS + # After quiesce snapshot is taken, guest log file /var/vss.log should have below contents: + # /etc/vmware-tools/backupScripts.d/vss_script_sh.sh freeze + # /etc/vmware-tools/backupScripts.d/vss_script_sh.sh thaw + - name: "Check file content in {{ vss_log_path }}" + ansible.builtin.assert: + that: + - vss_content.stdout_lines is defined + - vss_content.stdout_lines | length == 2 + - vss_content.stdout_lines[0] == "/etc/vmware-tools/backupScripts.d/vss_script_sh.sh freeze" + - vss_content.stdout_lines[1] == "/etc/vmware-tools/backupScripts.d/vss_script_sh.sh thaw" + fail_msg: "{{ vss_log_path }} has incorrect content" + when: guest_os_ansible_distribution in ['Flatcar', 'RHCOS'] - # Remove quiesce snapshot for successful test - - include_tasks: ../../common/vm_remove_snapshot.yml + - name: "Remove quiesce snapshot for successful test" + include_tasks: ../../common/vm_remove_snapshot.yml vars: snapshot_name: "{{ qs_snapshot_name }}" rescue: - - include_tasks: ../../common/test_rescue.yml + - name: "Test case failure" + include_tasks: ../../common/test_rescue.yml diff --git a/linux/check_quiesce_snapshot_custom_script/vss_script_sh.sh b/linux/check_quiesce_snapshot_custom_script/vss_script_sh.sh index cfc6e61de..b3d0ff543 100755 --- a/linux/check_quiesce_snapshot_custom_script/vss_script_sh.sh +++ b/linux/check_quiesce_snapshot_custom_script/vss_script_sh.sh @@ -1,3 +1,3 @@ #!/bin/sh -echo $0 $@ >> /vss.log +echo $0 $@ >> {{ vss_log_path }} exit 0 diff --git a/linux/deploy_vm/collect_serial_port_log.yml b/linux/deploy_vm/collect_serial_port_log.yml new file mode 100644 index 000000000..79f2573d6 --- /dev/null +++ b/linux/deploy_vm/collect_serial_port_log.yml @@ -0,0 +1,23 @@ +# Copyright 2023 VMware, Inc. +# SPDX-License-Identifier: BSD-2-Clause +--- +# Collect VM serial port log file +# +- name: "Collect VM serial port log" + when: + - vm_dir_name is defined + - vm_dir_name + - vm_serial_port_file_path is defined + - vm_serial_port_file_path + block: + - name: "Set fact of VM serial port log file name" + ansible.builtin.set_fact: + vm_serial_port_file_name: "{{ vm_serial_port_file_path.split()[-1] | basename }}" + + - name: "Download VM serial port log file from datastore" + include_tasks: ../../common/esxi_download_datastore_file.yml + vars: + src_datastore: "{{ datastore }}" + src_file_path: "{{ vm_dir_name }}/{{ vm_serial_port_file_name }}" + dest_file_path: "{{ current_test_log_folder }}/{{ vm_serial_port_file_name }}" + download_file_fail_ignore: true diff --git a/linux/deploy_vm/collect_vm_logs.yml b/linux/deploy_vm/collect_vm_logs.yml index 74fd9865a..dd8e6461a 100644 --- a/linux/deploy_vm/collect_vm_logs.yml +++ b/linux/deploy_vm/collect_vm_logs.yml @@ -1,21 +1,32 @@ # Copyright 2023 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause --- -# Collect cloud-init logs for deploying Ubuntu live-server, -# cloud image or Photon OVA. 
+# Collect VM guest info and cloud-init logs for deploying Ubuntu live-server,
+# cloud image, VMware Photon OS OVA or Amazon Linux OVA.
 #
-- name: "Collect VM logs"
+- name: "Collect VM deployment logs"
+  when:
+    - vm_exists is defined
+    - vm_exists
   block:
     - name: "Get VM's power state"
       include_tasks: ../../common/vm_get_power_state.yml
 
-    - name: "Collect cloud-init logs"
+    - name: "Collect VM's guest info and cloud-init logs"
+      when:
+        - vm_power_state_get is defined
+        - vm_power_state_get == "poweredOn"
       block:
+        - name: "Get VMware Tools status"
+          include_tasks: ../../common/vm_get_vmtools_status.yml
+
+        # VM's guest info will be collected by log plugin
         - name: "Get VM's guest info"
           include_tasks: ../../common/vm_get_guest_info.yml
           when:
            - unattend_install_conf is defined
            - unattend_install_conf | lower is not match('.*bclinux-for-euler.*')
+           - vmtools_is_running
 
         - name: "Collect cloud-init logs"
           include_tasks: ../utils/collect_cloudinit_logs.yml
@@ -23,12 +34,7 @@
            - guestinfo_guest_id is defined
            - ((guestinfo_guest_id is match('ubuntu.*') and
                unattend_install_conf is defined and
-               (unattend_install_conf is match('Ubuntu/Server/') or unattend_install_conf is match('Ubuntu/Desktop/Subiquity'))) or
+               (unattend_install_conf is match('Ubuntu/Server/') or
+                unattend_install_conf is match('Ubuntu/Desktop/Subiquity'))) or
               (ova_guest_os_type is defined and
                ova_guest_os_type in ['photon', 'ubuntu', 'amazon']))
-      when:
-        - vm_power_state_get is defined
-        - vm_power_state_get == "poweredOn"
-  when:
-    - vm_exists is defined
-    - vm_exists
diff --git a/linux/deploy_vm/deploy_vm_from_iso.yml b/linux/deploy_vm/deploy_vm_from_iso.yml
index 2c176ca0f..2e9dfd9fa 100644
--- a/linux/deploy_vm/deploy_vm_from_iso.yml
+++ b/linux/deploy_vm/deploy_vm_from_iso.yml
@@ -244,30 +244,12 @@
         boot_order_list: ['disk']
       when: guest_id is match('ubuntu.*')
 
-    - name: "Download serial output file before removing serial port"
-      include_tasks: ../../common/esxi_download_datastore_file.yml
-      vars:
-        src_datastore: "{{ datastore }}"
-        src_file_path: "{{ vm_dir_name }}/{{ vm_serial_port_output_file | basename }}"
-        dest_file_path: "{{ current_test_log_folder }}/{{ vm_serial_port_output_file | basename }}"
-        download_file_fail_ignore: true
-      when:
-        - vm_dir_name is defined
-        - vm_dir_name
-        - vm_serial_port_output_file is defined
-        - vm_serial_port_output_file
+    - name: "Collect serial port log before removing serial port"
+      include_tasks: collect_serial_port_log.yml
 
     - name: "Remove serial port"
       include_tasks: ../../common/vm_remove_serial_port.yml
 
-    - name: "Failed to remove serial port"
-      ansible.builtin.fail:
-        msg: "Failed to remove serial port from VM"
-      when: >
-        remove_serial_port is undefined or
-        remove_serial_port.changed is undefined or
-        not remove_serial_port.changed
-
     - name: "OS auto install is completed. Power on VM now"
       include_tasks: ../../common/vm_set_power_state.yml
       vars:
@@ -367,8 +349,10 @@
             Oracle Linux 9.0 UEK R7 upgrading failed. Before upgrade, the UEK R7 version is
             '{{ ol9_uekr7_before_upgrade }}', after upgrade the UEK R7 version is '{{ ol9_uekr7_after_upgrade }}'.
- rescue: + - name: "Collect serial port log at test failure" + include_tasks: collect_serial_port_log.yml + - name: "Test case failure" include_tasks: ../../common/test_rescue.yml vars: diff --git a/linux/deploy_vm/deploy_vm_from_ova.yml b/linux/deploy_vm/deploy_vm_from_ova.yml index 8ffc3ece1..35bf3a9b7 100644 --- a/linux/deploy_vm/deploy_vm_from_ova.yml +++ b/linux/deploy_vm/deploy_vm_from_ova.yml @@ -18,15 +18,16 @@ # OVA file on NFS server - name: "Get OVA path and file name after mounting NFS storage at local" + when: ova_nfs_server_path is defined and ova_nfs_server_path block: - - include_tasks: ../../common/mount_nfs_storage_local.yml + - name: "Mount NFS storage at local" + include_tasks: ../../common/mount_nfs_storage_local.yml vars: nfs_server_path: "{{ ova_nfs_server_path }}" - name: "Get OVA path and file name" ansible.builtin.set_fact: vm_ova_path: "{{ nfs_mount_dir }}/{{ ova_path }}" vm_ova_name: "{{ ova_path | basename }}" - when: ova_nfs_server_path is defined and ova_nfs_server_path # Check OVA file exists - name: "Check for {{ vm_ova_path }} existence" ansible.builtin.stat: @@ -60,7 +61,7 @@ include_tasks: "../../common/collect_ovf_vm_config.yml" vars: ovf_vm_hardware_config_path: "{{ current_test_log_folder }}" - + - name: "Get VM info" include_tasks: ../../common/vm_get_vm_info.yml @@ -87,16 +88,17 @@ - (hardware_version == "latest" or (vm_hardware_version_num | int < hardware_version | int)) - - name: "Get VM info" - include_tasks: ../../common/vm_get_vm_info.yml + # Add serial port for collecting messages + - name: "Add a serial port for VM" + include_tasks: ../../common/vm_add_serial_port.yml - name: "Reconfigure VM with cloud-init" include_tasks: reconfigure_vm_with_cloudinit.yml when: ova_guest_os_type in ['photon', 'ubuntu', 'amazon'] - - name: "Reconfigure Flatcar VM with Ignition" - include_tasks: flatcar/reconfigure_flatcar_vm.yml - when: ova_guest_os_type == 'flatcar' + - name: "Reconfigure VM with Ignition" + include_tasks: reconfigure_vm_with_ignition.yml + when: ova_guest_os_type in ['flatcar', 'rhcos'] - name: "Warning about unknown guest OS type" ansible.builtin.debug: @@ -104,13 +106,57 @@ WARNING: The guest OS type of the OVA doesn't match known guest OS type for reconfiguration. Please add it if needed or the following tests might fail. 
when: ova_guest_os_type == 'unknown' + + - name: "Collect VM deployment logs" + include_tasks: collect_vm_logs.yml + + - name: "Shutdown guest OS" + include_tasks: ../utils/shutdown.yml + + - name: "Collect serial port log before removing serial port" + include_tasks: collect_serial_port_log.yml + + - name: "Remove serial port from VM" + include_tasks: ../../common/vm_remove_serial_port.yml + + # The workaround "Remove CDROM" for issue: https://bugs.launchpad.net/cloud-init/+bug/1992509 + - name: "Remove existing CDROMs" + include_tasks: ../../common/vm_configure_cdrom.yml + vars: + cdrom_type: client + cdrom_controller_type: "{{ vm_cdrom.controller_label.split()[0] | lower }}" + cdrom_controller_num: "{{ vm_cdrom.bus_num }}" + cdrom_unit_num: "{{ vm_cdrom.unit_num }}" + cdrom_state: absent + with_items: "{{ vm_existing_cdrom_list }}" + loop_control: + loop_var: vm_cdrom + when: + - guest_os_ansible_distribution == "Ubuntu" + - vm_existing_cdrom_list is defined + - vm_existing_cdrom_list | length > 0 + + - name: "Power on VM" + include_tasks: ../../common/vm_set_power_state.yml + vars: + vm_power_state_set: 'powered-on' rescue: + # If test case failed before collecting VM cloud-init logs, + # the logs need to be collected at rescue + - name: "Collect VM deployment logs" + include_tasks: collect_vm_logs.yml + when: cloudinit_logs_local_path is undefined + + - name: "Collect serial port log at test failure" + include_tasks: collect_serial_port_log.yml + - name: "Test case failure" include_tasks: ../../common/test_rescue.yml vars: exit_testing_when_fail: true always: - name: "Unmount NFS share folder and remove mount folder" + when: nfs_mount_dir is defined and nfs_mount_dir block: - name: "Umount NFS share points" include_tasks: ../../common/local_unmount.yml @@ -124,7 +170,3 @@ vars: local_path: "{{ nfs_mount_dir }}" del_local_file_ignore_errors: true - when: nfs_mount_dir is defined and nfs_mount_dir - - - name: "Collect VM deployment logs" - include_tasks: collect_vm_logs.yml diff --git a/linux/deploy_vm/flatcar/flatcar_post_config.yml b/linux/deploy_vm/flatcar/flatcar_post_config.yml index 6c957f8a3..fe6303b31 100644 --- a/linux/deploy_vm/flatcar/flatcar_post_config.yml +++ b/linux/deploy_vm/flatcar/flatcar_post_config.yml @@ -15,32 +15,16 @@ vars: guest_file_path: "/etc/ssh/sshd_config" -# If /etc/ssh/sshd_config is a symbolic link -# it means the file has not been changed -- name: "Permit root login through ssh" - block: - - name: "Convert /etc/ssh/sshd_config from symbolic link to regular file" - ansible.builtin.shell: sed -i '' /etc/ssh/sshd_config - become: true - delegate_to: "{{ vm_guest_ip }}" - - - include_tasks: ../../utils/replace_or_add_line_in_file.yml - vars: - file: "/etc/ssh/sshd_config" - reg_exp: "^#?PermitRootLogin" - line_content: "PermitRootLogin yes" +# The original /etc/ssh/sshd_config on Flatcar is a symbolic link +- name: "Convert /etc/ssh/sshd_config from symbolic link to regular file" + ansible.builtin.shell: sed -i '' /etc/ssh/sshd_config + become: true + delegate_to: "{{ vm_guest_ip }}" when: guest_file_stat.islnk | default(false) -# Change user password -- name: "Get user names for password changing" - ansible.builtin.set_fact: - usernames: "{{ [vm_username] | union(['core']) }}" - -- name: "Change password for user {{ vm_username }}" - ansible.builtin.user: - name: "{{ username }}" - password: "{{ vm_password | password_hash('sha512') }}" - delegate_to: "{{ vm_guest_ip }}" - with_items: "{{ usernames }}" - loop_control: - loop_var: username +- name: "Permit 
root login through ssh" + include_tasks: ../../utils/replace_or_add_line_in_file.yml + vars: + file: "/etc/ssh/sshd_config" + reg_exp: "^#?PermitRootLogin" + line_content: "PermitRootLogin yes" diff --git a/linux/deploy_vm/flatcar/generate_ignition_config.yml b/linux/deploy_vm/flatcar/generate_ignition_config.yml deleted file mode 100644 index eae83f011..000000000 --- a/linux/deploy_vm/flatcar/generate_ignition_config.yml +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2021-2023 VMware, Inc. -# SPDX-License-Identifier: BSD-2-Clause ---- -# Generate ignition_config.json file and its base64 encoding -# Parameters: None -# Return: ignition_config_data -- name: "Create a temporary file as Ignition config file" - include_tasks: ../../../common/create_temp_file_dir.yml - vars: - tmp_state: "file" - tmp_prefix: "ignition_" - tmp_suffix: ".json" - -- name: "Set variables for Ignition config" - ansible.builtin.set_fact: - ignition_config_file: "{{ tmp_path }}" - -# Put SSH public key into ignition config file -- name: "Generation ignition config file {{ ignition_config_file }}" - ansible.builtin.template: - src: ignition_config.j2 - dest: "{{ ignition_config_file }}" - mode: "0644" - register: generate_ignition - -- name: "Assert {{ ignition_config_file }} is generated" - ansible.builtin.assert: - that: - - generate_ignition.changed - fail_msg: "Failed to generate {{ ignition_config_file }}" - success_msg: "Successfully generated {{ ignition_config_file }}" - -- block: - - name: "Generate base64 encoded string for {{ ignition_config_file }}" - ansible.builtin.set_fact: - ignition_config_data: "{{ lookup('file', ignition_config_file) | b64encode }}" - - - ansible.builtin.debug: var=ignition_config_data - - - name: "Remove {{ ignition_config_file }}" - include_tasks: ../../../common/delete_local_file.yml - vars: - local_path: "{{ ignition_config_file }}" - when: generate_ignition.changed diff --git a/linux/deploy_vm/flatcar/ignition_config.j2 b/linux/deploy_vm/flatcar/ignition_config.j2 deleted file mode 100644 index d6a41d9f9..000000000 --- a/linux/deploy_vm/flatcar/ignition_config.j2 +++ /dev/null @@ -1,39 +0,0 @@ -{ - "ignition": { - "config": {}, - "timeouts": {}, - "version": "2.1.0" - }, - "passwd": { - "users": [ - { - "name": "{{ vm_username }}", - "sshAuthorizedKeys": [ - "{{ ssh_public_key }}" - ] - {% if new_user is defined and new_user != 'root' and new_user != 'core' %} - }, - { - "name": "{{ new_user }}", - "passwordHash": "{{ vm_password_hash }}", - "sshAuthorizedKeys": [ - "{{ ssh_public_key }}" - ] - {% endif %} - } - ] - }, - "systemd": { - "units":[{ - "name": "update-engine.service", - "enabled": false, - "mask": true - }, - { - "name": "locksmithd.service", - "enabled": false, - "mask": true - }] - }, - "storage": {} -} diff --git a/linux/deploy_vm/flatcar/reconfigure_flatcar_vm.yml b/linux/deploy_vm/flatcar/reconfigure_flatcar_vm.yml deleted file mode 100644 index be16c3267..000000000 --- a/linux/deploy_vm/flatcar/reconfigure_flatcar_vm.yml +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2021-2023 VMware, Inc. 
-# SPDX-License-Identifier: BSD-2-Clause ---- -# Use Ignition to configure ssh authorized key and user password -- name: "Generate Ignition config file" - include_tasks: generate_ignition_config.yml - -- name: "Defining the Ignition configs in Guestinfo" - include_tasks: ../../../common/vm_set_extra_config.yml - vars: - vm_advanced_settings: - - key: "guestinfo.coreos.config.data" - value: "{{ ignition_config_data }}" - - key: "guestinfo.coreos.config.data.encoding" - value: "base64" - -- name: "Power on VM" - include_tasks: ../../common/vm_set_power_state.yml - vars: - vm_power_state_set: 'powered-on' - -- name: "Get VM's extra config" - include_tasks: ../../common/vm_get_extra_config.yml - -- name: "Check Ignition configs is present in VM's extra config after VM power-on" - ansible.builtin.assert: - that: - - vm_extra_config['guestinfo.coreos.config.data'] is defined - - vm_extra_config['guestinfo.coreos.config.data'] == ignition_config_data - - vm_extra_config['guestinfo.coreos.config.data.encoding'] is defined - - vm_extra_config['guestinfo.coreos.config.data.encoding'] == 'base64' - fail_msg: >- - The Ignition configs are not correct in VM's extra config after VM power-on. In VM's extra config, - guestinfo.coreos.config.data = {{ vm_extra_config['guestinfo.coreos.config.data'] | default('') }}, - guestinfo.coreos.config.data.encoding = {{ vm_extra_config['guestinfo.coreos.config.data.encoding'] | - default('') }}. - -# Skip checking guest full name here because we will check it in TD1 case -- include_tasks: ../../../common/vm_wait_guest_fullname.yml - -# Set vm_python to auto -- name: "Reset vm_python to auto" - ansible.builtin.set_fact: - vm_python: "auto" - when: vm_python is defined and vm_python != "auto" - -# Get guest ip -- include_tasks: ../../../common/update_inventory.yml - vars: - update_inventory_timeout: 300 - -# Post configuration for ssh, cloud-init and vmtools -- include_tasks: flatcar_post_config.yml diff --git a/linux/deploy_vm/generate_ignition_config.yml b/linux/deploy_vm/generate_ignition_config.yml new file mode 100644 index 000000000..045cb1753 --- /dev/null +++ b/linux/deploy_vm/generate_ignition_config.yml @@ -0,0 +1,33 @@ +# Copyright 2021-2023 VMware, Inc. 
+# SPDX-License-Identifier: BSD-2-Clause
+---
+# Generate ignition_config.json file and its base64 encoding
+# Return:
+# ignition_config_data: The Ignition config data encoded in base64
+#
+- name: "Set fact of Ignition config file path"
+  ansible.builtin.set_fact:
+    ignition_config_file: "{{ current_test_log_folder }}/ignition.json"
+    ignition_sshd_config: "{{ 'PermitRootLogin yes\nPasswordAuthentication yes\n' | b64encode }}"
+
+# Put SSH public key into ignition config file
+- name: "Generate Ignition config file {{ ignition_config_file }}"
+  ansible.builtin.template:
+    src: "templates/ignition_config.j2"
+    dest: "{{ ignition_config_file }}"
+    mode: "0644"
+  register: generate_ignition
+
+- name: "Assert Ignition config file is generated"
+  ansible.builtin.assert:
+    that:
+      - generate_ignition.changed
+    fail_msg: "Failed to generate {{ ignition_config_file }}"
+    success_msg: "Successfully generated {{ ignition_config_file }}"
+
+- name: "Generate base64 encoded data for {{ ignition_config_file }}"
+  ansible.builtin.set_fact:
+    ignition_config_data: "{{ lookup('file', ignition_config_file) | b64encode }}"
+
+- name: "Display base64 encoded data of Ignition config file {{ ignition_config_file }}"
+  ansible.builtin.debug: var=ignition_config_data
diff --git a/linux/deploy_vm/get_ova_guest_os_type.yml b/linux/deploy_vm/get_ova_guest_os_type.yml
index da1c1e778..9ab6bac77 100644
--- a/linux/deploy_vm/get_ova_guest_os_type.yml
+++ b/linux/deploy_vm/get_ova_guest_os_type.yml
@@ -8,6 +8,7 @@
       {%- if 'ubuntu' in vm_guest_id -%}ubuntu
       {%- elif 'amazonlinux' in vm_guest_id -%}amazon
       {%- elif 'vmwarePhoton' in vm_guest_id -%}photon
+      {%- elif 'coreos' in vm_guest_id or 'rhel' in vm_guest_id -%}rhcos
       {%- else -%}unknown{%- endif -%}
 
 # Try to get the OS type from guest id
@@ -17,6 +18,7 @@
       {%- if 'ubuntu' in guestinfo_guest_id -%}ubuntu
       {%- elif 'amazonlinux' in guestinfo_guest_id -%}amazon
       {%- elif 'vmwarePhoton' in guestinfo_guest_id -%}photon
+      {%- elif 'coreos' in guestinfo_guest_id or 'rhel' in guestinfo_guest_id -%}rhcos
       {%- else -%}unknown{%- endif -%}
   when:
     - ova_guest_os_type == 'unknown'
@@ -31,6 +33,8 @@
       {%- elif 'amazon linux' in guestinfo_guest_full_name | lower -%}amazon
       {%- elif 'vmware photon' in guestinfo_guest_full_name | lower -%}photon
       {%- elif 'flatcar' in guestinfo_guest_full_name | lower -%}flatcar
+      {%- elif 'coreos' in guestinfo_guest_full_name | lower -%}rhcos
+      {%- elif 'Red Hat Enterprise Linux' in guestinfo_guest_full_name -%}rhcos
       {%- else -%}unknown{%- endif -%}
   when:
     - ova_guest_os_type == 'unknown'
@@ -44,6 +48,7 @@
       {%- elif 'Amazon Linux' in guestinfo_detailed_data -%}amazon
       {%- elif 'VMware Photon OS' in guestinfo_detailed_data -%}photon
       {%- elif 'Flatcar' in guestinfo_detailed_data -%}flatcar
+      {%- elif 'CoreOS' in guestinfo_detailed_data -%}rhcos
       {%- else -%}unknown{%- endif -%}
   when:
     - ova_guest_os_type == 'unknown'
diff --git a/linux/deploy_vm/reconfigure_vm_with_cloudinit.yml b/linux/deploy_vm/reconfigure_vm_with_cloudinit.yml
index 21ba76c6d..354e08211 100644
--- a/linux/deploy_vm/reconfigure_vm_with_cloudinit.yml
+++ b/linux/deploy_vm/reconfigure_vm_with_cloudinit.yml
@@ -3,23 +3,35 @@
 ---
 - name: "Set fact of cloud-init final message"
   ansible.builtin.set_fact:
-    cloudinit_final_msg: "The system is finally up, after $UPTIME seconds"
-
-# Add serial port for Ubuntu OVA when hardware_version > 10.
-# Otherwise, it would hang at boot time -- name: "Add serial port for Ubuntu VM deployed from OVA" - include_tasks: ../../common/vm_add_serial_port.yml - when: - - ova_guest_os_type == 'ubuntu' - - vm_hardware_version_num is defined - - vm_hardware_version_num | int > 10 + cloudinit_final_msg: "OVA deployment with cloud-init config is completed" + cloudinit_runcmd: + - echo 'Get OS release info' >/dev/ttyS0 + - cat /etc/os-release >/dev/ttyS0 + - echo 'Update sshd settings in cloud-init config' >/dev/ttyS0 + - [sed, -i, 's/^disable_root:.*/disable_root: false/', /etc/cloud/cloud.cfg] + - [sed, -i, 's/^ssh_pwauth:.*/ssh_pwauth: true/', /etc/cloud/cloud.cfg] + - echo 'Update sshd config to permit root login and password authentication' >/dev/ttyS0 + - [sed, -i, 's/^#*PermitRootLogin .*/PermitRootLogin yes/', /etc/ssh/sshd_config] + - [sed, -i, 's/^#*PasswordAuthentication .*/PasswordAuthentication yes/', /etc/ssh/sshd_config] + +- name: "Update cloud-init runcmd for {{ ova_guest_os_type | capitalize }}" + ansible.builtin.set_fact: + cloudinit_runcmd: >- + {{ + cloudinit_runcmd | + union(["echo 'Restart sshd service' >/dev/ttyS0", + "systemctl restart sshd", + "echo 'Get sshd service status' >/dev/ttyS0", + "systemctl status sshd >/dev/ttyS0" + ]) + }} + when: ova_guest_os_type in ['amazon', 'ubuntu'] - name: "Create cloud-init seed ISO to configure guest OS" include_tasks: ../utils/create_seed_iso.yml vars: user_data_template: "{{ ova_guest_os_type }}-ova-user-data.j2" local_hostname: "{{ ova_guest_os_type }}-ova-{{ hostname_timestamp }}" - mode: '0644' - name: "Upload cloud-init seed ISO to ESXi server datastore" include_tasks: ../../common/esxi_upload_datastore_file.yml @@ -137,44 +149,3 @@ when: - cdrom_device_list is defined - cdrom_device_list | length > 0 - -- name: "Remove serial port from Ubuntu VM" - block: - - name: "Shutdown guest OS" - include_tasks: ../utils/shutdown.yml - - - name: "Remove serial port from VM" - include_tasks: ../../common/vm_remove_serial_port.yml - - - name: "Failed to remove serial port from VM" - ansible.builtin.fail: - msg: "Failed to remove serial port from VM" - when: > - remove_serial_port is undefined or - remove_serial_port.changed is undefined or - not remove_serial_port.changed - - # The workaround "Remove CDROM" for issue: https://bugs.launchpad.net/cloud-init/+bug/1992509 - - name: "Remove all CDROM" - include_tasks: ../../common/vm_configure_cdrom.yml - vars: - cdrom_type: client - cdrom_controller_type: "{{ vm_cdrom.controller_label.split()[0] | lower }}" - cdrom_controller_num: "{{ vm_cdrom.bus_num }}" - cdrom_unit_num: "{{ vm_cdrom.unit_num }}" - cdrom_state: absent - with_items: "{{ vm_existing_cdrom_list }}" - loop_control: - loop_var: vm_cdrom - when: vm_existing_cdrom_list | length > 0 - - - name: "Power on VM" - include_tasks: ../../common/vm_set_power_state.yml - vars: - vm_power_state_set: 'powered-on' - when: - - ova_guest_os_type == 'ubuntu' - - vm_hardware_version_num is defined - - vm_hardware_version_num | int > 10 - - vm_serial_port_file_path is defined - - vm_serial_port_file_path diff --git a/linux/deploy_vm/reconfigure_vm_with_ignition.yml b/linux/deploy_vm/reconfigure_vm_with_ignition.yml new file mode 100644 index 000000000..07c393e97 --- /dev/null +++ b/linux/deploy_vm/reconfigure_vm_with_ignition.yml @@ -0,0 +1,46 @@ +# Copyright 2023 VMware, Inc. 
+# SPDX-License-Identifier: BSD-2-Clause +--- +# Use Ignition to configure guest OS username, password, SSH authorized keys, etc +# +- name: "Generate Ignition config file" + include_tasks: generate_ignition_config.yml + +- name: "Define Ignition configs in VM guestinfo" + include_tasks: ../../common/vm_set_extra_config.yml + vars: + vm_advanced_settings: + - key: "guestinfo.ignition.config.data" + value: "{{ ignition_config_data }}" + - key: "guestinfo.ignition.config.data.encoding" + value: "base64" + +- name: "Power on VM" + include_tasks: ../../common/vm_set_power_state.yml + vars: + vm_power_state_set: 'powered-on' + +- name: "Get VM's extra config" + include_tasks: ../../common/vm_get_extra_config.yml + +- name: "Wait for guest full name is collected by VMware Tools" + include_tasks: ../../common/vm_wait_guest_fullname.yml + +# Set vm_python to auto +- name: "Reset vm_python to auto" + ansible.builtin.set_fact: + vm_python: "auto" + when: vm_python is defined and vm_python != "auto" + +# Get guest ip +- name: "Get VM's guest IP address and add it to in-memory inventory" + include_tasks: ../../common/update_inventory.yml + vars: + update_inventory_timeout: 300 + +- name: "Post config on Flatcar" + include_tasks: flatcar/flatcar_post_config.yml + when: ova_guest_os_type == "flatcar" + +- name: "Retrieve Linux guest system info" + include_tasks: ../utils/get_linux_system_info.yml diff --git a/linux/deploy_vm/templates/amazon-ova-user-data.j2 b/linux/deploy_vm/templates/amazon-ova-user-data.j2 index 642dd11a2..339f928f7 100644 --- a/linux/deploy_vm/templates/amazon-ova-user-data.j2 +++ b/linux/deploy_vm/templates/amazon-ova-user-data.j2 @@ -19,10 +19,11 @@ users: {% endif %} runcmd: - - [ sed, -i, "s/^#PermitRootLogin .*/PermitRootLogin yes/", "/etc/ssh/sshd_config"] - - [ sed, -i, "s/^disable_root:.*/disable_root: false/", "/etc/cloud/cloud.cfg"] - - [ sed, -i, "s/^ssh_pwauth:.*/ssh_pwauth: true/", "/etc/cloud/cloud.cfg"] - - [ sed, -i, "s/^repo_upgrade:.*/repo_upgrade: none/", "/etc/cloud/cloud.cfg"] +{% for cmd in cloudinit_runcmd %} + - {{ cmd }} +{% endfor %} + - echo 'Disable repo upgrade in cloud-init config' >/dev/ttyS0 + - [sed, -i, 's/^repo_upgrade:.*/repo_upgrade: none/', /etc/cloud/cloud.cfg] write_files: - content: | diff --git a/linux/deploy_vm/templates/ignition_config.j2 b/linux/deploy_vm/templates/ignition_config.j2 new file mode 100644 index 000000000..26c6a38d8 --- /dev/null +++ b/linux/deploy_vm/templates/ignition_config.j2 @@ -0,0 +1,65 @@ +{ + "ignition": { +{% if ova_guest_os_type is defined and ova_guest_os_type == 'flatcar' %} + "version": "2.1.0" +{% else %} + "version": "3.3.0" +{% endif %} + }, + "passwd": { + "users": [ + { + "name": "core", + "passwordHash": "{{ vm_password_hash }}", + "sshAuthorizedKeys": [ + "{{ ssh_public_key }}" + ] + }, + { + "name": "{{ vm_username }}", + "passwordHash": "{{ vm_password_hash }}", + "sshAuthorizedKeys": [ + "{{ ssh_public_key }}" + ] + } + {% if new_user is defined and new_user != 'root' and new_user != 'core' %} + ,{ + "name": "{{ new_user }}", + "passwordHash": "{{ vm_password_hash }}", + "sshAuthorizedKeys": [ + "{{ ssh_public_key }}" + ] + } + {% endif %} + ] + }, +{% if ova_guest_os_type is defined and ova_guest_os_type == 'flatcar' %} + "systemd": { + "units": [ + { + "name": "update-engine.service", + "enabled": false, + "mask": true + }, + { + "name": "locksmithd.service", + "enabled": false, + "mask": true + } + ] + }, +{% endif %} + "storage": { +{% if ova_guest_os_type is defined and ova_guest_os_type == 
'rhcos' %} + "files": [ + { + "contents": { + "source": "data:text/plain;charset=utf-8;base64,{{ ignition_sshd_config }}" + }, + "mode": 644, + "path": "/etc/ssh/sshd_config.d/00_permit_root.conf" + } + ] +{% endif %} + } +} diff --git a/linux/deploy_vm/templates/photon-ova-user-data.j2 b/linux/deploy_vm/templates/photon-ova-user-data.j2 index 5ae08c055..62d6ec61c 100644 --- a/linux/deploy_vm/templates/photon-ova-user-data.j2 +++ b/linux/deploy_vm/templates/photon-ova-user-data.j2 @@ -29,13 +29,16 @@ users: # Workaround for root password setting when shadow version is 4.6-5 and earlier bootcmd: - - shadow_version=$(rpm -q shadow | grep -E -o "[0-9]+(\.[0-9]+)+-[0-9]+"); [ $(printf "$shadow_version\n4.6-6\n" | sort | head -n 1) != "4.6-6" ] && /bin/sed -E -i 's/^root:([^:]+):.*$/root:\1:17532:0:99999:0:::/' /etc/shadow + - echo "Running cloud-init bootcmd" >/dev/ttyS0 + - shadow_version=$(rpm -q shadow | grep -E -o "[0-9]+(\.[0-9]+)+-[0-9]+"); echo "Shadow version is $shadow_version" >/dev/ttyS0; [ $(printf "$shadow_version\n4.6-6\n" | sort | head -n 1) != "4.6-6" ] && /bin/sed -E -i 's/^root:([^:]+):.*$/root:\1:17532:0:99999:0:::/' /etc/shadow runcmd: +{% for cmd in cloudinit_runcmd %} + - {{ cmd }} +{% endfor %} + - echo "Stop and disable iptables" >/dev/ttyS0 - [systemctl, stop, iptables] - [systemctl, disable, iptables] - - [systemctl, disable, chronyd] - - [systemctl, disable, chrony-wait] - - os_version=$(grep VERSION= /etc/os-release | grep -E -o "[0-9]+\.[0-9]+"); [ $(printf "$os_version\n4.0\n" | sort | head -n 1 | cut -d '.' -f 1) -ge 4 ] && systemctl start sshd.socket + - os_major_ver=$(grep VERSION= /etc/os-release | grep -Eo "[0-9]+\.[0-9]+" | cut -d '.' -f 1); [ $os_major_ver -ge 4 ] && (echo "Starting sshd.socket ..." >/dev/ttyS0; systemctl start sshd.socket; systemctl status sshd.socket >/dev/ttyS0) final_message: "{{ cloudinit_final_msg }}" diff --git a/linux/deploy_vm/templates/ubuntu-ova-user-data.j2 b/linux/deploy_vm/templates/ubuntu-ova-user-data.j2 index 9567372b5..f8d9bbf8e 100644 --- a/linux/deploy_vm/templates/ubuntu-ova-user-data.j2 +++ b/linux/deploy_vm/templates/ubuntu-ova-user-data.j2 @@ -20,9 +20,12 @@ packages: - net-tools - sg3-utils - ndctl + runcmd: +{% for cmd in cloudinit_runcmd %} + - {{ cmd }} +{% endfor %} + - echo 'Force apt-get to use IPv4 address' >/dev/ttyS0 - echo 'Acquire::ForceIPv4 "true";' >>/etc/apt/apt.conf.d/99force-ipv4 - - sed -i 's/^#*PermitRootLogin .*/PermitRootLogin yes/' /etc/ssh/sshd_config - - systemctl restart sshd final_message: "{{ cloudinit_final_msg }}" diff --git a/linux/deploy_vm/upgrade_ova_vm_hwv.yml b/linux/deploy_vm/upgrade_ova_vm_hwv.yml index d91e61fe9..f8f138926 100644 --- a/linux/deploy_vm/upgrade_ova_vm_hwv.yml +++ b/linux/deploy_vm/upgrade_ova_vm_hwv.yml @@ -5,14 +5,11 @@ vars: hw_version: "{{ hardware_version }}" -- name: "Reset VM hardware version to the upgraded version" - ansible.builtin.set_fact: - vm_hardware_version_num: "{{ vm_upgrade_hw_result.instance.hw_version.split('-')[1] }}" - - name: "Set fact of the base snapshot name" ansible.builtin.set_fact: base_snapshot_for_reconfig: "AfterUpgradeHwv" -- include_tasks: ../../common/vm_take_snapshot.yml +- name: "Take a new snapshot after upgrading hardware version" + include_tasks: ../../common/vm_take_snapshot.yml vars: snapshot_name: "{{ base_snapshot_for_reconfig }}" diff --git a/linux/guest_customization/check_gosc_support_status.yml b/linux/guest_customization/check_gosc_support_status.yml index 204aec1ca..846256388 100644 --- 
a/linux/guest_customization/check_gosc_support_status.yml +++ b/linux/guest_customization/check_gosc_support_status.yml @@ -44,14 +44,19 @@ - name: "Set default guest OS list not support GOSC" ansible.builtin.set_fact: - gos_not_support_gosc: ["FreeBSD", "SLED", "Astra Linux (Orel)", "Fedora", "openSUSE Leap"] + gos_not_support_gosc: + - "Flatcar" + - "SLED" + - "FreeBSD" + - "Astra Linux (Orel)" + - "Fedora" + - "openSUSE Leap" + - "RHCOS" - name: "Set fact of GOSC support status to False for {{ guest_os_ansible_distribution }}" ansible.builtin.set_fact: gosc_is_supported: false - when: > - (guest_os_ansible_distribution in gos_not_support_gosc) or - (guest_os_ansible_distribution == 'Flatcar') + when: guest_os_ansible_distribution in gos_not_support_gosc - name: "Set cloud-init GOSC support status for {{ vm_guest_os_distribution }}" block: @@ -126,7 +131,6 @@ when: - not enable_cloudinit_gosc | bool - guest_os_ansible_distribution not in gos_not_support_gosc - - guest_os_ansible_distribution != 'Flatcar' - name: "Display GOSC support status for {{ vm_guest_os_distribution }}" ansible.builtin.debug: diff --git a/linux/network_device_ops/check_pvrdma_support_status.yml b/linux/network_device_ops/check_pvrdma_support_status.yml index 96617fe14..dff8deaf6 100644 --- a/linux/network_device_ops/check_pvrdma_support_status.yml +++ b/linux/network_device_ops/check_pvrdma_support_status.yml @@ -8,7 +8,7 @@ vars: skip_msg: "{{ guest_os_ansible_distribution }} doesn't support PVRDMA" skip_reason: "Not Supported" - when: guest_os_ansible_distribution in ['Fedora', 'VMware Photon OS', 'Flatcar', 'FreeBSD'] + when: guest_os_ansible_distribution in ['Fedora', 'VMware Photon OS', 'Flatcar', 'FreeBSD', 'RHCOS'] - name: "Get guest config options" include_tasks: ../../common/esxi_get_guest_config_options.yml diff --git a/linux/nvdimm_cold_add_remove/nvdimm_cold_add_remove.yml b/linux/nvdimm_cold_add_remove/nvdimm_cold_add_remove.yml index bf82154be..b2ec2daa2 100644 --- a/linux/nvdimm_cold_add_remove/nvdimm_cold_add_remove.yml +++ b/linux/nvdimm_cold_add_remove/nvdimm_cold_add_remove.yml @@ -25,7 +25,8 @@ doesn't support NVDIMM. 
skip_reason: "Not Supported" when: > - (guest_os_ansible_distribution in ['Flatcar', 'UnionTech', 'Uos', 'Amazon', 'openSUSE Leap', 'FreeBSD'] or + (guest_os_ansible_distribution in ['Flatcar', 'UnionTech', 'Uos', 'Amazon', + 'openSUSE Leap', 'FreeBSD', 'RHCOS'] or (guest_os_ansible_distribution == 'Ubuntu' and guest_os_edition == 'CloudImage')) diff --git a/linux/open_vm_tools/ovt_verify_install.yml b/linux/open_vm_tools/ovt_verify_install.yml index b2f7b6b92..043836c47 100644 --- a/linux/open_vm_tools/ovt_verify_install.yml +++ b/linux/open_vm_tools/ovt_verify_install.yml @@ -21,12 +21,12 @@ update_vmtools: true when: update_vmtools is undefined - # Flatcar doesn't support to install open-vm-tools + # Flatcar and RHCOS doesn't support to install open-vm-tools - include_tasks: ../../common/skip_test_case.yml vars: skip_msg: "Skip test case because {{ guest_os_ansible_distribution }} doesn't support installing open-vm-tools" skip_reason: "Not Supported" - when: guest_os_ansible_distribution == 'Flatcar' + when: guest_os_ansible_distribution in ['Flatcar', 'RHCOS'] # VM has open-vm-tools installed and update_vmtools is set false - include_tasks: ../../common/skip_test_case.yml diff --git a/linux/open_vm_tools/ovt_verify_uninstall.yml b/linux/open_vm_tools/ovt_verify_uninstall.yml index e6ff3f492..ead4cf9c9 100644 --- a/linux/open_vm_tools/ovt_verify_uninstall.yml +++ b/linux/open_vm_tools/ovt_verify_uninstall.yml @@ -21,7 +21,7 @@ vars: skip_msg: "Skip test case because {{ guest_os_ansible_distribution }} doesn't support uninstalling open-vm-tools" skip_reason: "Not Supported" - when: guest_os_ansible_distribution == 'Flatcar' + when: guest_os_ansible_distribution in ['Flatcar', 'RHCOS'] - name: "Block test case when guest OS doesn't install open-vm-tools" include_tasks: ../../common/skip_test_case.yml diff --git a/linux/utils/create_seed_iso.yml b/linux/utils/create_seed_iso.yml index 2fb88d1eb..bb265c85a 100644 --- a/linux/utils/create_seed_iso.yml +++ b/linux/utils/create_seed_iso.yml @@ -30,7 +30,7 @@ ansible.builtin.template: src: "{{ user_data_template }}" dest: "{{ user_data_path }}" - mode: "0644" + mode: "0666" vars: vm_password_hash: "{{ vm_password | password_hash('sha512') }}" @@ -38,7 +38,7 @@ ansible.builtin.file: path: "{{ meta_data_path }}" state: touch - mode: "0644" + mode: "0666" - name: "Set hostname in cloud-init meta-data" ansible.builtin.lineinfile: diff --git a/linux/utils/get_installed_package_info.yml b/linux/utils/get_installed_package_info.yml index 2e21d6774..4bd2adb16 100644 --- a/linux/utils/get_installed_package_info.yml +++ b/linux/utils/get_installed_package_info.yml @@ -16,7 +16,7 @@ - name: "Get package query command for OS packages managed by {{ guest_os_ansible_pkg_mgr | upper }}" ansible.builtin.set_fact: package_query_cmd: "rpm -qi" - when: guest_os_ansible_pkg_mgr in ['yum', 'dnf', 'zypper'] + when: guest_os_ansible_pkg_mgr in ['yum', 'dnf', 'zypper', 'atomic_container'] - name: "Get package query command for OS packages managed by {{ guest_os_ansible_pkg_mgr | upper }}" ansible.builtin.set_fact: diff --git a/linux/utils/get_linux_system_info.yml b/linux/utils/get_linux_system_info.yml index eae4843e4..663dc3401 100644 --- a/linux/utils/get_linux_system_info.yml +++ b/linux/utils/get_linux_system_info.yml @@ -9,6 +9,13 @@ - name: "Get OS release info" include_tasks: get_os_release.yml +- name: "Correct guest OS ansible distribution for RHCOS" + ansible.builtin.set_fact: + guest_os_ansible_distribution: "RHCOS" + when: + - guest_os_release.ID is 
defined + - guest_os_release.ID == 'rhcos' + - name: "Set fact of guest OS ansible distribution is correct or not" ansible.builtin.set_fact: guest_os_ansible_distrib_is_correct: >- diff --git a/linux/vhba_hot_add_remove/vhba_device_hot_add_remove.yml b/linux/vhba_hot_add_remove/vhba_device_hot_add_remove.yml index 514be4349..526f72c68 100644 --- a/linux/vhba_hot_add_remove/vhba_device_hot_add_remove.yml +++ b/linux/vhba_hot_add_remove/vhba_device_hot_add_remove.yml @@ -36,7 +36,7 @@ package_state: "present" when: - new_disk_ctrl_type == 'lsilogic' - - guest_os_ansible_distribution != 'Flatcar' + - guest_os_ansible_distribution not in ['Flatcar', 'RHCOS', 'FreeBSD'] - name: "Get file lsblk.sh for FreeBSD" when: guest_os_ansible_distribution == "FreeBSD" diff --git a/linux/vhba_hot_add_remove/wait_device_list_changed.yml b/linux/vhba_hot_add_remove/wait_device_list_changed.yml index 6622c62f1..ac59a8ba2 100644 --- a/linux/vhba_hot_add_remove/wait_device_list_changed.yml +++ b/linux/vhba_hot_add_remove/wait_device_list_changed.yml @@ -34,7 +34,7 @@ block: - name: "Rescan scsi devices in {{ guest_os_ansible_distribution }} {{ guest_os_ansible_distribution_ver }}" when: - - guest_os_ansible_distribution not in ['Flatcar', 'Fedora', 'FreeBSD'] + - guest_os_ansible_distribution not in ['Flatcar', 'Fedora', 'FreeBSD', 'RHCOS'] - not (guest_os_ansible_distribution == "Ubuntu" and guest_os_ansible_distribution_major_ver | int >= 22) - not (guest_os_ansible_distribution == "Debian" and @@ -53,12 +53,11 @@ - name: "Rescan scsi devices in {{ guest_os_ansible_distribution }} {{ guest_os_ansible_distribution_ver }}" when: > - (guest_os_ansible_distribution == 'Flatcar' or + (guest_os_ansible_distribution in ['Flatcar', 'Fedora', 'RHCOS'] or (guest_os_ansible_distribution == "Ubuntu" and guest_os_ansible_distribution_major_ver | int >= 22) or (guest_os_ansible_distribution == "Debian" and - guest_os_ansible_distribution_major_ver | int >= 12) or - guest_os_ansible_distribution == "Fedora") + guest_os_ansible_distribution_major_ver | int >= 12)) block: - name: "Rescan all hard disks" ansible.builtin.shell: |