Skip to content

Commit

Permalink
[VM]: Support SONiC VM on the server (#3036)
Browse files Browse the repository at this point in the history
VEOS cannot support the MACsec feature, so using SONiC VMs as the testbed servers is useful for testing some scenarios, like MACsec. This PR improves some Ansible playbooks to start/stop SONiC VMs.

Signed-off-by: Ze Gan <[email protected]>
  • Loading branch information
Pterosaur authored Mar 4, 2021
1 parent 92a8b70 commit 78d2df9
Show file tree
Hide file tree
Showing 10 changed files with 251 additions and 42 deletions.
2 changes: 2 additions & 0 deletions ansible/group_vars/vm_host/main.yml
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
supported_vm_types: [ "veos", "sonic", "ceos" ]
root_path: veos-vm
vm_images_url: https://acsbe.blob.core.windows.net/vmimages
cd_image_filename: Aboot-veos-serial-8.0.0.iso
hdd_image_filename: vEOS-lab-4.20.15M.vmdk
sonic_image_filename: sonic-vs.img
skip_image_downloading: false

vm_console_base: 7000
Expand Down
65 changes: 64 additions & 1 deletion ansible/roles/vm_set/tasks/kickstart_vm.yml
Original file line number Diff line number Diff line change
Expand Up @@ -46,4 +46,67 @@
become: yes
when: autostart|bool == true

when: not skip_this_vm
when: not skip_this_vm and (vm_type | lower) == "veos"

- block:
- set_fact:
num_asic: "{{ hostvars[vm_name]['num_asics'] | default(1) }}"

- name: Wait until vm {{ vm_name }} is loaded
sonic_kickstart: telnet_port={{ serial_port }}
login={{ sonic_login }}
passwords={{ sonic_default_passwords }}
hostname={{ hostname }}
mgmt_ip="{{ mgmt_ip_address }}/{{ mgmt_prefixlen }}"
mgmt_gw={{ vm_mgmt_gw | default(mgmt_gw) }}
new_password={{ sonic_password }}
num_asic={{ num_asic }}
register: kickstart_output
until: '"kickstart_code" in kickstart_output and kickstart_output.kickstart_code == 0'
retries: 5
delay: 10
ignore_errors: true

- name: Destroy vm {{ vm_name }} if it hangs
virt: name={{ vm_name }}
command=destroy
uri=qemu:///system
when: kickstart_output.kickstart_code != 0
become: yes
ignore_errors: true

- name: Start vm again {{ vm_name }}
virt: name={{ vm_name }}
state=running
uri=qemu:///system
when: kickstart_output.kickstart_code != 0
become: yes
ignore_errors: true

- name: Wait until vm {{ vm_name }} is loaded
sonic_kickstart: telnet_port={{ serial_port }}
login={{ sonic_login }}
passwords={{ sonic_default_passwords }}
hostname={{ hostname }}
mgmt_ip="{{ mgmt_ip_address }}/{{ mgmt_prefixlen }}"
mgmt_gw={{ vm_mgmt_gw | default(mgmt_gw) }}
new_password={{ sonic_password }}
num_asic={{ num_asic }}
register: kickstart_output_final
until: '"kickstart_code" in kickstart_output_final and kickstart_output_final.kickstart_code == 0'
retries: 5
delay: 10
ignore_errors: true
when: kickstart_output.kickstart_code != 0

- name: Kickstart gives error again vm {{ vm_name }}
set_fact:
kickstart_failed_vms: "{{ kickstart_failed_vms + [vm_name] }}"
when: '"kickstart_code" in kickstart_output_final and kickstart_output_final.kickstart_code != 0'

- name: Set VM to autostart
command: "virsh autostart {{ vm_name }}"
become: yes
when: autostart|bool == true

when: not skip_this_vm and (vm_type | lower) == "sonic"
25 changes: 18 additions & 7 deletions ansible/roles/vm_set/tasks/main.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# This role creates a set of VM with veos or Ubuntu for Kubernetes master
# This role creates a set of VM with veos or SONiC or Ubuntu for Kubernetes master
# Input parameters for the role:
# - action: 'start', 'stop' or 'renumber' for creating, removing, or renumbering vm set respectively
# - id: sequence number for vm set on the host.
Expand Down Expand Up @@ -166,19 +166,19 @@
when: home_path is not defined
- debug: msg="{{ home_path }}"

- name: Require veos VMs by default
- name: Require veos or SONiC VMs by default
set_fact:
veos_vm_required: true
vm_required: true

- name: veos VMs not needed when setting up Kubernetes master
- name: veos or SONiC VMs not needed when setting up Kubernetes master
set_fact:
veos_vm_required: false
vm_required: false
when:
- k8s is defined

- name: VMs not needed in case of Keysight API Server
set_fact:
veos_vm_required: false
vm_required: false
when:
- ptf_imagename is defined
- ptf_imagename == "docker-keysight-api-server"
Expand All @@ -204,6 +204,17 @@
become: true

- block:

- name: Require VMs as VEOS by default
set_fact:
vm_type: "veos"
when: vm_type is not defined

- name: Check VM type
fail:
msg: "Cannot support this VM type {{ vm_type }}"
when: vm_type not in supported_vm_types

- name: Ensure {{ root_path }} exists
file: path={{ root_path }} state=directory

Expand Down Expand Up @@ -252,7 +263,7 @@
include_tasks: renumber_topo.yml
when: action == 'renumber_topo'

when: veos_vm_required is defined and veos_vm_required == True
when: vm_required is defined and vm_required == True

- name: Add topology
include_tasks: add_topo.yml
Expand Down
71 changes: 49 additions & 22 deletions ansible/roles/vm_set/tasks/start.yml
Original file line number Diff line number Diff line change
@@ -1,3 +1,8 @@
- name: Require VMs as VEOS by default
set_fact:
vm_type: "veos"
when: vm_type is not defined

- name: Load topo variables
include_vars: "vars/topo_{{ topo }}.yml"
when: topo is defined
Expand All @@ -12,29 +17,51 @@
- "{{ root_path }}/images"
- "{{ root_path }}/disks"

- name: Check hdd image
stat: path={{ root_path }}/images/{{ hdd_image_filename }}
register: hdd_stat
- block:

- name: Check hdd image
stat: path={{ root_path }}/images/{{ hdd_image_filename }}
register: hdd_stat

- name: Fail if there are no hdd image and skip image downloading is active
fail: msg="Please put {{ hdd_image_filename }} to {{ root_path }}/images"
when: not hdd_stat.stat.exists and skip_image_downloading

- name: Download hdd image
get_url: url="{{ vm_images_url }}/{{ hdd_image_filename }}?{{ vmimage_saskey }}" dest="{{ root_path }}/images/{{ hdd_image_filename }}"
when: not hdd_stat.stat.exists and not skip_image_downloading

- name: Fail if there are no hdd image and skip image downloading is active
fail: msg="Please put {{ hdd_image_filename }} to {{ root_path }}/images"
when: not hdd_stat.stat.exists and skip_image_downloading
- name: Check cd image
stat: path={{ root_path }}/images/{{ cd_image_filename }}
register: cd_stat

- name: Fail if there are no cd image and skip image downloading is active
fail: msg="Please put {{ cd_image_filename }} to {{ root_path }}/images"
when: not cd_stat.stat.exists and skip_image_downloading

- name: Download cd image
get_url: url="{{ vm_images_url }}/{{ cd_image_filename }}?{{ cdimage_saskey }}" dest="{{ root_path }}/images/{{ cd_image_filename }}"
when: not cd_stat.stat.exists and not skip_image_downloading

- set_fact:
src_image_name: "{{ hdd_image_filename }}"

when: (vm_type | lower) == "veos"

- block:

- name: Download hdd image
get_url: url="{{ vm_images_url }}/{{ hdd_image_filename }}?{{ vmimage_saskey }}" dest="{{ root_path }}/images/{{ hdd_image_filename }}"
when: not hdd_stat.stat.exists and not skip_image_downloading
- name: Check SONiC image
stat: path={{ root_path }}/images/{{ sonic_image_filename }}
register: img_stat

- name: Check cd image
stat: path={{ root_path }}/images/{{ cd_image_filename }}
register: cd_stat
- name: Fail if there are no SONiC image and skip image downloading is active
fail: msg="Please put {{ sonic_image_filename }} to {{ root_path }}/images"
when: not img_stat.stat.exists

- name: Fail if there are no cd image and skip image downloading is active
fail: msg="Please put {{ cd_image_filename }} to {{ root_path }}/images"
when: not cd_stat.stat.exists and skip_image_downloading
- set_fact:
src_image_name: "{{ sonic_image_filename }}"

- name: Download cd image
get_url: url="{{ vm_images_url }}/{{ cd_image_filename }}?{{ cdimage_saskey }}" dest="{{ root_path }}/images/{{ cd_image_filename }}"
when: not cd_stat.stat.exists and not skip_image_downloading
when: (vm_type | lower) == "sonic"

- name: Create VMs network
become: yes
Expand Down Expand Up @@ -66,8 +93,8 @@
hostname: "{{ vm_name }}"
mgmt_ip_address: "{{ hostvars[vm_name]['ansible_host'] }}"
serial_port: "{{ vm_console_base|int + vm_name[4:]|int }}"
src_disk_image: "{{ root_path }}/images/{{ hdd_image_filename }}"
disk_image: "{{ home_path }}/{{ root_path }}/disks/{{ vm_name }}_hdd.vmdk"
src_disk_image: "{{ root_path }}/images/{{ src_image_name }}"
disk_image_dir: "{{ home_path }}/{{ root_path }}/disks"
cdrom_image: "{{ home_path }}/{{ root_path }}/images/{{ cd_image_filename }}"
mgmt_tap: "{{ vm_name }}-m"
backplane_tap: "{{ vm_name }}-back"
Expand All @@ -83,8 +110,8 @@
hostname: "{{ vm_name }}"
mgmt_ip_address: "{{ hostvars[vm_name]['ansible_host'] }}"
serial_port: "{{ vm_console_base|int + vm_name[4:]|int }}"
src_disk_image: "{{ root_path }}/images/{{ hdd_image_filename }}"
disk_image: "{{ root_path }}/disks/{{ vm_name }}_hdd.vmdk"
src_disk_image: "{{ root_path }}/images/{{ src_image_name }}"
disk_image_dir: "{{ root_path }}/disks"
cdrom_image: "{{ root_path }}/images/{{ cd_image_filename }}"
mgmt_tap: "{{ vm_name }}-m"
backplane_tap: "{{ vm_name }}-back"
Expand Down
17 changes: 15 additions & 2 deletions ansible/roles/vm_set/tasks/start_vm.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,25 @@
# After respining individual VMs, the affected topology needs to
# be removed and deployed again.

- set_fact:
disk_image_name: "{{ vm_type }}_{{ vm_name }}_hdd.vmdk"
vm_xml_template: "arista.xml.j2"
when: (vm_type | lower) == "veos"

- set_fact:
disk_image_name: "{{ vm_type }}_{{ vm_name }}.img"
vm_xml_template: "sonic_vm.xml.j2"
when: (vm_type | lower) == "sonic"

- set_fact:
disk_image: "{{ disk_image_dir }}/{{ disk_image_name }}"

- set_fact:
respin_vms: []
when: respin_vms is not defined

- name: Device debug output
debug: msg="hostname = {{ hostname }} serial port = {{ serial_port }} ip = {{ mgmt_ip_address }}"
debug: msg="hostname = {{ hostname }} vm_type = {{ vm_type }} serial port = {{ serial_port }} ip = {{ mgmt_ip_address }}"

- name: Check destination file existance
stat: path={{ disk_image }}
Expand All @@ -29,7 +42,7 @@
- name: Define vm {{ vm_name }}
virt: name={{ vm_name }}
command=define
xml="{{ lookup('template', 'templates/arista.xml.j2') }}"
xml="{{ lookup('template', 'templates/{{ vm_xml_template }}') }}"
uri=qemu:///system
when: vm_name not in vm_list_defined.list_vms
become: yes
Expand Down
2 changes: 1 addition & 1 deletion ansible/roles/vm_set/tasks/stop.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
include_tasks: stop_vm.yml
vars:
vm_name: "{{ item }}"
disk_image: "{{ root_path }}/disks/{{ vm_name }}_hdd.vmdk"
disk_image_dir: "{{ root_path }}/disks"
with_items: "{{ VM_hosts }}"

- name: Destroy VMs network
Expand Down
13 changes: 12 additions & 1 deletion ansible/roles/vm_set/tasks/stop_vm.yml
Original file line number Diff line number Diff line change
@@ -1,3 +1,14 @@
- set_fact:
disk_image_name: "{{ vm_type }}_{{ vm_name }}_hdd.vmdk"
when: (vm_type | lower) == "veos"

- set_fact:
disk_image_name: "{{ vm_type }}_{{ vm_name }}.img"
when: (vm_type | lower) == "sonic"

- set_fact:
disk_image: "{{ disk_image_dir }}/{{ disk_image_name }}"

- name: Destroy VM {{ vm_name }}
virt: name={{ vm_name }}
state=destroyed
Expand All @@ -12,6 +23,6 @@
when: vm_name in vm_list_defined.list_vms
become: yes

- name: Remove arista disk image for {{ vm_name }}
- name: Remove {{ vm_type }} disk image for {{ vm_name }}
file: path={{ disk_image }} state=absent

54 changes: 54 additions & 0 deletions ansible/roles/vm_set/templates/sonic_vm.xml.j2
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
<!-- libvirt domain definition for a SONiC virtual switch VM (KVM).
     Rendered per-VM by the vm_set role; referenced from start_vm.yml
     via vm_xml_template when (vm_type | lower) == "sonic".
     Template variables consumed here: vm_name, disk_image, serial_port,
     mgmt_bridge, mgmt_tap, max_fp_num. -->
<domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
<name>{{ vm_name }}</name>
<!-- 2 GiB RAM, 2 static vCPUs per SONiC VM -->
<memory unit='KiB'>2097152</memory>
<currentMemory unit='KiB'>2097152</currentMemory>
<vcpu placement='static'>2</vcpu>
<resource>
<partition>/machine</partition>
</resource>
<os>
<!-- q35 machine type; boot straight from the hard disk image -->
<type arch='x86_64' machine='q35'>hvm</type>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
</features>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<!-- Primary disk: the sonic-vs qcow2 image copied into disk_image
     (see start_vm.yml / stop_vm.yml disk_image handling) -->
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='writeback'/>
<source file='{{ disk_image }}'/>
<target bus='virtio' dev='vda'/>
</disk>
<!-- Serial console exposed as a telnet server on localhost:serial_port;
     this is the port sonic_kickstart connects to in kickstart_vm.yml -->
<serial type='tcp'>
<source host='127.0.0.1' mode='bind' service='{{ serial_port }}'/>
<target port='0'/>
<protocol type='telnet'/>
</serial>
<!-- Management NIC attached to the host management bridge -->
<interface type='bridge'>
<model type='virtio'/>
<source bridge='{{ mgmt_bridge }}'/>
<target dev='{{ mgmt_tap }}'/>
</interface>
{% for fp_num in range(0, max_fp_num) %}
<!-- Front-panel port {{ '{{' }} fp_num {{ '}}' }}: one virtio NIC per port,
     attached to a per-VM Open vSwitch bridge -->
<interface type='bridge'>
<model type='virtio'/>
<source bridge='br-{{ vm_name }}-{{ fp_num }}'/>
<target dev='{{ vm_name }}-t{{ fp_num }}'/>
<virtualport type='openvswitch'/>
</interface>
{% endfor %}
<controller type='usb' index='0'/>
<memballoon model='virtio'>
<alias name='balloon0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</memballoon>
<!-- Hardware watchdog so a hung guest can be recovered -->
<watchdog model='i6300esb'/>
</devices>
<seclabel type='dynamic' model='apparmor' relabel='yes'/>
</domain>
6 changes: 3 additions & 3 deletions ansible/testbed-cli.sh
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ function usage
echo "Options:"
echo " -t <tbfile> : testbed CSV file name (default: 'testbed.csv')"
echo " -m <vmfile> : virtual machine file name (default: 'veos')"
echo " -k <vmtype> : vm type (veos|ceos) (default: 'veos')"
echo " -k <vmtype> : vm type (veos|ceos|sonic) (default: 'veos')"
echo " -n <vm_num> : vm num (default: 0)"
echo " -s <msetnumber> : master set identifier on specified <k8s-server-name> (default: 1)"
echo " -d <dir> : sonic vm directory (default: $HOME/sonic-vm)"
Expand Down Expand Up @@ -161,7 +161,7 @@ function start_vms
shift
echo "Starting VMs on server '${server}'"

ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile -e VM_num="$vm_num" testbed_start_VMs.yml \
ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile -e VM_num="$vm_num" -e vm_type="$vm_type" testbed_start_VMs.yml \
--vault-password-file="${passwd}" -l "${server}" $@
}

Expand All @@ -173,7 +173,7 @@ function stop_vms
shift
echo "Stopping VMs on server '${server}'"

ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_stop_VMs.yml --vault-password-file="${passwd}" -l "${server}" $@
ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile -e vm_type="$vm_type" testbed_stop_VMs.yml --vault-password-file="${passwd}" -l "${server}" $@
}

function start_topo_vms
Expand Down
Loading

0 comments on commit 78d2df9

Please sign in to comment.