diff --git a/Dockerfile.ignition-manifests-and-kubeconfig-generate b/Dockerfile.ignition-manifests-and-kubeconfig-generate index 9315fd1..ac2863f 100644 --- a/Dockerfile.ignition-manifests-and-kubeconfig-generate +++ b/Dockerfile.ignition-manifests-and-kubeconfig-generate @@ -1,15 +1,21 @@ # [TODO] - remove this line, once we are ready to use openshift-installer from the release -FROM quay.io/yshnaidm/openshift-installer +FROM quay.io/ocpmetal/openshift-installer +# [TODO] - add someway to get oc client in order to use it to extract openshift-baremetal-install executable +# FROM quay.io/yshnaidm/oc-image:latest +FROM quay.io/ocpmetal/bm-inventory:latest AS inventory -FROM fedora:31 -RUN dnf install -y libvirt-libs pip python && \ +FROM centos:8 +RUN dnf install -y libvirt-libs python3 findutils && \ yum clean all && \ rm -rf /var/cache/yum COPY requirements.txt /tmp/requirements.txt -RUN pip install -r /tmp/requirements.txt +COPY --from=inventory /clients/bm-inventory-client-*.tar.gz /build/pip/ +RUN pip3 install -r /tmp/requirements.txt +RUN pip3 install ipython +RUN find /build/pip/ -name 'setup.py' -exec dirname {} \; | xargs pip3 install RUN rm /tmp/*requirements.txt @@ -18,11 +24,16 @@ ARG WORK_DIR=/data RUN mkdir $WORK_DIR RUN chmod 777 $WORK_DIR +RUN mkdir /root/.docker + # [TODO] - change this line to use openshift-installer from the release, once we are ready COPY --from=0 /root/installer/openshift-install $WORK_DIR +#COPY --from=0 /root/oc/oc $WORK_DIR COPY ./render_files.py $WORK_DIR +COPY ./utils.py $WORK_DIR +COPY ./test_utils.py $WORK_DIR ENV WORK_DIR=$WORK_DIR ENV EXEC_PATH=$WORK_DIR/render_files.py -ENTRYPOINT python $EXEC_PATH +ENTRYPOINT python3 $EXEC_PATH diff --git a/Dockerfile.oc-image b/Dockerfile.oc-image new file mode 100644 index 0000000..1250629 --- /dev/null +++ b/Dockerfile.oc-image @@ -0,0 +1,5 @@ +#This dockerfile require an openshift installer binary in CWD +FROM alpine:latest +RUN mkdir /root/oc +COPY ./oc /root/oc + diff --git a/installer_dir/test_hosts_list.yaml b/installer_dir/test_hosts_list.yaml new file mode 100644 index 0000000..cb2e9ae --- /dev/null +++ b/installer_dir/test_hosts_list.yaml @@ -0,0 +1,129 @@ +- checked_in_at: '2020-07-22T12:18:54' + cluster_id: 51724237-025b-4693-993f-49d321ec2ea5 + created_at: '2020-07-22T12:07:53' + discovery_agent_version: quay.io/ocpmetal/agent:latest + hardware_info: '{"block_devices":[{"device_type":"loop","fstype":"squashfs","major_device_number":7,"name":"loop0","size":746217472},{"device_type":"rom","fstype":"iso9660","major_device_number":11,"name":"sr0","removable_device":1,"size":822083584},{"device_type":"disk","major_device_number":252,"name":"vda","size":21474836480}],"cpu":{"architecture":"x86_64","cpu_mhz":2095.076,"cpus":4,"model_name":"Intel(R) + Xeon(R) Gold 6152 CPU @ 2.10GHz","sockets":4,"threads_per_core":1},"memory":[{"free":15661727744,"name":"Mem","shared":1127391232,"total":17378611200,"used":1716883456},{"name":"Swap"}],"nics":[{"cidrs":[{"ip_address":"192.168.126.11","mask":24}],"mac":"52:54:00:42:1e:8d","mtu":1500,"name":"eth0","state":"BROADCAST,MULTICAST,UP,LOWER_UP"},{"cidrs":[{"ip_address":"192.168.140.133","mask":24}],"mac":"52:54:00:ca:7b:16","mtu":1500,"name":"eth1","state":"BROADCAST,MULTICAST,UP,LOWER_UP"},{"cidrs":[{"ip_address":"10.88.0.1","mask":16}],"mac":"8a:97:25:6c:c0:61","mtu":1500,"name":"cni-podman0","state":"NO-CARRIER,BROADCAST,MULTICAST,UP"}]}' + href: /api/assisted-install/v1/clusters/51724237-025b-4693-993f-49d321ec2ea5/hosts/26cc8e92-0f5a-4d05-93e1-0c55667a723b + 
id: 26cc8e92-0f5a-4d05-93e1-0c55667a723b + inventory: '{"bmc_address":"0.0.0.0","bmc_v6address":"::/0","boot":{"current_boot_mode":"bios"},"cpu":{"architecture":"x86_64","count":4,"flags":["fpu","vme","de","pse","tsc","msr","pae","mce","cx8","apic","sep","mtrr","pge","mca","cmov","pat","pse36","clflush","mmx","fxsr","sse","sse2","ss","syscall","nx","pdpe1gb","rdtscp","lm","constant_tsc","arch_perfmon","rep_good","nopl","xtopology","cpuid","tsc_known_freq","pni","pclmulqdq","vmx","ssse3","fma","cx16","pcid","sse4_1","sse4_2","x2apic","movbe","popcnt","tsc_deadline_timer","aes","xsave","avx","f16c","rdrand","hypervisor","lahf_lm","abm","3dnowprefetch","cpuid_fault","invpcid_single","pti","ssbd","ibrs","ibpb","stibp","tpr_shadow","vnmi","flexpriority","ept","vpid","ept_ad","fsgsbase","tsc_adjust","bmi1","hle","avx2","smep","bmi2","erms","invpcid","rtm","mpx","avx512f","avx512dq","rdseed","adx","smap","clflushopt","clwb","avx512cd","avx512bw","avx512vl","xsaveopt","xsavec","xgetbv1","xsaves","arat","umip","pku","ospke","md_clear","arch_capabilities"],"frequency":2095.076,"model_name":"Intel(R) + Xeon(R) Gold 6152 CPU @ 2.10GHz"},"disks":[{"by_path":"/dev/disk/by-path/pci-0000:00:06.0","drive_type":"HDD","model":"unknown","name":"vda","path":"/dev/vda","serial":"unknown","size_bytes":21474836480,"vendor":"0x1af4","wwn":"unknown"}],"hostname":"test-infra-cluster-master-1.redhat.com","interfaces":[{"flags":["up","broadcast","multicast"],"has_carrier":true,"ipv4_addresses":["192.168.126.11/24"],"ipv6_addresses":["fe80::5054:ff:fe42:1e8d/64"],"mac_address":"52:54:00:42:1e:8d","mtu":1500,"name":"eth0","product":"0x0001","speed_mbps":-1,"vendor":"0x1af4"},{"flags":["up","broadcast","multicast"],"has_carrier":true,"ipv4_addresses":["192.168.140.133/24"],"ipv6_addresses":["fe80::5054:ff:feca:7b16/64"],"mac_address":"52:54:00:ca:7b:16","mtu":1500,"name":"eth1","product":"0x0001","speed_mbps":-1,"vendor":"0x1af4"}],"memory":{"physical_bytes":17809014784,"usable_bytes":17378611200},"system_vendor":{"manufacturer":"Red + Hat","product_name":"KVM"}}' + kind: Host + progress: + current_stage: '' + stage_started_at: '0001-01-01T00:00:00' + stage_updated_at: '0001-01-01T00:00:00' + progress_stages: + - Starting installation + - Installing + - Writing image to disk + - Rebooting + - Configuring + - Joined + - Done + requested_hostname: test-infra-cluster-master-1.redhat.com + role: master + status: error + status_info: Cluster is no longer preparing for installation + status_updated_at: '2020-07-22T12:18:34' + updated_at: '2020-07-22T12:18:54' + validations_info: '{"hardware":[{"id":"has-inventory","status":"success","message":"Valid + inventory exists for the host"},{"id":"has-min-cpu-cores","status":"success","message":"Sufficient + minimum CPU cores"},{"id":"has-min-memory","status":"success","message":"Sufficient + minimum memory"},{"id":"has-min-valid-disks","status":"success","message":"Sufficient + number of disks with required size"},{"id":"has-cpu-cores-for-role","status":"success","message":"Sufficient + CPU cores for role master"},{"id":"has-memory-for-role","status":"success","message":"Sufficient + memory for role master"},{"id":"hostname-unique","status":"success","message":"Hostname + test-infra-cluster-master-1.redhat.com is unique in cluster"},{"id":"hostname-valid","status":"success","message":"Hostname + test-infra-cluster-master-1.redhat.com is allowed"}],"network":[{"id":"connected","status":"success","message":"Host + is 
connected"},{"id":"machine-cidr-defined","status":"success","message":"Machine + network CIDR is defined"},{"id":"belongs-to-machine-cidr","status":"success","message":"Host + belongs to machine network CIDR 192.168.126.0/24"}],"role":[{"id":"role-defined","status":"success","message":"Role + is defined"}]}' +- checked_in_at: '2020-07-22T12:18:57' + cluster_id: 51724237-025b-4693-993f-49d321ec2ea5 + created_at: '2020-07-22T12:07:57' + discovery_agent_version: quay.io/ocpmetal/agent:latest + hardware_info: '{"block_devices":[{"device_type":"loop","fstype":"squashfs","major_device_number":7,"name":"loop0","size":746217472},{"device_type":"rom","fstype":"iso9660","major_device_number":11,"name":"sr0","removable_device":1,"size":822083584},{"device_type":"disk","major_device_number":252,"name":"vda","size":21474836480}],"cpu":{"architecture":"x86_64","cpu_mhz":2095.076,"cpus":4,"model_name":"Intel(R) + Xeon(R) Gold 6152 CPU @ 2.10GHz","sockets":4,"threads_per_core":1},"memory":[{"free":15659839488,"name":"Mem","shared":1125908480,"total":17378619392,"used":1718779904},{"name":"Swap"}],"nics":[{"cidrs":[{"ip_address":"192.168.126.12","mask":24}],"mac":"52:54:00:c6:90:cc","mtu":1500,"name":"eth0","state":"BROADCAST,MULTICAST,UP,LOWER_UP"},{"cidrs":[{"ip_address":"192.168.140.221","mask":24}],"mac":"52:54:00:ce:61:27","mtu":1500,"name":"eth1","state":"BROADCAST,MULTICAST,UP,LOWER_UP"},{"cidrs":[{"ip_address":"10.88.0.1","mask":16}],"mac":"de:f2:95:50:17:4d","mtu":1500,"name":"cni-podman0","state":"NO-CARRIER,BROADCAST,MULTICAST,UP"}]}' + href: /api/assisted-install/v1/clusters/51724237-025b-4693-993f-49d321ec2ea5/hosts/5ecb0277-1621-4991-9fb5-d5832cf9b463 + id: 5ecb0277-1621-4991-9fb5-d5832cf9b463 + inventory: '{"bmc_address":"0.0.0.0","bmc_v6address":"::/0","boot":{"current_boot_mode":"bios"},"cpu":{"architecture":"x86_64","count":4,"flags":["fpu","vme","de","pse","tsc","msr","pae","mce","cx8","apic","sep","mtrr","pge","mca","cmov","pat","pse36","clflush","mmx","fxsr","sse","sse2","ss","syscall","nx","pdpe1gb","rdtscp","lm","constant_tsc","arch_perfmon","rep_good","nopl","xtopology","cpuid","tsc_known_freq","pni","pclmulqdq","vmx","ssse3","fma","cx16","pcid","sse4_1","sse4_2","x2apic","movbe","popcnt","tsc_deadline_timer","aes","xsave","avx","f16c","rdrand","hypervisor","lahf_lm","abm","3dnowprefetch","cpuid_fault","invpcid_single","pti","ssbd","ibrs","ibpb","stibp","tpr_shadow","vnmi","flexpriority","ept","vpid","ept_ad","fsgsbase","tsc_adjust","bmi1","hle","avx2","smep","bmi2","erms","invpcid","rtm","mpx","avx512f","avx512dq","rdseed","adx","smap","clflushopt","clwb","avx512cd","avx512bw","avx512vl","xsaveopt","xsavec","xgetbv1","xsaves","arat","umip","pku","ospke","md_clear","arch_capabilities"],"frequency":2095.076,"model_name":"Intel(R) + Xeon(R) Gold 6152 CPU @ 
2.10GHz"},"disks":[{"by_path":"/dev/disk/by-path/pci-0000:00:06.0","drive_type":"HDD","model":"unknown","name":"vda","path":"/dev/vda","serial":"unknown","size_bytes":21474836480,"vendor":"0x1af4","wwn":"unknown"}],"hostname":"test-infra-cluster-master-2.redhat.com","interfaces":[{"flags":["up","broadcast","multicast"],"has_carrier":true,"ipv4_addresses":["192.168.126.12/24"],"ipv6_addresses":["fe80::5054:ff:fec6:90cc/64"],"mac_address":"52:54:00:c6:90:cc","mtu":1500,"name":"eth0","product":"0x0001","speed_mbps":-1,"vendor":"0x1af4"},{"flags":["up","broadcast","multicast"],"has_carrier":true,"ipv4_addresses":["192.168.140.221/24"],"ipv6_addresses":["fe80::5054:ff:fece:6127/64"],"mac_address":"52:54:00:ce:61:27","mtu":1500,"name":"eth1","product":"0x0001","speed_mbps":-1,"vendor":"0x1af4"}],"memory":{"physical_bytes":17809014784,"usable_bytes":17378619392},"system_vendor":{"manufacturer":"Red + Hat","product_name":"KVM"}}' + kind: Host + progress: + current_stage: '' + stage_started_at: '0001-01-01T00:00:00' + stage_updated_at: '0001-01-01T00:00:00' + progress_stages: + - Starting installation + - Installing + - Writing image to disk + - Rebooting + - Configuring + - Joined + - Done + requested_hostname: test-infra-cluster-master-2.redhat.com + role: master + status: error + status_info: Cluster is no longer preparing for installation + status_updated_at: '2020-07-22T12:18:34' + updated_at: '2020-07-22T12:18:57' + validations_info: '{"hardware":[{"id":"has-inventory","status":"success","message":"Valid + inventory exists for the host"},{"id":"has-min-cpu-cores","status":"success","message":"Sufficient + minimum CPU cores"},{"id":"has-min-memory","status":"success","message":"Sufficient + minimum memory"},{"id":"has-min-valid-disks","status":"success","message":"Sufficient + number of disks with required size"},{"id":"has-cpu-cores-for-role","status":"success","message":"Sufficient + CPU cores for role master"},{"id":"has-memory-for-role","status":"success","message":"Sufficient + memory for role master"},{"id":"hostname-unique","status":"success","message":"Hostname + test-infra-cluster-master-2.redhat.com is unique in cluster"},{"id":"hostname-valid","status":"success","message":"Hostname + test-infra-cluster-master-2.redhat.com is allowed"}],"network":[{"id":"connected","status":"success","message":"Host + is connected"},{"id":"machine-cidr-defined","status":"success","message":"Machine + network CIDR is defined"},{"id":"belongs-to-machine-cidr","status":"success","message":"Host + belongs to machine network CIDR 192.168.126.0/24"}],"role":[{"id":"role-defined","status":"success","message":"Role + is defined"}]}' +- checked_in_at: '2020-07-22T12:18:53' + cluster_id: 51724237-025b-4693-993f-49d321ec2ea5 + created_at: '2020-07-22T12:07:53' + discovery_agent_version: quay.io/ocpmetal/agent:latest + hardware_info: '{"block_devices":[{"device_type":"loop","fstype":"squashfs","major_device_number":7,"name":"loop0","size":746217472},{"device_type":"rom","fstype":"iso9660","major_device_number":11,"name":"sr0","removable_device":1,"size":822083584},{"device_type":"disk","major_device_number":252,"name":"vda","size":21474836480}],"cpu":{"architecture":"x86_64","cpu_mhz":2095.076,"cpus":4,"model_name":"Intel(R) + Xeon(R) Gold 6152 CPU @ 
2.10GHz","sockets":4,"threads_per_core":1},"memory":[{"free":15614251008,"name":"Mem","shared":1140981760,"total":17378619392,"used":1764368384},{"name":"Swap"}],"nics":[{"cidrs":[{"ip_address":"192.168.126.10","mask":24}],"mac":"52:54:00:0e:ce:92","mtu":1500,"name":"eth0","state":"BROADCAST,MULTICAST,UP,LOWER_UP"},{"cidrs":[{"ip_address":"192.168.140.105","mask":24}],"mac":"52:54:00:be:00:05","mtu":1500,"name":"eth1","state":"BROADCAST,MULTICAST,UP,LOWER_UP"},{"cidrs":[{"ip_address":"10.88.0.1","mask":16}],"mac":"06:e6:05:66:e5:2f","mtu":1500,"name":"cni-podman0","state":"NO-CARRIER,BROADCAST,MULTICAST,UP"}]}' + href: /api/assisted-install/v1/clusters/51724237-025b-4693-993f-49d321ec2ea5/hosts/7368d414-9b7c-493d-9b9d-ea0b23bb12b6 + id: 7368d414-9b7c-493d-9b9d-ea0b23bb12b6 + inventory: '{"bmc_address":"0.0.0.0","bmc_v6address":"::/0","boot":{"current_boot_mode":"bios"},"cpu":{"architecture":"x86_64","count":4,"flags":["fpu","vme","de","pse","tsc","msr","pae","mce","cx8","apic","sep","mtrr","pge","mca","cmov","pat","pse36","clflush","mmx","fxsr","sse","sse2","ss","syscall","nx","pdpe1gb","rdtscp","lm","constant_tsc","arch_perfmon","rep_good","nopl","xtopology","cpuid","tsc_known_freq","pni","pclmulqdq","vmx","ssse3","fma","cx16","pcid","sse4_1","sse4_2","x2apic","movbe","popcnt","tsc_deadline_timer","aes","xsave","avx","f16c","rdrand","hypervisor","lahf_lm","abm","3dnowprefetch","cpuid_fault","invpcid_single","pti","ssbd","ibrs","ibpb","stibp","tpr_shadow","vnmi","flexpriority","ept","vpid","ept_ad","fsgsbase","tsc_adjust","bmi1","hle","avx2","smep","bmi2","erms","invpcid","rtm","mpx","avx512f","avx512dq","rdseed","adx","smap","clflushopt","clwb","avx512cd","avx512bw","avx512vl","xsaveopt","xsavec","xgetbv1","xsaves","arat","umip","pku","ospke","md_clear","arch_capabilities"],"frequency":2095.076,"model_name":"Intel(R) + Xeon(R) Gold 6152 CPU @ 2.10GHz"},"disks":[{"by_path":"/dev/disk/by-path/pci-0000:00:06.0","drive_type":"HDD","model":"unknown","name":"vda","path":"/dev/vda","serial":"unknown","size_bytes":21474836480,"vendor":"0x1af4","wwn":"unknown"}],"hostname":"test-infra-cluster-master-0.redhat.com","interfaces":[{"flags":["up","broadcast","multicast"],"has_carrier":true,"ipv4_addresses":["192.168.126.10/24"],"ipv6_addresses":["fe80::5054:ff:fe0e:ce92/64"],"mac_address":"52:54:00:0e:ce:92","mtu":1500,"name":"eth0","product":"0x0001","speed_mbps":-1,"vendor":"0x1af4"},{"flags":["up","broadcast","multicast"],"has_carrier":true,"ipv4_addresses":["192.168.140.105/24"],"ipv6_addresses":["fe80::5054:ff:febe:5/64"],"mac_address":"52:54:00:be:00:05","mtu":1500,"name":"eth1","product":"0x0001","speed_mbps":-1,"vendor":"0x1af4"}],"memory":{"physical_bytes":17809014784,"usable_bytes":17378619392},"system_vendor":{"manufacturer":"Red + Hat","product_name":"KVM"}}' + kind: Host + progress: + current_stage: '' + stage_started_at: '0001-01-01T00:00:00' + stage_updated_at: '0001-01-01T00:00:00' + progress_stages: + - Starting installation + - Installing + - Writing image to disk + - Rebooting + - Configuring + - Joined + - Done + requested_hostname: test-infra-cluster-master-0.redhat.com + role: master + status: error + status_info: Cluster is no longer preparing for installation + status_updated_at: '2020-07-22T12:18:34' + updated_at: '2020-07-22T12:18:53' + validations_info: '{"hardware":[{"id":"has-inventory","status":"success","message":"Valid + inventory exists for the host"},{"id":"has-min-cpu-cores","status":"success","message":"Sufficient + minimum CPU 
cores"},{"id":"has-min-memory","status":"success","message":"Sufficient + minimum memory"},{"id":"has-min-valid-disks","status":"success","message":"Sufficient + number of disks with required size"},{"id":"has-cpu-cores-for-role","status":"success","message":"Sufficient + CPU cores for role master"},{"id":"has-memory-for-role","status":"success","message":"Sufficient + memory for role master"},{"id":"hostname-unique","status":"success","message":"Hostname + test-infra-cluster-master-0.redhat.com is unique in cluster"},{"id":"hostname-valid","status":"success","message":"Hostname + test-infra-cluster-master-0.redhat.com is allowed"}],"network":[{"id":"connected","status":"success","message":"Host + is connected"},{"id":"machine-cidr-defined","status":"success","message":"Machine + network CIDR is defined"},{"id":"belongs-to-machine-cidr","status":"success","message":"Host + belongs to machine network CIDR 192.168.126.0/24"}],"role":[{"id":"role-defined","status":"success","message":"Role + is defined"}]}' diff --git a/render_files.py b/render_files.py index 78cdb03..6e2583d 100644 --- a/render_files.py +++ b/render_files.py @@ -1,19 +1,23 @@ #!/usr/bin/env python import argparse -import json import logging import subprocess import sys import os +import re +import base64 +import json +import yaml import boto3 from botocore.exceptions import NoCredentialsError +import utils +import test_utils +BMH_CR_FILE_PATTERN = 'openshift-cluster-api_hosts' -def get_s3_client(s3_endpoint_url): - aws_access_key_id = os.environ.get("aws_access_key_id", "accessKey1") - aws_secret_access_key = os.environ.get("aws_secret_access_key", "verySecretKey1") +def get_s3_client(s3_endpoint_url, aws_access_key_id, aws_secret_access_key): s3_client = boto3.client( 's3', aws_access_key_id=aws_access_key_id, @@ -33,25 +37,85 @@ def upload_to_aws(s3_client, local_file, bucket, s3_file): return False -def remove_bmo_provisioning(ignition_file): - found = False +def is_bmh_cr_file(path): + if BMH_CR_FILE_PATTERN in path: + return True + return False + + +def get_bmh_dict_from_file(file_data): + source_string = file_data['contents']['source'] + base64_string = re.split("base64,", source_string)[1] + decoded_string = base64.b64decode(base64_string).decode() + return yaml.safe_load(decoded_string) + + +def prepare_annotation_dict(status_dict, hosts_list, is_master): + inventory_host = utils.find_available_inventory_host(hosts_list, is_master) + if inventory_host is None: + return None + + annot_dict = dict.copy(status_dict) + nics = inventory_host.get_inventory_host_nics_data() + cpu = inventory_host.get_inventory_host_cpu_data() + storage = inventory_host.get_inventory_host_storage_data() + ram = inventory_host.get_inventory_host_memory() + hostname = inventory_host.get_inventory_host_name() + system_vendor = inventory_host.get_inventory_host_system_vendor() + hardware = {'nics': nics, 'cpu': cpu, 'storage': storage, 'ramMebibytes': ram, 'hostname': hostname, 'systemVendor': system_vendor} + annot_dict['hardware'] = hardware + hosts_list.remove(inventory_host) + return {'baremetalhost.metal3.io/status': json.dumps(annot_dict)} + + +def set_new_bmh_dict_in_file(file_data, bmh_dict): + decoded_string = yaml.dump(bmh_dict) + base64_string = base64.b64encode(decoded_string.encode()) + source_string = 'data:text/plain;charset=utf-8;' + 'base64,' + base64_string.decode() + file_data['contents']['source'] = source_string + + +def is_master_bmh(bmh_dict): + if "-master-" in bmh_dict['metadata']['name']: + return True + return False + 
+ +def update_credentials_name(bmh_dict): + bmh_dict['spec']['bmc']['credentialsName'] = '' + + +def update_bmh_cr_file(file_data, hosts_list): + bmh_dict = get_bmh_dict_from_file(file_data) + annot_dict = prepare_annotation_dict(bmh_dict['status'], hosts_list, is_master_bmh(bmh_dict)) + if annot_dict is not None: + # [TODO] - make sure that Kiren fix to openshift-installer is working before removing this fix in 4.6 + # update_credentials_name(bmh_dict) + bmh_dict['metadata']['annotations'] = annot_dict + set_new_bmh_dict_in_file(file_data, bmh_dict) + + +def update_bmh_files(ignition_file, cluster_id, inventory_endpoint): + if inventory_endpoint: + hosts_list = utils.get_inventory_hosts(inventory_endpoint, cluster_id) + else: + hosts_list = test_utils.get_test_list_hosts(cluster_id) + with open(ignition_file, "r") as file_obj: data = json.load(file_obj) storage_files = data['storage']['files'] - # Iterate through a copy of the list - for file_data in storage_files[:]: - if 'baremetal-provisioning-config' in file_data['path']: - storage_files.remove(file_data) - found = True - break - if found: - with open(ignition_file, "w") as file_obj: - json.dump(data, file_obj) + # since we don't remove file for now, we don't need to iterate through copy + for file_data in storage_files: + if is_bmh_cr_file(file_data['path']): + update_bmh_cr_file(file_data, hosts_list) + + with open(ignition_file, "w") as file_obj: + json.dump(data, file_obj) -def upload_to_s3(s3_endpoint_url, bucket, install_dir): - s3_client = get_s3_client(s3_endpoint_url) - prefix = os.environ.get("CLUSTER_ID") +def upload_to_s3(s3_endpoint_url, bucket, aws_access_key_id, aws_secret_access_key, install_dir, cluster_id): + s3_client = get_s3_client(s3_endpoint_url, aws_access_key_id, aws_secret_access_key) + prefix = cluster_id for root, _, files in os.walk(install_dir): for file_name in files: @@ -82,10 +146,17 @@ def main(): args = parser.parse_args() work_dir = os.environ.get("WORK_DIR") + install_config = os.environ.get("INSTALLER_CONFIG") + cluster_id = os.environ.get("CLUSTER_ID") + inventory_endpoint = os.environ.get("INVENTORY_ENDPOINT") + s3_endpoint_url = os.environ.get("S3_ENDPOINT_URL", args.s3_endpoint_url) + bucket = os.environ.get('S3_BUCKET', args.s3_bucket) + aws_access_key_id = os.environ.get("aws_access_key_id", "accessKey1") + aws_secret_access_key = os.environ.get("aws_secret_access_key", "verySecretKey1") + if not work_dir: raise Exception("working directory was not defined") - install_config = os.environ.get("INSTALLER_CONFIG") config_dir = os.path.join(work_dir, "installer_dir") if install_config: subprocess.check_output(["mkdir", "-p", config_dir]) @@ -97,21 +168,32 @@ def main(): if not os.path.isfile(os.path.join(config_dir, 'install-config.yaml')): raise Exception("install config file not located in installer dir") + # [TODO] - add extracting openshift-baremetal-install from release image and using it instead of locally compile openshift-intall + # try: + # command = "%s/oc adm release extract --command=openshift-baremetal-install --to=%s \ + # quay.io/openshift-release-dev/ocp-release-nightly@sha256:ba2e09a06c7fca19e162286055c6922135049e6b91f71e2a646738b2d7ab9983" \ + # % (work_dir, work_dir) + # subprocess.check_output(command, shell=True, stderr=sys.stdout) + # except Exception as ex: + # raise Exception('Failed to extract installer, exception: {}'.format(ex)) + + # command = "OPENSHIFT_INSTALL_INVOKER=\"assisted-installer\" %s/openshift-baremetal-install create ignition-configs --dir %s" \ + # % 
(work_dir, config_dir) command = "OPENSHIFT_INSTALL_INVOKER=\"assisted-installer\" %s/openshift-install create ignition-configs --dir %s" % (work_dir, config_dir) try: subprocess.check_output(command, shell=True, stderr=sys.stdout) except Exception as ex: raise Exception('Failed to generate files, exception: {}'.format(ex)) + # cluster_id = os.environ.get("CLUSTER_ID") try: - remove_bmo_provisioning("%s/bootstrap.ign" % config_dir) + # inventory_endpoint = os.environ.get("INVENTORY_ENDPOINT") + update_bmh_files("%s/bootstrap.ign" % config_dir, cluster_id, inventory_endpoint) except Exception as ex: - raise Exception('Failed to remove BMO prosioning configuration from bootstrap ignition, exception: {}'.format(ex)) + raise Exception('Failed to update BMH CRs in bootstrap ignition, exception: {}'.format(ex)) - s3_endpoint_url = os.environ.get("S3_ENDPOINT_URL", args.s3_endpoint_url) if s3_endpoint_url: - bucket = os.environ.get('S3_BUCKET', args.s3_bucket) - upload_to_s3(s3_endpoint_url, bucket, config_dir) + upload_to_s3(s3_endpoint_url, bucket, aws_access_key_id, aws_secret_access_key, config_dir, cluster_id) else: # for debug purposes debug_print_upload_to_s3(config_dir) diff --git a/reports/render_files1.stats b/reports/render_files1.stats index 55142c0..c851e34 100644 Binary files a/reports/render_files1.stats and b/reports/render_files1.stats differ diff --git a/requirements.txt b/requirements.txt index 22ca957..6f627d7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,3 @@ boto3 botocore +pyyaml diff --git a/test_utils.py b/test_utils.py new file mode 100644 index 0000000..a92257f --- /dev/null +++ b/test_utils.py @@ -0,0 +1,8 @@ +import utils +import yaml + + +def get_test_list_hosts(cluster_id): + with open('/data/installer_dir/test_hosts_list.yaml', 'r') as file: + hosts_list = yaml.full_load(file) + return [utils.InventoryHost(host) for host in hosts_list if host['status'] != 'disabled'] diff --git a/utils.py b/utils.py new file mode 100644 index 0000000..3721b3f --- /dev/null +++ b/utils.py @@ -0,0 +1,60 @@ +import os +import json +from bm_inventory_client import ApiClient, Configuration, api, models + + + +class InventoryHost: + + def __init__(self, host_dict): + self._host = models.Host(**host_dict) + self._inventory = models.Inventory(**json.loads(self._host.inventory)) + + + def get_inventory_host_nics_data(self): + interfaces_list = [models.Interface(**interface) for interface in self._inventory.interfaces] + return [{'name': interface.name, 'model': interface.product, 'mac': interface.mac_address, 'ip': interface.ipv4_addresses[0].split("/")[0], 'speed': interface.speed_mbps} for interface in interfaces_list] + + + def get_inventory_host_cpu_data(self): + cpu = models.Cpu(**self._inventory.cpu) + return {'model': cpu.model_name, 'arch': cpu.architecture, 'flags': cpu.flags, 'clockMegahertz': cpu.frequency, 'count': cpu.count} + + + def get_inventory_host_storage_data(self): + disks_list = [models.Disk(**disk) for disk in self._inventory.disks] + return [{'name': disk.name, 'vendor': disk.vendor, 'sizeBytes': disk.size_bytes, 'model': disk.model, 'wwn': disk.wwn, 'hctl': disk.hctl, 'serialNumber': disk.serial, 'rotational': True if disk.drive_type == 'HDD' else False} for disk in disks_list] + + + def get_inventory_host_memory(self): + memory = models.Memory(**self._inventory.memory) + return int(memory.physical_bytes / 1024 / 1024) + + + def get_inventory_host_name(self): + return self._host.requested_hostname + + + def get_inventory_host_system_vendor(self): + 
system_vendor = models.SystemVendor(**self._inventory.system_vendor) + return {'manufacturer': system_vendor.manufacturer, 'productName': system_vendor.product_name, 'serialNumber': system_vendor.serial_number} + + def is_role(self, role): + return self._host.role == role + + +def get_inventory_hosts(inventory_endpoint, cluster_id): + configs = Configuration() + configs.host = inventory_endpoint + apiClient = ApiClient(configuration=configs) + client = api.InstallerApi(api_client=apiClient) + hosts_list = client.list_hosts(cluster_id=cluster_id) + return [InventoryHost(host) for host in hosts_list if host['status'] != 'disabled'] + + +def find_available_inventory_host(hosts_list, is_master): + role = 'master' if is_master else 'worker' + for host in hosts_list: + if host.is_role(role): + return host + return None
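
Reviewer note: below is a minimal, standalone sketch of the round-trip that get_bmh_dict_from_file, prepare_annotation_dict and set_new_bmh_dict_in_file perform on each openshift-cluster-api_hosts entry in bootstrap.ign. The sample BareMetalHost manifest, its path and the hardware payload are hypothetical placeholders rather than installer output; only pyyaml and the standard library are required.

import base64
import json

import yaml

# Hypothetical ignition "storage.files" entry wrapping a BareMetalHost CR,
# named so that is_bmh_cr_file() would match its path.
bmh_yaml = yaml.dump({
    'apiVersion': 'metal3.io/v1alpha1',
    'kind': 'BareMetalHost',
    'metadata': {'name': 'test-infra-cluster-master-0'},
    'spec': {'bmc': {'credentialsName': 'master-0-bmc-secret'}},
    'status': {},
})
file_data = {
    'path': '/opt/openshift/openshift/99_openshift-cluster-api_hosts-0.yaml',
    'contents': {
        'source': 'data:text/plain;charset=utf-8;base64,'
                  + base64.b64encode(bmh_yaml.encode()).decode(),
    },
}

# Decode the embedded CR (what get_bmh_dict_from_file does).
source = file_data['contents']['source']
bmh = yaml.safe_load(base64.b64decode(source.split('base64,')[1]).decode())

# Build the status annotation; prepare_annotation_dict fills 'hardware' from the
# matching inventory host, so a placeholder stands in for it here.
annot = dict(bmh['status'])
annot['hardware'] = {'hostname': 'test-infra-cluster-master-0.redhat.com'}
bmh['metadata']['annotations'] = {'baremetalhost.metal3.io/status': json.dumps(annot)}

# Re-encode and put the CR back into the ignition entry
# (what set_new_bmh_dict_in_file does).
file_data['contents']['source'] = (
    'data:text/plain;charset=utf-8;base64,'
    + base64.b64encode(yaml.dump(bmh).encode()).decode()
)
print(file_data['contents']['source'][:80] + '...')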
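
A short usage sketch of the new helpers against the bundled fixture, i.e. the code path update_bmh_files takes when INVENTORY_ENDPOINT is unset. It assumes the generated bm-inventory client (installed from the inventory image in the Dockerfile) is importable and that test_hosts_list.yaml sits at the path test_utils hard-codes inside the container.

import test_utils
import utils

# Cluster id taken from the bundled test_hosts_list.yaml fixture.
cluster_id = '51724237-025b-4693-993f-49d321ec2ea5'

# Fallback path in update_bmh_files(): no inventory endpoint, so load the fixture.
hosts = test_utils.get_test_list_hosts(cluster_id)

# Pick the next unused master, as prepare_annotation_dict() does, and read the
# fields that end up in the baremetalhost.metal3.io/status annotation.
host = utils.find_available_inventory_host(hosts, is_master=True)
print(host.get_inventory_host_name())
print(host.get_inventory_host_cpu_data())
print(host.get_inventory_host_memory())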