Skip to content
This repository has been archived by the owner on Jun 25, 2024. It is now read-only.

Commit

Permalink
Merging inventory secrets
Browse files Browse the repository at this point in the history
Kuttl tests were expanded to cover the new functionality.

Signed-off-by: Jiri Podivin <[email protected]>
  • Loading branch information
jpodivin committed Feb 14, 2024
1 parent 67cdb4b commit 5a290df
Show file tree
Hide file tree
Showing 7 changed files with 1,415 additions and 23 deletions.
46 changes: 31 additions & 15 deletions controllers/openstackdataplanedeployment_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@ package controllers
import (
"context"
"fmt"
"strings"
"time"

corev1 "k8s.io/api/core/v1"
Expand Down Expand Up @@ -208,6 +207,13 @@ func (r *OpenStackDataPlaneDeploymentReconciler) Reconcile(ctx context.Context,

globalInventorySecrets := []string{}

// Gathering individual inventory secrets for later use
for _, nodeSet := range nodeSets.Items {
nodeSetSecretInv := fmt.Sprintf("dataplanenodeset-%s", nodeSet.Name)
// Add inventory secret to list of inventories for global services
globalInventorySecrets = append(globalInventorySecrets, nodeSetSecretInv)
}

// Deploy each nodeSet
// The loop starts and checks NodeSet deployments sequentially. However, after they
// are started, they are running in parallel, since the loop does not wait
Expand Down Expand Up @@ -245,9 +251,6 @@ func (r *OpenStackDataPlaneDeploymentReconciler) Reconcile(ctx context.Context,
InventorySecrets: []string{nodeSetSecretInv},
}

// Add inventory secret to list of inventories for global services
globalInventorySecrets = append(globalInventorySecrets, nodeSetSecretInv)

// When ServicesOverride is set on the OpenStackDataPlaneDeployment,
// deploy those services for each OpenStackDataPlaneNodeSet. Otherwise,
// deploy with the OpenStackDataPlaneNodeSet's Services.
Expand Down Expand Up @@ -280,16 +283,15 @@ func (r *OpenStackDataPlaneDeploymentReconciler) Reconcile(ctx context.Context,
condition.DeploymentReadyMessage)
}

// Gathering mounts that may be inventories
for _, mount := range nodeSet.GetAnsibleEESpec().ExtraMounts {
for _, mountPoint := range mount.Mounts {
if strings.HasPrefix(mountPoint.MountPath, "/runner/inventory/") {
globalAnsibleEESpec.ExtraMounts = append(globalAnsibleEESpec.ExtraMounts, mount)
break
}
}
}

}
if haveError {
return ctrl.Result{}, err
}

if shouldRequeue {
logger.Info("Not all NodeSets done for OpenStackDeployment")

Check failure on line 293 in controllers/openstackdataplanedeployment_controller.go

View workflow job for this annotation

GitHub Actions / github (govet, golint and gotest)

undefined: logger
return ctrl.Result{}, nil
}

// If we have any services we want to deploy everywhere, deploy them now
Expand All @@ -307,7 +309,7 @@ func (r *OpenStackDataPlaneDeploymentReconciler) Reconcile(ctx context.Context,

deployResult, err := globalDeployer.Deploy(instance.Spec.AllNodeSetsServices)
if err != nil {
util.LogErrorForObject(helper, err, fmt.Sprintf("OpenStackDeployment error for all nodesets due to %s", err), instance)
util.LogErrorForObject(helper, err, "OpenStackDeployment error for all nodesets", instance)
haveError = true
instance.Status.Conditions.MarkFalse(
condition.ReadyCondition,
Expand All @@ -319,7 +321,7 @@ func (r *OpenStackDataPlaneDeploymentReconciler) Reconcile(ctx context.Context,
if deployResult != nil {
shouldRequeue = true
} else {
logger.Info("Global OpenStackDeployment succeeded", "NodeSet")
logger.Info("Global OpenStackDeployment succeeded")

Check failure on line 324 in controllers/openstackdataplanedeployment_controller.go

View workflow job for this annotation

GitHub Actions / github (govet, golint and gotest)

undefined: logger
}
}

Expand Down Expand Up @@ -382,6 +384,20 @@ func (r *OpenStackDataPlaneDeploymentReconciler) setHashes(
}
}

// Now do the same for global services
for _, serviceName := range instance.Spec.AllNodeSetsServices {
err = deployment.GetDeploymentHashesForService(
ctx,
helper,
instance.Namespace,
serviceName,
instance.Status.ConfigMapHashes,
instance.Status.SecretHashes)
if err != nil {
return err
}
}

return nil
}

Expand Down
10 changes: 10 additions & 0 deletions docs/assemblies/custom_resources.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -297,6 +297,11 @@ OpenStackDataPlaneServiceSpec defines the desired state of OpenStackDataPlaneSer
| AddCertMounts - Whether to add cert mounts
| bool
| true
| deployOnAllNodeSets
| DeployOnAllNodeSets - whether the service should be deployed across all nodesets. This will override the default target of a service play, setting it to 'all'.
| *bool
| false
|===
<<custom-resources,Back to Custom Resources>>
Expand Down Expand Up @@ -642,6 +647,11 @@ OpenStackDataPlaneDeploymentSpec defines the desired state of OpenStackDataPlane
| Time before the deployment is requeued in seconds
| int
| true
| allNodeSetsServices
| Services which should be deployed on all nodesets
| []string
| true
|===
<<custom-resources,Back to Custom Resources>>
Expand Down
28 changes: 20 additions & 8 deletions pkg/util/ansible_execution.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,11 @@ func AnsibleExecution(
) error {
var err error
var cmdLineArguments strings.Builder
var inventoryVolume corev1.Volume
var inventoryName string
var inventoryMountPath string
log := helper.GetLogger()
ansibleEEMounts := storage.VolMounts{}

ansibleEE, err := GetAnsibleExecution(ctx, helper, obj, service.Spec.Label)
if err != nil && !k8serrors.IsNotFound(err) {
Expand Down Expand Up @@ -110,7 +114,6 @@ func AnsibleExecution(
log.Info(fmt.Sprintf("for service %s, substituting existing ansible play host with 'all'.", service.Name))
}

ansibleEEMounts := storage.VolMounts{}
sshKeyVolume := corev1.Volume{
Name: "ssh-key",
VolumeSource: corev1.VolumeSource{
Expand All @@ -130,10 +133,21 @@ func AnsibleExecution(
MountPath: "/runner/env/ssh_key",
SubPath: "ssh_key",
}
// Mount ssh secrets
ansibleEEMounts.Volumes = append(ansibleEEMounts.Volumes, sshKeyVolume)
ansibleEEMounts.Mounts = append(ansibleEEMounts.Mounts, sshKeyMount)

// Mounting inventory secrets
for inventoryIndex, inventorySecret := range inventorySecrets {
inventoryName := fmt.Sprintf("inventory-%d", inventoryIndex)
if service.Spec.DeployOnAllNodeSets != nil && *service.Spec.DeployOnAllNodeSets {
inventoryName = fmt.Sprintf("inventory-%d", inventoryIndex)
inventoryMountPath = fmt.Sprintf("/runner/inventory/%s", inventoryName)
} else {
inventoryName = "inventory"
inventoryMountPath = "/runner/inventory/hosts"
}

inventoryVolume := corev1.Volume{
inventoryVolume = corev1.Volume{
Name: inventoryName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
Expand All @@ -149,16 +163,14 @@ func AnsibleExecution(
}
inventoryMount := corev1.VolumeMount{
Name: inventoryName,
MountPath: fmt.Sprintf("/runner/inventory/hosts_%d", inventoryIndex),
MountPath: inventoryMountPath,
SubPath: inventoryName,
}
ansibleEEMounts.Volumes = append(ansibleEEMounts.Volumes, inventoryVolume)
// Inventory mount
ansibleEEMounts.Mounts = append(ansibleEEMounts.Mounts, inventoryMount)
ansibleEEMounts.Volumes = append(ansibleEEMounts.Volumes, inventoryVolume)
}

ansibleEEMounts.Volumes = append(ansibleEEMounts.Volumes, sshKeyVolume)
ansibleEEMounts.Mounts = append(ansibleEEMounts.Mounts, sshKeyMount)

ansibleEE.Spec.ExtraMounts = append(aeeSpec.ExtraMounts, []storage.VolMounts{ansibleEEMounts}...)
ansibleEE.Spec.Env = aeeSpec.Env

Expand Down
131 changes: 131 additions & 0 deletions tests/kuttl/tests/dataplane-deploy-global-service-test/00-assert.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
apiVersion: dataplane.openstack.org/v1beta1
kind: OpenStackDataPlaneNodeSet
metadata:
name: edpm-compute-global-service
namespace: openstack
spec:
preProvisioned: true
services:
- download-cache
- bootstrap
- configure-network
- validate-network
- install-os
- configure-os
- run-os
- install-certs
- ovn
- neutron-metadata
- neutron-ovn
- neutron-sriov
- neutron-dhcp
- libvirt
- nova
tlsEnabled: false
env:
- name: ANSIBLE_FORCE_COLOR
value: "True"
nodes:
edpm-compute-0:
hostName: edpm-compute-0
ansible:
ansibleHost: 192.168.122.100
ansibleUser: cloud-admin
ansibleVars:
ctlplane_ip: 192.168.122.100
internalapi_ip: 172.17.0.100
storage_ip: 172.18.0.100
tenant_ip: 172.19.0.100
edpm-compute-1:
hostName: edpm-compute-1
ansible:
ansibleHost: 192.168.122.200
ansibleUser: cloud-admin
ansibleVars:
ctlplane_ip: 192.168.122.200
internalapi_ip: 172.17.0.200
storage_ip: 172.18.0.200
tenant_ip: 172.19.0.200
nodeTemplate:
ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret
managementNetwork: ctlplane
ansible:
ansibleUser: cloud-admin
ansiblePort: 22
ansibleVars:
service_net_map:
nova_api_network: internalapi
nova_libvirt_network: internalapi
timesync_ntp_servers:
- hostname: clock.redhat.com
# edpm_network_config
# Default nic config template for a EDPM compute node
# These vars are edpm_network_config role vars
edpm_network_config_hide_sensitive_logs: false
edpm_network_config_template: templates/single_nic_vlans/single_nic_vlans.j2
# These vars are for the network config templates themselves and are
# considered EDPM network defaults.
neutron_physical_bridge_name: br-ex
neutron_public_interface_name: eth0
ctlplane_mtu: 1500
ctlplane_cidr: 24
ctlplane_gateway_ip: 192.168.122.1
ctlplane_host_routes:
- ip_netmask: 0.0.0.0/0
next_hop: 192.168.122.1
external_mtu: 1500
external_vlan_id: 44
external_cidr: '24'
external_host_routes: []
internalapi_mtu: 1500
internalapi_vlan_id: 20
internalapi_cidr: '24'
internalapi_host_routes: []
storage_mtu: 1500
storage_vlan_id: 21
storage_cidr: '24'
storage_host_routes: []
tenant_mtu: 1500
tenant_vlan_id: 22
tenant_cidr: '24'
tenant_host_routes: []
role_networks:
- InternalApi
- Storage
- Tenant
networks_lower:
External: external
InternalApi: internalapi
Storage: storage
Tenant: tenant
# edpm_nodes_validation
edpm_nodes_validation_validate_controllers_icmp: false
edpm_nodes_validation_validate_gateway_icmp: false
ctlplane_dns_nameservers:
- 192.168.122.1
dns_search_domains: []
gather_facts: false
enable_debug: false
# edpm firewall, change the allowed CIDR if needed
edpm_sshd_configure_firewall: true
edpm_sshd_allowed_ranges: ['192.168.122.0/24']
# SELinux module
edpm_selinux_mode: enforcing
status:
conditions:
- message: Deployment not started
reason: Requested
status: "False"
type: Ready
- message: Deployment not started
reason: Requested
status: "False"
type: DeploymentReady
- message: Input data complete
reason: Ready
status: "True"
type: InputReady
- message: Setup complete
reason: Ready
status: "True"
type: SetupReady
Loading

0 comments on commit 5a290df

Please sign in to comment.