From bb7e8f9a7dfe283bc34c331e78c62f4bbce1c5d6 Mon Sep 17 00:00:00 2001 From: benoist-s Date: Tue, 9 Oct 2018 17:43:24 +0200 Subject: [PATCH 01/19] Add yorc.nodes.google.PersistentDisk node type Add PersistentDisk Google Terraform resource --- data/tosca/yorc-google-types.yml | 30 +++++- helper/sizeutil/sizeutil.go | 43 +++++++++ prov/terraform/google/generator.go | 5 + prov/terraform/google/persistent_disk.go | 112 +++++++++++++++++++++++ prov/terraform/google/resources.go | 12 +++ prov/terraform/openstack/bs_volume.go | 28 +----- 6 files changed, 204 insertions(+), 26 deletions(-) create mode 100644 helper/sizeutil/sizeutil.go create mode 100644 prov/terraform/google/persistent_disk.go diff --git a/data/tosca/yorc-google-types.yml b/data/tosca/yorc-google-types.yml index 3669d71d0..edaa6a55e 100644 --- a/data/tosca/yorc-google-types.yml +++ b/data/tosca/yorc-google-types.yml @@ -150,8 +150,6 @@ node_types: description: > Comma-separated list of label KEY=VALUE pairs to assign to the Compute Address. required: false - entry_schema: - type: string region: type: string description: > @@ -180,3 +178,31 @@ node_types: type: string description: The compute IP address. + yorc.nodes.google.PersistentDisk: + derived_from: tosca.nodes.BlockStorage + properties: + # See https://www.terraform.io/docs/providers/google/r/compute_disk.html + description: + type: string + description: > + An optional description of this resource. + required: false + type: + type: string + description: > + URL of the disk type resource describing which disk type to use to create the disk. + If this field is not specified, it is assumed to be pd-standard for Standard Persistent Disk HDD. + pd-ssd is for solid-state drives (SSD). + required: false + labels: + type: string + description: > + Comma-separated list of label KEY=VALUE pairs to assign to the Compute Disk. + required: false + zone: + type: string + description: > + A reference to the zone where the disk resides. In any case the disk must be on the same zone as the associated Compute instances. + required: false + + diff --git a/helper/sizeutil/sizeutil.go b/helper/sizeutil/sizeutil.go new file mode 100644 index 000000000..728f1c193 --- /dev/null +++ b/helper/sizeutil/sizeutil.go @@ -0,0 +1,43 @@ +// Copyright 2018 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package sizeutil + +import ( + "github.com/dustin/go-humanize" + "github.com/pkg/errors" + "github.com/ystia/yorc/helper/mathutil" + "strconv" +) + +// ConvertToGB allows to convert a MB size as "42" or a human readable size as "42MB" or "42 KB" into GB +func ConvertToGB(size string) (int, error) { + // Default size unit is MB + mSize, err := strconv.Atoi(size) + // Not an int value, so maybe a human readable size: we try to retrieve bytes + if err != nil { + var bsize uint64 + bsize, err = humanize.ParseBytes(size) + if err != nil { + return 0, errors.Errorf("Can't convert size to bytes value: %v", err) + } + gSize := float64(bsize) / humanize.GByte + gSize = mathutil.Round(gSize, 0, 0) + return int(gSize), nil + } + + gSize := float64(mSize) / 1000 + gSize = mathutil.Round(gSize, 0, 0) + return int(gSize), nil +} diff --git a/prov/terraform/google/generator.go b/prov/terraform/google/generator.go index 14949a7f1..b8050662b 100644 --- a/prov/terraform/google/generator.go +++ b/prov/terraform/google/generator.go @@ -148,6 +148,11 @@ func (g *googleGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg if err != nil { return false, nil, nil, err } + case "yorc.nodes.google.PersistentDisk": + err = g.generatePersistentDisk(ctx, kv, cfg, deploymentID, nodeName, instanceName, instNb, &infrastructure, outputs) + if err != nil { + return false, nil, nil, err + } default: return false, nil, nil, errors.Errorf("Unsupported node type '%s' for node '%s' in deployment '%s'", nodeType, nodeName, deploymentID) } diff --git a/prov/terraform/google/persistent_disk.go b/prov/terraform/google/persistent_disk.go new file mode 100644 index 000000000..b136323ea --- /dev/null +++ b/prov/terraform/google/persistent_disk.go @@ -0,0 +1,112 @@ +// Copyright 2018 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package google + +import ( + "context" + "fmt" + "github.com/hashicorp/consul/api" + "github.com/pkg/errors" + "github.com/ystia/yorc/config" + "github.com/ystia/yorc/deployments" + "github.com/ystia/yorc/helper/consulutil" + "github.com/ystia/yorc/helper/sizeutil" + "github.com/ystia/yorc/log" + "github.com/ystia/yorc/prov/terraform/commons" + "path" + "strings" +) + +func (g *googleGenerator) generatePersistentDisk(ctx context.Context, kv *api.KV, + cfg config.Configuration, deploymentID, nodeName, instanceName string, instanceID int, + infrastructure *commons.Infrastructure, + outputs map[string]string) error { + + nodeType, err := deployments.GetNodeType(kv, deploymentID, nodeName) + if err != nil { + return err + } + if nodeType != "yorc.nodes.google.PersistentDisk" { + return errors.Errorf("Unsupported node type for %q: %s", nodeName, nodeType) + } + + instancesPrefix := path.Join(consulutil.DeploymentKVPrefix, deploymentID, + "topology", "instances") + instancesKey := path.Join(instancesPrefix, nodeName) + + persistentDisk := &PersistentDisk{} + var size, volumes string + stringParams := []struct { + pAttr *string + propertyName string + mandatory bool + }{ + {&volumes, "volume_id", false}, + {&persistentDisk.Description, "description", false}, + {&persistentDisk.Snapshot, "snapshot_id", false}, + {&persistentDisk.Type, "type", false}, + {&persistentDisk.Zone, "zone", false}, + {&size, "size", false}, + } + + for _, stringParam := range stringParams { + if *stringParam.pAttr, err = deployments.GetStringNodeProperty(kv, deploymentID, nodeName, + stringParam.propertyName, stringParam.mandatory); err != nil { + return err + } + } + + var volumeID string + if volumes != "" { + tabVol := strings.Split(volumes, ",") + if len(tabVol) > instanceID { + volumeID = strings.TrimSpace(tabVol[instanceID]) + } + } + + persistentDisk.Labels, err = deployments.GetKeyValuePairsNodeProperty(kv, deploymentID, nodeName, "labels") + if err != nil { + return err + } + + if size != "" { + // Default size unit is MB + log.Debugf("Initial size property value (default is MB): %q", size) + persistentDisk.Size, err = sizeutil.ConvertToGB(size) + if err != nil { + return err + } + log.Debugf("Computed size (in GB): %d", persistentDisk.Size) + } + + name := strings.ToLower(cfg.ResourcesPrefix + nodeName + "-" + instanceName) + persistentDisk.Name = strings.Replace(name, "_", "-", -1) + + // Add google persistent disk resource if not any volume ID is provided + if volumeID == "" { + commons.AddResource(infrastructure, "google_compute_disk", persistentDisk.Name, persistentDisk) + volumeID = fmt.Sprintf("${google_compute_disk.%s.name}", persistentDisk.Name) + } + + // Provide Consul Key for attribute volume_id + consulKeys := commons.ConsulKeys{Keys: []commons.ConsulKey{}} + consulKeyIPAddr := commons.ConsulKey{ + Path: path.Join(instancesKey, instanceName, "/attributes/volume_id"), + Value: volumeID} + + consulKeys.Keys = append(consulKeys.Keys, consulKeyIPAddr) + commons.AddResource(infrastructure, "consul_keys", persistentDisk.Name, &consulKeys) + return nil +} diff --git a/prov/terraform/google/resources.go b/prov/terraform/google/resources.go index c42afcc3b..24f100841 100644 --- a/prov/terraform/google/resources.go +++ b/prov/terraform/google/resources.go @@ -84,3 +84,15 @@ type ComputeAddress struct { Labels map[string]string `json:"labels,omitempty"` Project string `json:"project,omitempty"` } + +// PersistentDisk represents a Google persistent disk +// See 
https://www.terraform.io/docs/providers/google/r/compute_disk.html +type PersistentDisk struct { + Name string `json:"name"` + Size int `json:"size,omitempty"` + Description string `json:"description,omitempty"` + Type string `json:"type,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Zone string `json:"zone,omitempty"` + Snapshot string `json:"snapshot,omitempty"` +} diff --git a/prov/terraform/openstack/bs_volume.go b/prov/terraform/openstack/bs_volume.go index 5192803b6..eb97c75a5 100644 --- a/prov/terraform/openstack/bs_volume.go +++ b/prov/terraform/openstack/bs_volume.go @@ -15,14 +15,11 @@ package openstack import ( - "strconv" - - "github.com/dustin/go-humanize" "github.com/hashicorp/consul/api" "github.com/pkg/errors" "github.com/ystia/yorc/config" - "github.com/ystia/yorc/helper/mathutil" + "github.com/ystia/yorc/helper/sizeutil" "github.com/ystia/yorc/log" ) @@ -50,28 +47,11 @@ func (g *osGenerator) generateOSBSVolume(kv *api.KV, cfg config.Configuration, u } // Default size unit is MB log.Debugf("Size form consul is %q", size) - mSize, err := strconv.Atoi(size) + volume.Size, err = sizeutil.ConvertToGB(size) if err != nil { - var bsize uint64 - bsize, err = humanize.ParseBytes(size) - if err != nil { - return volume, errors.Errorf("Can't convert size to bytes value: %v", err) - } - // OpenStack needs the size in GB so we round it up. - gSize := float64(bsize) / humanize.GByte - log.Debugf("Computed size in GB: %f", gSize) - gSize = mathutil.Round(gSize, 0, 0) - log.Debugf("Computed size rounded in GB: %d", int(gSize)) - volume.Size = int(gSize) - } else { - log.Debugf("Size in MB: %d", mSize) - // OpenStack needs the size in GB so we round it up. - gSize := float64(mSize) / 1000 - log.Debugf("Computed size in GB: %f", gSize) - gSize = mathutil.Round(gSize, 0, 0) - log.Debugf("Computed size rounded in GB: %d", int(gSize)) - volume.Size = int(gSize) + return volume, err } + log.Debugf("Computed size rounded in GB: %d", volume.Size) region, err := g.getStringFormConsul(kv, url, "properties/region") if err != nil { From 526da9f8f81178eb04284fdb17a57e3b5da4315e Mon Sep 17 00:00:00 2001 From: stephane benoist <29753229+stebenoist@users.noreply.github.com> Date: Wed, 10 Oct 2018 17:11:01 +0200 Subject: [PATCH 02/19] Enrich yorc.google.nodes.PersistentDisk props&attributes --- data/tosca/yorc-google-types.yml | 48 +++++++++++ prov/terraform/google/compute_instance.go | 100 ++++++++++++++++++++-- prov/terraform/google/persistent_disk.go | 28 +++++- prov/terraform/google/resources.go | 33 +++++-- 4 files changed, 190 insertions(+), 19 deletions(-) diff --git a/data/tosca/yorc-google-types.yml b/data/tosca/yorc-google-types.yml index edaa6a55e..30b7c54e1 100644 --- a/data/tosca/yorc-google-types.yml +++ b/data/tosca/yorc-google-types.yml @@ -12,6 +12,19 @@ artifact_types: yorc.artifacts.google.Deployment: derived_from: tosca.artifacts.Deployment +data_types: + yorc.datatypes.google.EncryptionKey: + derived_from: tosca.datatypes.Root + properties: + raw_key: + type: string + required: false + description: Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource. + sha256: + type: string + required: false + description: The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. 
+ node_types: yorc.nodes.google.Compute: derived_from: yorc.nodes.Compute @@ -203,6 +216,41 @@ node_types: type: string description: > A reference to the zone where the disk resides. In any case the disk must be on the same zone as the associated Compute instances. + required: true + disk_encryption_key: + type: yorc.datatypes.google.EncryptionKey + description: > + Encrypts the disk using a customer-supplied encryption key. After you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later + (e.g. to create a disk snapshot or an image, or to attach the disk to a virtual machine). + Customer-supplied encryption keys do not protect access to metadata of the disk. + If you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later. + required: false + source_image: + type: string + description: > + The image from which to initialize this disk. This can be one of: the image's self_link, projects/{project}/global/images/{image}, projects/{project}/global/images/family/{family}, global/images/{image}, global/images/family/{family}, family/{family}, {project}/{family}, {project}/{image}, {family}, or {image}. + If referred by family, the images names must include the family name. If they don't, use the google_compute_image data source. For instance, the image centos-6-v20180104 includes its family name centos-6. These images can be referred by family name here. required: false + source_image_encryption_key: + type: yorc.datatypes.google.EncryptionKey + description: > + The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. + required: false + source_snapshot: + type: string + description: > + The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource. + For example, https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot, projects/project/global/snapshots/snapshot, global/snapshots/snapshot, snapshot are valid values + required: false + source_snapshot_encryption_key: + type: yorc.datatypes.google.EncryptionKey + description: > + The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key. 
+ required: false + attributes: + users: + type: string + description: > + Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance diff --git a/prov/terraform/google/compute_instance.go b/prov/terraform/google/compute_instance.go index 18100eb5d..984bc81d5 100644 --- a/prov/terraform/google/compute_instance.go +++ b/prov/terraform/google/compute_instance.go @@ -119,7 +119,7 @@ func (g *googleGenerator) generateComputeInstance(ctx context.Context, kv *api.K // External IP address can be static if required if hasStaticAddressReq { // Address Lookup - externalAddress, err = addressLookup(ctx, kv, deploymentID, instanceName, addressNode) + externalAddress, err = attributeLookup(ctx, kv, deploymentID, instanceName, addressNode, "ip_address") if err != nil { return err } @@ -178,6 +178,12 @@ func (g *googleGenerator) generateComputeInstance(ctx context.Context, kv *api.K // Add the compute instance commons.AddResource(infrastructure, "google_compute_instance", instance.Name, &instance) + // Add Persistent disks + err = addAttachedDisks(ctx, cfg, kv, deploymentID, nodeName, instanceName, instance.Name, infrastructure) + if err != nil { + return err + } + // Provide Consul Keys consulKeys := commons.ConsulKeys{Keys: []commons.ConsulKey{}} @@ -239,15 +245,16 @@ func (g *googleGenerator) generateComputeInstance(ctx context.Context, kv *api.K return nil } -func addressLookup(ctx context.Context, kv *api.KV, deploymentID, instanceName, addressNodeName string) (string, error) { - log.Debugf("Address lookup for deploymentID:%q, address node name:%q, instance:%q", deploymentID, addressNodeName, instanceName) - var address string +func attributeLookup(ctx context.Context, kv *api.KV, deploymentID, instanceName, nodeName, attribute string) (string, error) { + log.Debugf("Attribute:%q lookup for deploymentID:%q, node name:%q, instance:%q", attribute, deploymentID, nodeName, instanceName) res := make(chan string, 1) go func() { for { - if address, _ := deployments.GetInstanceAttributeValue(kv, deploymentID, addressNodeName, instanceName, "ip_address"); address != nil && address.RawString() != "" { - res <- address.RawString() - return + if attr, _ := deployments.GetInstanceAttributeValue(kv, deploymentID, nodeName, instanceName, attribute); attr != nil && attr.RawString() != "" { + if attr != nil && attr.RawString() != "" { + res <- attr.RawString() + return + } } select { @@ -259,9 +266,84 @@ func addressLookup(ctx context.Context, kv *api.KV, deploymentID, instanceName, }() select { - case address = <-res: - return address, nil + case val := <-res: + return val, nil case <-ctx.Done(): return "", ctx.Err() } } + +func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, deploymentID, nodeName, instanceName, computeName string, infrastructure *commons.Infrastructure) error { + storageKeys, err := deployments.GetRequirementsKeysByTypeForNode(kv, deploymentID, nodeName, "local_storage") + if err != nil { + return err + } + for _, storagePrefix := range storageKeys { + requirementIndex := deployments.GetRequirementIndexFromRequirementKey(storagePrefix) + volumeNodeName, err := deployments.GetTargetNodeForRequirement(kv, deploymentID, nodeName, requirementIndex) + if err != nil { + return err + } + + log.Debugf("Volume attachment required form Volume named %s", volumeNodeName) + + deviceValue, err := deployments.GetRelationshipPropertyValueFromRequirement(kv, deploymentID, nodeName, requirementIndex, "device") + if err != nil { + return err + } + 
+ zone, err := deployments.GetStringNodeProperty(kv, deploymentID, volumeNodeName, "zone", true) + if err != nil { + return err + } + + volumeIDValue, err := deployments.GetNodePropertyValue(kv, deploymentID, volumeNodeName, "volume_id") + if err != nil { + return err + } + var volumeID string + if volumeIDValue == nil || volumeIDValue.RawString() == "" { + // Lookup for attribute volume_id + volumeID, err = attributeLookup(ctx, kv, deploymentID, instanceName, volumeNodeName, "volume_id") + if err != nil { + return err + } + + } else { + volumeID = volumeIDValue.RawString() + } + + attachedDisk := &ComputeAttachedDisk{ + Disk: volumeID, + Instance: fmt.Sprintf("${google_compute_instance.%s.name}", computeName), + Zone: zone, + } + if deviceValue != nil && deviceValue.RawString() != "" { + attachedDisk.DeviceName = deviceValue.RawString() + } + + attachName := strings.ToLower("Vol" + cfg.ResourcesPrefix + volumeNodeName + "-" + instanceName + "to" + nodeName + "-" + instanceName) + attachName = strings.Replace(attachName, "_", "-", -1) + commons.AddResource(infrastructure, "google_compute_attached_disk", attachName, &attachedDisk) + + // Provide Consul Keys + consulKeys := commons.ConsulKeys{Keys: []commons.ConsulKey{}} + instancesPrefix := path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "instances") + + volumeDevConsulKey := commons.ConsulKey{ + Path: path.Join(instancesPrefix, volumeNodeName, instanceName, "attributes/device"), + Value: attachedDisk.DeviceName, + } + relDevConsulKey := commons.ConsulKey{ + Path: path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", nodeName, requirementIndex, instanceName, "attributes/device"), + Value: attachedDisk.DeviceName, + } + relVolDevConsulKey := commons.ConsulKey{ + Path: path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", volumeNodeName, requirementIndex, instanceName, "attributes/device"), + Value: attachedDisk.DeviceName, + } + consulKeys.Keys = append(consulKeys.Keys, volumeDevConsulKey, relDevConsulKey, relVolDevConsulKey) + commons.AddResource(infrastructure, "consul_keys", attachName, &consulKeys) + } + return nil +} diff --git a/prov/terraform/google/persistent_disk.go b/prov/terraform/google/persistent_disk.go index b136323ea..7138683cd 100644 --- a/prov/terraform/google/persistent_disk.go +++ b/prov/terraform/google/persistent_disk.go @@ -55,9 +55,11 @@ func (g *googleGenerator) generatePersistentDisk(ctx context.Context, kv *api.KV }{ {&volumes, "volume_id", false}, {&persistentDisk.Description, "description", false}, - {&persistentDisk.Snapshot, "snapshot_id", false}, + {&persistentDisk.SourceSnapshot, "snapshot_id", false}, {&persistentDisk.Type, "type", false}, {&persistentDisk.Zone, "zone", false}, + {&persistentDisk.SourceSnapshot, "source_snapshot", false}, + {&persistentDisk.SourceImage, "source_image", false}, {&size, "size", false}, } @@ -91,6 +93,21 @@ func (g *googleGenerator) generatePersistentDisk(ctx context.Context, kv *api.KV log.Debugf("Computed size (in GB): %d", persistentDisk.Size) } + // Get Encryption key + rawEncryptKeyValue, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, "disk_encryption_key", "raw_key") + if err != nil { + return err + } + if rawEncryptKeyValue.RawString() != "" { + hashValue, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, "disk_encryption_key", "sha256") + if err != nil { + return err + } + persistentDisk.DiskEncryptionKey = &EncryptionKey{ + Raw: 
rawEncryptKeyValue.RawString(), + SHA256: hashValue.RawString()} + } + name := strings.ToLower(cfg.ResourcesPrefix + nodeName + "-" + instanceName) persistentDisk.Name = strings.Replace(name, "_", "-", -1) @@ -102,11 +119,16 @@ func (g *googleGenerator) generatePersistentDisk(ctx context.Context, kv *api.KV // Provide Consul Key for attribute volume_id consulKeys := commons.ConsulKeys{Keys: []commons.ConsulKey{}} - consulKeyIPAddr := commons.ConsulKey{ + consulKeyVolumeID := commons.ConsulKey{ Path: path.Join(instancesKey, instanceName, "/attributes/volume_id"), Value: volumeID} - consulKeys.Keys = append(consulKeys.Keys, consulKeyIPAddr) + consulKeyUsers := commons.ConsulKey{ + Path: path.Join(instancesKey, instanceName, "/attributes/users"), + Value: fmt.Sprintf("${google_compute_disk.%s.users}", + persistentDisk.Name)} + + consulKeys.Keys = append(consulKeys.Keys, consulKeyVolumeID, consulKeyUsers) commons.AddResource(infrastructure, "consul_keys", persistentDisk.Name, &consulKeys) return nil } diff --git a/prov/terraform/google/resources.go b/prov/terraform/google/resources.go index 24f100841..3d229fcb6 100644 --- a/prov/terraform/google/resources.go +++ b/prov/terraform/google/resources.go @@ -85,14 +85,33 @@ type ComputeAddress struct { Project string `json:"project,omitempty"` } +// EncryptionKey represents a Google encryption key +type EncryptionKey struct { + Raw string `json:"raw_key,omitempty"` + SHA256 string `json:"sha256,omitempty"` +} + // PersistentDisk represents a Google persistent disk // See https://www.terraform.io/docs/providers/google/r/compute_disk.html type PersistentDisk struct { - Name string `json:"name"` - Size int `json:"size,omitempty"` - Description string `json:"description,omitempty"` - Type string `json:"type,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Zone string `json:"zone,omitempty"` - Snapshot string `json:"snapshot,omitempty"` + Name string `json:"name"` + Size int `json:"size,omitempty"` + Description string `json:"description,omitempty"` + Type string `json:"type,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Zone string `json:"zone,omitempty"` + DiskEncryptionKey *EncryptionKey `json:"disk_encryption_key,omitempty"` + SourceSnapshot string `json:"snapshot,omitempty"` + SourceSnapshotEncryptionKey *EncryptionKey `json:"source_snapshot_encryption_key,omitempty"` + SourceImage string `json:"image,omitempty"` + SourceImageEncryptionKey *EncryptionKey `json:"source_image_encryption_key,omitempty"` +} + +// ComputeAttachedDisk represents compute instance's attached disk +type ComputeAttachedDisk struct { + Instance string `json:"instance"` + Disk string `json:"disk"` + DeviceName string `json:"device_name,omitempty"` + Mode string `json:"mode,omitempty"` + Zone string `json:"zone,omitempty"` } From 682f7c63fcded2f1a273b1b17081d74e2bccc37e Mon Sep 17 00:00:00 2001 From: benoist-s Date: Thu, 11 Oct 2018 18:01:40 +0200 Subject: [PATCH 03/19] Update yorc.nodes.google.PersistentDisk properties Add unit tests --- data/tosca/yorc-google-types.yml | 15 ++--- deployments/definition_store.go | 2 +- helper/sizeutil/sizeutil_test.go | 50 ++++++++++++++++ prov/terraform/google/compute_instance.go | 10 +++- .../terraform/google/compute_instance_test.go | 41 +++++++++++++ prov/terraform/google/consul_test.go | 6 ++ prov/terraform/google/persistent_disk.go | 46 ++++++++++----- prov/terraform/google/persistent_disk_test.go | 50 ++++++++++++++++ prov/terraform/google/resources.go | 1 + 
...mpleComputeInstanceWithPersistentDisk.yaml | 59 +++++++++++++++++++ .../google/testdata/simplePersistentDisk.yaml | 35 +++++++++++ 11 files changed, 290 insertions(+), 25 deletions(-) create mode 100644 helper/sizeutil/sizeutil_test.go create mode 100644 prov/terraform/google/persistent_disk_test.go create mode 100644 prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml create mode 100644 prov/terraform/google/testdata/simplePersistentDisk.yaml diff --git a/data/tosca/yorc-google-types.yml b/data/tosca/yorc-google-types.yml index 30b7c54e1..e5f266a44 100644 --- a/data/tosca/yorc-google-types.yml +++ b/data/tosca/yorc-google-types.yml @@ -225,32 +225,33 @@ node_types: Customer-supplied encryption keys do not protect access to metadata of the disk. If you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later. required: false - source_image: + image_id: type: string description: > The image from which to initialize this disk. This can be one of: the image's self_link, projects/{project}/global/images/{image}, projects/{project}/global/images/family/{family}, global/images/{image}, global/images/family/{family}, family/{family}, {project}/{family}, {project}/{image}, {family}, or {image}. If referred by family, the images names must include the family name. If they don't, use the google_compute_image data source. For instance, the image centos-6-v20180104 includes its family name centos-6. These images can be referred by family name here. required: false - source_image_encryption_key: + image_encryption_key: type: yorc.datatypes.google.EncryptionKey description: > The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. required: false - source_snapshot: + snapshot_id: type: string description: > The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot, projects/project/global/snapshots/snapshot, global/snapshots/snapshot, snapshot are valid values required: false - source_snapshot_encryption_key: + snapshot_encryption_key: type: yorc.datatypes.google.EncryptionKey description: > The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key. required: false - attributes: - users: + mode: type: string description: > - Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance + The mode in which to attach this disk, either READ_WRITE or READ_ONLY. + If not specified, the default is to attach the disk in READ_WRITE mode. + required: false diff --git a/deployments/definition_store.go b/deployments/definition_store.go index b8fb6a117..586bd7f50 100644 --- a/deployments/definition_store.go +++ b/deployments/definition_store.go @@ -1179,7 +1179,7 @@ func createMissingBlockStorageForNode(consulStore consulutil.ConsulStore, kv *ap } /** -This function check if a nodes need a floating IP, and return the name of Floating IP node. +This function check if a nodes need a block storage, and return the name of BlockStorage node. 
*/ func checkBlockStorage(kv *api.KV, deploymentID, nodeName string) (bool, []string, error) { requirementsKey, err := GetRequirementsKeysByTypeForNode(kv, deploymentID, nodeName, "local_storage") diff --git a/helper/sizeutil/sizeutil_test.go b/helper/sizeutil/sizeutil_test.go new file mode 100644 index 000000000..f056208b5 --- /dev/null +++ b/helper/sizeutil/sizeutil_test.go @@ -0,0 +1,50 @@ +// Copyright 2018 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sizeutil + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestConvertToGB(t *testing.T) { + var testData = []struct { + test string + inputSize string + expectedSize int + expectedError bool + }{ + {"volume1", "1", 1, false}, + {"volume10000000", "100", 1, false}, + {"volume10000000", "1500 M", 2, false}, + {"volume1GB", "1GB", 1, false}, + {"volume1GBS", "1 GB", 1, false}, + {"olume1GiB", "1 GiB", 2, false}, + {"volume2GIB", "2 GIB", 3, false}, + {"volume1TB", "1 tb", 1000, false}, + {"volume1TiB", "1 TiB", 1100, false}, + {"error", "1 deca", 0, true}, + } + for _, tt := range testData { + s, err := ConvertToGB(tt.inputSize) + if !tt.expectedError { + assert.Nil(t, err) + assert.Equal(t, tt.expectedSize, s) + } else { + assert.Error(t, err, "Expected an error") + } + + } +} diff --git a/prov/terraform/google/compute_instance.go b/prov/terraform/google/compute_instance.go index 984bc81d5..1a9d87aa8 100644 --- a/prov/terraform/google/compute_instance.go +++ b/prov/terraform/google/compute_instance.go @@ -297,6 +297,11 @@ func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, return err } + mode, err := deployments.GetStringNodeProperty(kv, deploymentID, volumeNodeName, "mode", true) + if err != nil { + return err + } + volumeIDValue, err := deployments.GetNodePropertyValue(kv, deploymentID, volumeNodeName, "volume_id") if err != nil { return err @@ -317,14 +322,15 @@ func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, Disk: volumeID, Instance: fmt.Sprintf("${google_compute_instance.%s.name}", computeName), Zone: zone, + Mode: mode, } if deviceValue != nil && deviceValue.RawString() != "" { attachedDisk.DeviceName = deviceValue.RawString() } - attachName := strings.ToLower("Vol" + cfg.ResourcesPrefix + volumeNodeName + "-" + instanceName + "to" + nodeName + "-" + instanceName) + attachName := strings.ToLower(cfg.ResourcesPrefix + volumeNodeName + "-" + instanceName + "-attached-to-" + nodeName + "-" + instanceName) attachName = strings.Replace(attachName, "_", "-", -1) - commons.AddResource(infrastructure, "google_compute_attached_disk", attachName, &attachedDisk) + commons.AddResource(infrastructure, "google_compute_attached_disk", attachName, attachedDisk) // Provide Consul Keys consulKeys := commons.ConsulKeys{Keys: []commons.ConsulKey{}} diff --git a/prov/terraform/google/compute_instance_test.go 
b/prov/terraform/google/compute_instance_test.go index 290bfd54d..ab0dc696e 100644 --- a/prov/terraform/google/compute_instance_test.go +++ b/prov/terraform/google/compute_instance_test.go @@ -124,3 +124,44 @@ func testSimpleComputeInstanceWithAddress(t *testing.T, kv *api.KV, srv1 *testut require.Len(t, compute.NetworkInterfaces, 1, "Expected one network interface for external access") assert.Equal(t, "1.2.3.4", compute.NetworkInterfaces[0].AccessConfigs[0].NatIP, "Unexpected external IP address") } + +func testSimpleComputeInstanceWithPersistentDisk(t *testing.T, kv *api.KV, srv1 *testutil.TestServer, cfg config.Configuration) { + t.Parallel() + deploymentID := loadTestYaml(t, kv) + + // Simulate the google persistent disk "volume_id" attribute registration + srv1.PopulateKV(t, map[string][]byte{ + path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/nodes/BS1/type"): []byte("yorc.nodes.google.PersistentDisk"), + path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/instances/BS1/0/attributes/volume_id"): []byte("my_vol_id"), + }) + + infrastructure := commons.Infrastructure{} + g := googleGenerator{} + err := g.generateComputeInstance(context.Background(), kv, cfg, deploymentID, "Compute", "0", 0, &infrastructure, make(map[string]string)) + require.NoError(t, err, "Unexpected error attempting to generate compute instance for %s", deploymentID) + + require.Len(t, infrastructure.Resource["google_compute_instance"], 1, "Expected one compute instance") + instancesMap := infrastructure.Resource["google_compute_instance"].(map[string]interface{}) + require.Len(t, instancesMap, 1) + require.Contains(t, instancesMap, "compute-0") + + compute, ok := instancesMap["compute-0"].(*ComputeInstance) + require.True(t, ok, "compute-0 is not a ComputeInstance") + assert.Equal(t, "n1-standard-1", compute.MachineType) + assert.Equal(t, "europe-west1-b", compute.Zone) + require.NotNil(t, compute.BootDisk, 1, "Expected boot disk") + assert.Equal(t, "centos-cloud/centos-7", compute.BootDisk.InitializeParams.Image, "Unexpected boot disk image") + + require.Len(t, infrastructure.Resource["google_compute_attached_disk"], 1, "Expected one attached disk") + instancesMap = infrastructure.Resource["google_compute_attached_disk"].(map[string]interface{}) + require.Len(t, instancesMap, 1) + + require.Contains(t, instancesMap, "bs1-0-attached-to-compute-0") + attachedDisk, ok := instancesMap["bs1-0-attached-to-compute-0"].(*ComputeAttachedDisk) + require.True(t, ok, "bs1-0-attached-to-compute-0 is not a ComputeAttachedDisk") + assert.Equal(t, "my_vol_id", attachedDisk.Disk) + assert.Equal(t, "${google_compute_instance.compute-0.name}", attachedDisk.Instance) + assert.Equal(t, "europe-west1-b", attachedDisk.Zone) + assert.Equal(t, "foo", attachedDisk.DeviceName) + assert.Equal(t, "READ_ONLY", attachedDisk.Mode) +} diff --git a/prov/terraform/google/consul_test.go b/prov/terraform/google/consul_test.go index 7102177dd..4d5187c5c 100644 --- a/prov/terraform/google/consul_test.go +++ b/prov/terraform/google/consul_test.go @@ -48,5 +48,11 @@ func TestRunConsulGooglePackageTests(t *testing.T) { t.Run("simpleComputeInstanceWithAddress", func(t *testing.T) { testSimpleComputeInstanceWithAddress(t, kv, srv, cfg) }) + t.Run("simplePersistentDisk", func(t *testing.T) { + testSimplePersistentDisk(t, kv, cfg) + }) + t.Run("simpleComputeInstanceWithPersistentDisk", func(t *testing.T) { + testSimpleComputeInstanceWithPersistentDisk(t, kv, srv, cfg) + }) }) } diff --git a/prov/terraform/google/persistent_disk.go 
b/prov/terraform/google/persistent_disk.go index 7138683cd..beb1dfa3e 100644 --- a/prov/terraform/google/persistent_disk.go +++ b/prov/terraform/google/persistent_disk.go @@ -58,8 +58,7 @@ func (g *googleGenerator) generatePersistentDisk(ctx context.Context, kv *api.KV {&persistentDisk.SourceSnapshot, "snapshot_id", false}, {&persistentDisk.Type, "type", false}, {&persistentDisk.Zone, "zone", false}, - {&persistentDisk.SourceSnapshot, "source_snapshot", false}, - {&persistentDisk.SourceImage, "source_image", false}, + {&persistentDisk.SourceImage, "image_id", false}, {&size, "size", false}, } @@ -93,19 +92,24 @@ func (g *googleGenerator) generatePersistentDisk(ctx context.Context, kv *api.KV log.Debugf("Computed size (in GB): %d", persistentDisk.Size) } - // Get Encryption key - rawEncryptKeyValue, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, "disk_encryption_key", "raw_key") + // Get Encryption key if set + persistentDisk.DiskEncryptionKey, err = handleEncryptionKey(kv, deploymentID, nodeName, "disk_encryption_key") if err != nil { return err } - if rawEncryptKeyValue.RawString() != "" { - hashValue, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, "disk_encryption_key", "sha256") + // Get Source snapshot encryption key if source snapshot is filled + if persistentDisk.SourceSnapshot != "" { + persistentDisk.SourceSnapshotEncryptionKey, err = handleEncryptionKey(kv, deploymentID, nodeName, "snapshot_encryption_key") + if err != nil { + return err + } + } + // Get Source image encryption key if source image is filled + if persistentDisk.SourceImage != "" { + persistentDisk.SourceImageEncryptionKey, err = handleEncryptionKey(kv, deploymentID, nodeName, "image_encryption_key") if err != nil { return err } - persistentDisk.DiskEncryptionKey = &EncryptionKey{ - Raw: rawEncryptKeyValue.RawString(), - SHA256: hashValue.RawString()} } name := strings.ToLower(cfg.ResourcesPrefix + nodeName + "-" + instanceName) @@ -123,12 +127,24 @@ func (g *googleGenerator) generatePersistentDisk(ctx context.Context, kv *api.KV Path: path.Join(instancesKey, instanceName, "/attributes/volume_id"), Value: volumeID} - consulKeyUsers := commons.ConsulKey{ - Path: path.Join(instancesKey, instanceName, "/attributes/users"), - Value: fmt.Sprintf("${google_compute_disk.%s.users}", - persistentDisk.Name)} - - consulKeys.Keys = append(consulKeys.Keys, consulKeyVolumeID, consulKeyUsers) + consulKeys.Keys = append(consulKeys.Keys, consulKeyVolumeID) commons.AddResource(infrastructure, "consul_keys", persistentDisk.Name, &consulKeys) return nil } + +func handleEncryptionKey(kv *api.KV, deploymentID, nodeName, prop string) (*EncryptionKey, error) { + val, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, prop, "raw_key") + if err != nil { + return nil, err + } + if val.RawString() != "" { + hashValue, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, prop, "sha256") + if err != nil { + return nil, err + } + return &EncryptionKey{ + Raw: val.RawString(), + SHA256: hashValue.RawString()}, nil + } + return nil, nil +} diff --git a/prov/terraform/google/persistent_disk_test.go b/prov/terraform/google/persistent_disk_test.go new file mode 100644 index 000000000..899013016 --- /dev/null +++ b/prov/terraform/google/persistent_disk_test.go @@ -0,0 +1,50 @@ +// Copyright 2018 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package google + +import ( + "context" + "github.com/hashicorp/consul/api" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/ystia/yorc/config" + "github.com/ystia/yorc/prov/terraform/commons" + "testing" +) + +func testSimplePersistentDisk(t *testing.T, kv *api.KV, cfg config.Configuration) { + t.Parallel() + deploymentID := loadTestYaml(t, kv) + infrastructure := commons.Infrastructure{} + g := googleGenerator{} + err := g.generatePersistentDisk(context.Background(), kv, cfg, deploymentID, "PersistentDisk", "0", 0, &infrastructure, make(map[string]string)) + require.NoError(t, err, "Unexpected error attempting to generate persistent disk for %s", deploymentID) + + require.Len(t, infrastructure.Resource["google_compute_disk"], 1, "Expected one persistent disk") + instancesMap := infrastructure.Resource["google_compute_disk"].(map[string]interface{}) + require.Len(t, instancesMap, 1) + require.Contains(t, instancesMap, "persistentdisk-0") + + persistentDisk, ok := instancesMap["persistentdisk-0"].(*PersistentDisk) + require.True(t, ok, "computeaddress-0 is not a PersistentDisk") + assert.Equal(t, "persistentdisk-0", persistentDisk.Name) + assert.Equal(t, "europe-west1-b", persistentDisk.Zone) + assert.Equal(t, 32, persistentDisk.Size) + assert.Equal(t, "pd-ssd", persistentDisk.Type) + assert.Equal(t, "1234", persistentDisk.DiskEncryptionKey.Raw) + assert.Equal(t, "5678", persistentDisk.DiskEncryptionKey.SHA256) + assert.Equal(t, "my description for persistent disk", persistentDisk.Description) + assert.Equal(t, map[string]string{"key1": "value1", "key2": "value2"}, persistentDisk.Labels) +} diff --git a/prov/terraform/google/resources.go b/prov/terraform/google/resources.go index 3d229fcb6..dd13485e4 100644 --- a/prov/terraform/google/resources.go +++ b/prov/terraform/google/resources.go @@ -108,6 +108,7 @@ type PersistentDisk struct { } // ComputeAttachedDisk represents compute instance's attached disk +// See https://www.terraform.io/docs/providers/google/r/compute_attached_disk.html type ComputeAttachedDisk struct { Instance string `json:"instance"` Disk string `json:"disk"` diff --git a/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml b/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml new file mode 100644 index 000000000..9a871e6d9 --- /dev/null +++ b/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml @@ -0,0 +1,59 @@ +tosca_definitions_version: alien_dsl_2_0_0 + +metadata: + template_name: PersistentDiskTest + template_version: 1.0 + template_author: tester + +description: "" + +imports: + - + - + - + +topology_template: + node_templates: + Compute: + metadata: + type: yorc.nodes.google.Compute + properties: + image_project: "centos-cloud" + image_family: "centos-7" + machine_type: "n1-standard-1" + zone: "europe-west1-b" + requirements: + - local_storage: + node: BS1 + capability: 
tosca.capabilities.Attachment + relationship: + type: tosca.relationships.AttachesTo + properties: + device: foo + capabilities: + endpoint: + properties: + secure: true + protocol: tcp + network_name: PRIVATE + initiator: source + credentials: {user: centos} + scalable: + properties: + min_instances: 1 + max_instances: 1 + default_instances: 1 + BS1: + metadata: + type: yorc.nodes.google.PersistentDisk + properties: + zone: "europe-west1-b" + size: "12 GB" + device: foo + mode: "READ_ONLY" + requirements: + - attachToComputeAttach: + type_requirement: attachment + node: Comp + capability: tosca.capabilities.Attachment + relationship: tosca.relationships.AttachTo diff --git a/prov/terraform/google/testdata/simplePersistentDisk.yaml b/prov/terraform/google/testdata/simplePersistentDisk.yaml new file mode 100644 index 000000000..31267f129 --- /dev/null +++ b/prov/terraform/google/testdata/simplePersistentDisk.yaml @@ -0,0 +1,35 @@ +tosca_definitions_version: alien_dsl_2_0_0 + +metadata: + template_name: PersistentDiskTest + template_version: 1.0 + template_author: tester + +description: "" + +imports: + - + - + - + +topology_template: + node_templates: + PersistentDisk: + type: yorc.nodes.google.PersistentDisk + properties: + zone: "europe-west1-b" + size: "12 GB" + type: "pd-ssd" + description: "my description for persistent disk" + snapshot_id: "projects/project/global/snapshots/snapshot" + size: "32 GB" + labels: "key1=value1, key2=value2" + disk_encryption_key: + raw_key: 1234 + sha256: 5678 + requirements: + - attachToComputeAttach: + type_requirement: attachment + node: Comp + capability: tosca.capabilities.Attachment + relationship: tosca.relationships.AttachTo From 368e14b068d028067a0696c46106626fc680a681 Mon Sep 17 00:00:00 2001 From: stephane benoist <29753229+stebenoist@users.noreply.github.com> Date: Fri, 12 Oct 2018 11:01:33 +0200 Subject: [PATCH 04/19] add scratch disks property for google compute instance --- data/tosca/yorc-google-types.yml | 17 +++++++++++ prov/terraform/google/compute_instance.go | 30 ++++++++++++++++++- .../terraform/google/compute_instance_test.go | 4 +++ prov/terraform/google/consul_test.go | 15 ---------- prov/terraform/google/resources.go | 6 ++++ .../testdata/simpleComputeInstance.yaml | 3 ++ 6 files changed, 59 insertions(+), 16 deletions(-) diff --git a/data/tosca/yorc-google-types.yml b/data/tosca/yorc-google-types.yml index e5f266a44..fce2f7250 100644 --- a/data/tosca/yorc-google-types.yml +++ b/data/tosca/yorc-google-types.yml @@ -25,6 +25,14 @@ data_types: required: false description: The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. + yorc.datatypes.google.ScratchDisk: + derived_from: tosca.datatypes.Root + properties: + interface: + type: string + required: false + description: The disk interface to use for attaching the scratch disks; either SCSI or NVME. Defaults to SCSI. + node_types: yorc.nodes.google.Compute: derived_from: yorc.nodes.Compute @@ -116,6 +124,15 @@ node_types: Comma-separated list of tags to apply to the instances for identifying the instances to which network firewall rules will apply. required: false + scratch_disks: + type: list + description: Additional scratch disks to attach to the instance. Maximum allowed is 8. 
+ required: false + entry_schema: + type: yorc.datatypes.google.ScratchDisk + constraints: + - greater_or_equal: 0 + - max_length: 8 requirements: - assignment: capability: yorc.capabilities.Assignable diff --git a/prov/terraform/google/compute_instance.go b/prov/terraform/google/compute_instance.go index 1a9d87aa8..6a13e333c 100644 --- a/prov/terraform/google/compute_instance.go +++ b/prov/terraform/google/compute_instance.go @@ -175,10 +175,38 @@ func (g *googleGenerator) generateComputeInstance(ctx context.Context, kv *api.K return err } + // Add additional Scratch disks + scratchDisks, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, "scratch_disks") + if err != nil { + return err + } + + if scratchDisks != nil && scratchDisks.RawString() != "" { + list, ok := scratchDisks.Value.([]interface{}) + if !ok { + return errors.New("failed to retrieve scratch disk Tosca Value: not expected type") + } + instance.ScratchDisks = make([]ScratchDisk, 0) + for _, n := range list { + v, ok := n.(map[string]interface{}) + if !ok { + return errors.New("failed to retrieve scratch disk map: not expected type") + } + for _, val := range v { + i, ok := val.(string) + if !ok { + return errors.New("failed to retrieve scratch disk interface value: not expected type") + } + scratch := ScratchDisk{Interface: i} + instance.ScratchDisks = append(instance.ScratchDisks, scratch) + } + } + } + // Add the compute instance commons.AddResource(infrastructure, "google_compute_instance", instance.Name, &instance) - // Add Persistent disks + // Attach Persistent disks err = addAttachedDisks(ctx, cfg, kv, deploymentID, nodeName, instanceName, instance.Name, infrastructure) if err != nil { return err diff --git a/prov/terraform/google/compute_instance_test.go b/prov/terraform/google/compute_instance_test.go index ab0dc696e..917238f5b 100644 --- a/prov/terraform/google/compute_instance_test.go +++ b/prov/terraform/google/compute_instance_test.go @@ -81,6 +81,10 @@ func testSimpleComputeInstance(t *testing.T, kv *api.KV, cfg config.Configuratio require.True(t, ok) assert.Equal(t, "centos", rex.Connection.User) assert.Equal(t, `${file("~/.ssh/yorc.pem")}`, rex.Connection.PrivateKey) + + require.Len(t, compute.ScratchDisks, 2, "Expected 2 scratch disks") + assert.Equal(t, "SCSI", compute.ScratchDisks[0].Interface, "SCSI interface expected for 1st scratch") + assert.Equal(t, "NVME", compute.ScratchDisks[1].Interface, "NVME interface expected for 2nd scratch") } func testSimpleComputeInstanceMissingMandatoryParameter(t *testing.T, kv *api.KV, cfg config.Configuration) { diff --git a/prov/terraform/google/consul_test.go b/prov/terraform/google/consul_test.go index 4d5187c5c..8d805f910 100644 --- a/prov/terraform/google/consul_test.go +++ b/prov/terraform/google/consul_test.go @@ -39,20 +39,5 @@ func TestRunConsulGooglePackageTests(t *testing.T) { t.Run("simpleComputeInstance", func(t *testing.T) { testSimpleComputeInstance(t, kv, cfg) }) - t.Run("simpleComputeInstanceMissingMandatoryParameter", func(t *testing.T) { - testSimpleComputeInstanceMissingMandatoryParameter(t, kv, cfg) - }) - t.Run("simpleComputeAddress", func(t *testing.T) { - testSimpleComputeAddress(t, kv, cfg) - }) - t.Run("simpleComputeInstanceWithAddress", func(t *testing.T) { - testSimpleComputeInstanceWithAddress(t, kv, srv, cfg) - }) - t.Run("simplePersistentDisk", func(t *testing.T) { - testSimplePersistentDisk(t, kv, cfg) - }) - t.Run("simpleComputeInstanceWithPersistentDisk", func(t *testing.T) { - 
testSimpleComputeInstanceWithPersistentDisk(t, kv, srv, cfg) - }) }) } diff --git a/prov/terraform/google/resources.go b/prov/terraform/google/resources.go index dd13485e4..add650018 100644 --- a/prov/terraform/google/resources.go +++ b/prov/terraform/google/resources.go @@ -30,6 +30,7 @@ type ComputeInstance struct { // ServiceAccounts is an array of at most one element ServiceAccounts []ServiceAccount `json:"service_account,omitempty"` Tags []string `json:"tags,omitempty"` + ScratchDisks []ScratchDisk `json:"scratch_disk,omitempty"` } // BootDisk represents the required boot disk for compute instance @@ -91,6 +92,11 @@ type EncryptionKey struct { SHA256 string `json:"sha256,omitempty"` } +// ScratchDisk represents an additional Compute instance local scratch disk +type ScratchDisk struct { + Interface string `json:"interface,omitempty"` +} + // PersistentDisk represents a Google persistent disk // See https://www.terraform.io/docs/providers/google/r/compute_disk.html type PersistentDisk struct { diff --git a/prov/terraform/google/testdata/simpleComputeInstance.yaml b/prov/terraform/google/testdata/simpleComputeInstance.yaml index 387fcf289..557530126 100644 --- a/prov/terraform/google/testdata/simpleComputeInstance.yaml +++ b/prov/terraform/google/testdata/simpleComputeInstance.yaml @@ -22,6 +22,9 @@ topology_template: service_account: "yorc@yorc.net" tags: "tag1, tag2" labels: "key1=value1, key2=value2" + scratch_disks: + - interface: SCSI + - interface: NVME capabilities: scalable: properties: From 0511654873fc7e944e2528257f4ac7d1893bc003 Mon Sep 17 00:00:00 2001 From: stephane benoist <29753229+stebenoist@users.noreply.github.com> Date: Fri, 12 Oct 2018 14:26:48 +0200 Subject: [PATCH 05/19] Create relationship yorc.relationships.google.AttachesTo with mode property Remove yorc.node.google.PersistentDisk mode proerty --- data/tosca/yorc-google-types.yml | 20 +++++++++++++------ deployments/definition_store.go | 13 ++++++++++++ prov/terraform/google/compute_instance.go | 6 ++++-- prov/terraform/google/consul_test.go | 15 ++++++++++++++ ...mpleComputeInstanceWithPersistentDisk.yaml | 8 +------- 5 files changed, 47 insertions(+), 15 deletions(-) diff --git a/data/tosca/yorc-google-types.yml b/data/tosca/yorc-google-types.yml index fce2f7250..216157cf9 100644 --- a/data/tosca/yorc-google-types.yml +++ b/data/tosca/yorc-google-types.yml @@ -33,6 +33,20 @@ data_types: required: false description: The disk interface to use for attaching the scratch disks; either SCSI or NVME. Defaults to SCSI. +relationship_types: + yorc.relationships.google.AttachesTo: + derived_from: tosca.relationships.AttachTo + description: > + This type represents an attachment relationship between two nodes. + For example, an AttachesTo relationship type would be used for attaching a storage node to a Compute node. + valid_target_types: [ tosca.capabilities.Attachment ] + properties: + mode: + type: string + description: > + The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode. + required: false + node_types: yorc.nodes.google.Compute: derived_from: yorc.nodes.Compute @@ -264,11 +278,5 @@ node_types: description: > The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key. required: false - mode: - type: string - description: > - The mode in which to attach this disk, either READ_WRITE or READ_ONLY. 
- If not specified, the default is to attach the disk in READ_WRITE mode. - required: false diff --git a/deployments/definition_store.go b/deployments/definition_store.go index 586bd7f50..cea86c649 100644 --- a/deployments/definition_store.go +++ b/deployments/definition_store.go @@ -1103,6 +1103,19 @@ func fixAlienBlockStorages(ctx context.Context, kv *api.KV, deploymentID, nodeNa req.RelationshipProps["device"] = va } + // Get all requirement properties + kvps, _, err := kv.List(path.Join(attachReq, "properties"), nil) + if err != nil { + return errors.Wrapf(err, "Failed to fix Alien-specific BlockStorage %q", nodeName) + } + for _, kvp := range kvps { + va := &tosca.ValueAssignment{} + err := yaml.Unmarshal(kvp.Value, va) + if err != nil { + return errors.Wrapf(err, "Failed to fix Alien-specific BlockStorage %q", nodeName) + } + req.RelationshipProps[path.Base(kvp.Key)] = va + } newReqID, err := GetNbRequirementsForNode(kv, deploymentID, computeNodeName) if err != nil { return err diff --git a/prov/terraform/google/compute_instance.go b/prov/terraform/google/compute_instance.go index 6a13e333c..f62b94dad 100644 --- a/prov/terraform/google/compute_instance.go +++ b/prov/terraform/google/compute_instance.go @@ -325,7 +325,7 @@ func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, return err } - mode, err := deployments.GetStringNodeProperty(kv, deploymentID, volumeNodeName, "mode", true) + modeValue, err := deployments.GetRelationshipPropertyValueFromRequirement(kv, deploymentID, nodeName, requirementIndex, "mode") if err != nil { return err } @@ -350,11 +350,13 @@ func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, Disk: volumeID, Instance: fmt.Sprintf("${google_compute_instance.%s.name}", computeName), Zone: zone, - Mode: mode, } if deviceValue != nil && deviceValue.RawString() != "" { attachedDisk.DeviceName = deviceValue.RawString() } + if modeValue != nil && modeValue.RawString() != "" { + attachedDisk.Mode = modeValue.RawString() + } attachName := strings.ToLower(cfg.ResourcesPrefix + volumeNodeName + "-" + instanceName + "-attached-to-" + nodeName + "-" + instanceName) attachName = strings.Replace(attachName, "_", "-", -1) diff --git a/prov/terraform/google/consul_test.go b/prov/terraform/google/consul_test.go index 8d805f910..4d5187c5c 100644 --- a/prov/terraform/google/consul_test.go +++ b/prov/terraform/google/consul_test.go @@ -39,5 +39,20 @@ func TestRunConsulGooglePackageTests(t *testing.T) { t.Run("simpleComputeInstance", func(t *testing.T) { testSimpleComputeInstance(t, kv, cfg) }) + t.Run("simpleComputeInstanceMissingMandatoryParameter", func(t *testing.T) { + testSimpleComputeInstanceMissingMandatoryParameter(t, kv, cfg) + }) + t.Run("simpleComputeAddress", func(t *testing.T) { + testSimpleComputeAddress(t, kv, cfg) + }) + t.Run("simpleComputeInstanceWithAddress", func(t *testing.T) { + testSimpleComputeInstanceWithAddress(t, kv, srv, cfg) + }) + t.Run("simplePersistentDisk", func(t *testing.T) { + testSimplePersistentDisk(t, kv, cfg) + }) + t.Run("simpleComputeInstanceWithPersistentDisk", func(t *testing.T) { + testSimpleComputeInstanceWithPersistentDisk(t, kv, srv, cfg) + }) }) } diff --git a/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml b/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml index 9a871e6d9..8e4569a63 100644 --- a/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml +++ 
b/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml @@ -30,6 +30,7 @@ topology_template: type: tosca.relationships.AttachesTo properties: device: foo + mode: "READ_ONLY" capabilities: endpoint: properties: @@ -50,10 +51,3 @@ topology_template: zone: "europe-west1-b" size: "12 GB" device: foo - mode: "READ_ONLY" - requirements: - - attachToComputeAttach: - type_requirement: attachment - node: Comp - capability: tosca.capabilities.Attachment - relationship: tosca.relationships.AttachTo From 4c5793a9ab10ab81a2fe977c6e34728c438d9767 Mon Sep 17 00:00:00 2001 From: stephane benoist <29753229+stebenoist@users.noreply.github.com> Date: Fri, 12 Oct 2018 14:47:30 +0200 Subject: [PATCH 06/19] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e0cc0ca2..b7a24cc2b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ## UNRELEASED +### FEATURES + +* Support GCE Block storages. ([GH-82](https://github.com/ystia/yorc/issues/81)) + ## 3.1.0-M4 (October 08, 2018) ### DEPENDENCIES From 03b55cc2e1f97744712d8ac6aa2b2efe11f54605 Mon Sep 17 00:00:00 2001 From: stephane benoist <29753229+stebenoist@users.noreply.github.com> Date: Wed, 17 Oct 2018 09:52:25 +0200 Subject: [PATCH 07/19] Add unit test about property definition with default empty string --- deployments/consul_test.go | 3 +++ deployments/definition_store_test.go | 21 +++++++++++++++++++ .../testdata/issue_get_empty_prop_rel.yaml | 9 ++++++++ 3 files changed, 33 insertions(+) diff --git a/deployments/consul_test.go b/deployments/consul_test.go index 4e235c5f2..1a6cb7fe7 100644 --- a/deployments/consul_test.go +++ b/deployments/consul_test.go @@ -96,5 +96,8 @@ func TestRunConsulDeploymentsPackageTests(t *testing.T) { t.Run("TestOperationHost", func(t *testing.T) { testOperationHost(t, kv) }) + t.Run("testIssueGetEmptyPropOnRelationship", func(t *testing.T) { + testIssueGetEmptyPropOnRelationship(t, kv) + }) }) } diff --git a/deployments/definition_store_test.go b/deployments/definition_store_test.go index 2e58a76f1..4f2dd7039 100644 --- a/deployments/definition_store_test.go +++ b/deployments/definition_store_test.go @@ -765,6 +765,27 @@ func testIssueGetEmptyPropRel(t *testing.T, kv *api.KV) { require.Equal(t, "", results[0].Value) } +func testIssueGetEmptyPropOnRelationship(t *testing.T, kv *api.KV) { + // t.Parallel() + deploymentID := strings.Replace(t.Name(), "/", "_", -1) + err := StoreDeploymentDefinition(context.Background(), kv, deploymentID, "testdata/issue_get_empty_prop_rel.yaml") + require.Nil(t, err) + // First test operation outputs detection + + results, err := GetOperationInput(kv, deploymentID, "ValueAssignmentNode2", prov.Operation{ + Name: "configure.pre_configure_source", + ImplementedInType: "yorc.tests.relationships.ValueAssignmentConnectsTo", + ImplementationArtifact: "", + RelOp: prov.RelationshipOperation{ + IsRelationshipOperation: true, + RequirementIndex: "1", + TargetNodeName: "ValueAssignmentNode1", + }}, "input_empty_prop") + require.Nil(t, err) + require.Len(t, results, 1) + require.Equal(t, "", results[0].Value) +} + func testRelationshipWorkflow(t *testing.T, kv *api.KV) { // t.Parallel() deploymentID := strings.Replace(t.Name(), "/", "_", -1) diff --git a/deployments/testdata/issue_get_empty_prop_rel.yaml b/deployments/testdata/issue_get_empty_prop_rel.yaml index b4c995375..709e48163 100644 --- a/deployments/testdata/issue_get_empty_prop_rel.yaml +++ b/deployments/testdata/issue_get_empty_prop_rel.yaml @@ -166,6 
+166,11 @@ node_types: relationship_types: yorc.tests.relationships.ValueAssignmentConnectsTo: derived_from: tosca.relationships.ConnectsTo + properties: + empty_prop: + type: string + required: false + default: "" interfaces: Configure: pre_configure_source: @@ -174,6 +179,9 @@ relationship_types: input_list: ["l1", "l2"] input_list_ex: - "le1" + + + - "le2" input_map: {"key1": "value1", "key2": "value2"} input_map_ex: @@ -188,6 +196,7 @@ relationship_types: input_propList_all: { get_property: [SOURCE, list] } input_propList_0_alien: { get_property: [SOURCE, "list[0]"] } input_propList_0_tosca: { get_property: [SOURCE, list, 0] } + input_empty_prop: { get_property: [SELF, empty_prop] } implementation: scripts/show_inputs.sh post_configure_source: inputs: From f12125e3d4c7312b8eb24cc3764d9e74718f7e09 Mon Sep 17 00:00:00 2001 From: stephane benoist <29753229+stebenoist@users.noreply.github.com> Date: Wed, 17 Oct 2018 17:09:39 +0200 Subject: [PATCH 08/19] Allow to retrieve logical volume device from google device ID --- prov/terraform/commons/resources.go | 8 ++ prov/terraform/google/compute_instance.go | 80 +++++++++++++------ .../terraform/google/compute_instance_test.go | 8 +- ...mpleComputeInstanceWithPersistentDisk.yaml | 2 - 4 files changed, 68 insertions(+), 30 deletions(-) diff --git a/prov/terraform/commons/resources.go b/prov/terraform/commons/resources.go index 2e3b96486..4ded222bc 100644 --- a/prov/terraform/commons/resources.go +++ b/prov/terraform/commons/resources.go @@ -82,6 +82,14 @@ type RemoteExec struct { Scripts []string `json:"scripts,omitempty"` } +// LocalExec allows to invoke a local executable after a resource is created. This invokes a process on the machine running Terraform, not on the resource +type LocalExec struct { + Command string `json:"command"` + WorkingDir string `json:"working_dir,omitempty"` + Interpreter []string `json:"interpreter,omitempty"` + Environment map[string]string `json:"environment,omitempty"` +} + // A Connection allows to overwrite the way Terraform connects to a resource type Connection struct { ConnType string `json:"type,omitempty"` diff --git a/prov/terraform/google/compute_instance.go b/prov/terraform/google/compute_instance.go index f62b94dad..bac3fc89e 100644 --- a/prov/terraform/google/compute_instance.go +++ b/prov/terraform/google/compute_instance.go @@ -207,7 +207,7 @@ func (g *googleGenerator) generateComputeInstance(ctx context.Context, kv *api.K commons.AddResource(infrastructure, "google_compute_instance", instance.Name, &instance) // Attach Persistent disks - err = addAttachedDisks(ctx, cfg, kv, deploymentID, nodeName, instanceName, instance.Name, infrastructure) + devices, err := addAttachedDisks(ctx, cfg, kv, deploymentID, nodeName, instanceName, instance.Name, infrastructure) if err != nil { return err } @@ -270,6 +270,34 @@ func (g *googleGenerator) generateComputeInstance(ctx context.Context, kv *api.K commons.AddResource(infrastructure, "null_resource", instance.Name+"-ConnectionCheck", &nullResource) + // Retrieve devices + if len(devices) > 0 { + for _, dev := range devices { + devResource := commons.Resource{} + + // Remote exec to retrieve the logical device for google device ID and to redirect stdout to file + re := commons.RemoteExec{Inline: []string{fmt.Sprintf("readlink -f /dev/disk/by-id/%s > out-%s", dev, dev)}, + Connection: &commons.Connection{User: user, Host: accessIP, + PrivateKey: `${file("` + privateKeyFilePath + `")}`}} + devResource.Provisioners = make([]map[string]interface{}, 0) + provMap := 
make(map[string]interface{}) + provMap["remote-exec"] = re + devResource.Provisioners = append(devResource.Provisioners, provMap) + devResource.DependsOn = []string{fmt.Sprintf("null_resource.%s", instance.Name+"-ConnectionCheck")} + commons.AddResource(infrastructure, "null_resource", fmt.Sprintf("%s-GetDevice-%s", instance.Name, dev), &devResource) + + // local exec to scp the stdout file locally + scpCommand := fmt.Sprintf("scp -i %s %s@%s:out-%s out-%s", privateKeyFilePath, user, accessIP, dev, dev) + loc := commons.LocalExec{Command: scpCommand} + locMap := make(map[string]interface{}) + locMap["local-exec"] = loc + locResource := commons.Resource{} + locResource.Provisioners = append(locResource.Provisioners, locMap) + locResource.DependsOn = []string{fmt.Sprintf("null_resource.%s", fmt.Sprintf("%s-GetDevice-%s", instance.Name, dev))} + commons.AddResource(infrastructure, "null_resource", fmt.Sprintf("%s-CopyOut-%s", instance.Name, dev), &locResource) + } + } + return nil } @@ -301,45 +329,42 @@ func attributeLookup(ctx context.Context, kv *api.KV, deploymentID, instanceName } } -func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, deploymentID, nodeName, instanceName, computeName string, infrastructure *commons.Infrastructure) error { +func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, deploymentID, nodeName, instanceName, computeName string, infrastructure *commons.Infrastructure) ([]string, error) { + devices := make([]string, 0) + storageKeys, err := deployments.GetRequirementsKeysByTypeForNode(kv, deploymentID, nodeName, "local_storage") if err != nil { - return err + return nil, err } for _, storagePrefix := range storageKeys { requirementIndex := deployments.GetRequirementIndexFromRequirementKey(storagePrefix) volumeNodeName, err := deployments.GetTargetNodeForRequirement(kv, deploymentID, nodeName, requirementIndex) if err != nil { - return err + return nil, err } log.Debugf("Volume attachment required form Volume named %s", volumeNodeName) - deviceValue, err := deployments.GetRelationshipPropertyValueFromRequirement(kv, deploymentID, nodeName, requirementIndex, "device") - if err != nil { - return err - } - zone, err := deployments.GetStringNodeProperty(kv, deploymentID, volumeNodeName, "zone", true) if err != nil { - return err + return nil, err } modeValue, err := deployments.GetRelationshipPropertyValueFromRequirement(kv, deploymentID, nodeName, requirementIndex, "mode") if err != nil { - return err + return nil, err } volumeIDValue, err := deployments.GetNodePropertyValue(kv, deploymentID, volumeNodeName, "volume_id") if err != nil { - return err + return nil, err } var volumeID string if volumeIDValue == nil || volumeIDValue.RawString() == "" { // Lookup for attribute volume_id volumeID, err = attributeLookup(ctx, kv, deploymentID, instanceName, volumeNodeName, "volume_id") if err != nil { - return err + return nil, err } } else { @@ -351,35 +376,42 @@ func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, Instance: fmt.Sprintf("${google_compute_instance.%s.name}", computeName), Zone: zone, } - if deviceValue != nil && deviceValue.RawString() != "" { - attachedDisk.DeviceName = deviceValue.RawString() - } if modeValue != nil && modeValue.RawString() != "" { attachedDisk.Mode = modeValue.RawString() } - attachName := strings.ToLower(cfg.ResourcesPrefix + volumeNodeName + "-" + instanceName + "-attached-to-" + nodeName + "-" + instanceName) + attachName := 
strings.ToLower(cfg.ResourcesPrefix + volumeNodeName + "-" + instanceName + "-to-" + nodeName + "-" + instanceName) attachName = strings.Replace(attachName, "_", "-", -1) + // attachName is used as device name to retrieve device attribute as logical volume name + attachedDisk.DeviceName = attachName commons.AddResource(infrastructure, "google_compute_attached_disk", attachName, attachedDisk) + // Add specific output for logical device + //commons.AddOutput(infrastructure, attachName, &commons.Output{Value: fmt.Sprintf("${file(\"%s\")}", "out-"+attachName)}) + // Add device + devices = append(devices, fmt.Sprintf("google-%s", attachName)) + // Provide Consul Keys consulKeys := commons.ConsulKeys{Keys: []commons.ConsulKey{}} instancesPrefix := path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "instances") volumeDevConsulKey := commons.ConsulKey{ - Path: path.Join(instancesPrefix, volumeNodeName, instanceName, "attributes/device"), - Value: attachedDisk.DeviceName, + Path: path.Join(instancesPrefix, volumeNodeName, instanceName, "attributes/device"), + //Value: fmt.Sprintf("${local.%s}", attachName), + Value: "test", } relDevConsulKey := commons.ConsulKey{ - Path: path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", nodeName, requirementIndex, instanceName, "attributes/device"), - Value: attachedDisk.DeviceName, + Path: path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", nodeName, requirementIndex, instanceName, "attributes/device"), + //Value: fmt.Sprintf("${local.%s}", attachName), + Value: "test", } relVolDevConsulKey := commons.ConsulKey{ - Path: path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", volumeNodeName, requirementIndex, instanceName, "attributes/device"), - Value: attachedDisk.DeviceName, + Path: path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", volumeNodeName, requirementIndex, instanceName, "attributes/device"), + //Value: fmt.Sprintf("${local.%s}", attachName), + Value: "test", } consulKeys.Keys = append(consulKeys.Keys, volumeDevConsulKey, relDevConsulKey, relVolDevConsulKey) commons.AddResource(infrastructure, "consul_keys", attachName, &consulKeys) } - return nil + return devices, nil } diff --git a/prov/terraform/google/compute_instance_test.go b/prov/terraform/google/compute_instance_test.go index 917238f5b..a8bdcae61 100644 --- a/prov/terraform/google/compute_instance_test.go +++ b/prov/terraform/google/compute_instance_test.go @@ -160,12 +160,12 @@ func testSimpleComputeInstanceWithPersistentDisk(t *testing.T, kv *api.KV, srv1 instancesMap = infrastructure.Resource["google_compute_attached_disk"].(map[string]interface{}) require.Len(t, instancesMap, 1) - require.Contains(t, instancesMap, "bs1-0-attached-to-compute-0") - attachedDisk, ok := instancesMap["bs1-0-attached-to-compute-0"].(*ComputeAttachedDisk) - require.True(t, ok, "bs1-0-attached-to-compute-0 is not a ComputeAttachedDisk") + require.Contains(t, instancesMap, "bs1-0-to-compute-0") + attachedDisk, ok := instancesMap["bs1-0-to-compute-0"].(*ComputeAttachedDisk) + require.True(t, ok, "bs1-0-to-compute-0 is not a ComputeAttachedDisk") assert.Equal(t, "my_vol_id", attachedDisk.Disk) assert.Equal(t, "${google_compute_instance.compute-0.name}", attachedDisk.Instance) assert.Equal(t, "europe-west1-b", attachedDisk.Zone) - assert.Equal(t, "foo", attachedDisk.DeviceName) + assert.Equal(t, "bs1-0-to-compute-0", attachedDisk.DeviceName) 
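	// Editor's note (illustrative, not part of the patch): reusing the attachment
	// resource name as the GCE device name is what makes the later device lookup
	// possible, since the guest OS is assumed to expose the disk under a stable
	// symlink such as /dev/disk/by-id/google-bs1-0-to-compute-0, which readlink
	// resolves to the actual block device (for example /dev/sdb).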
assert.Equal(t, "READ_ONLY", attachedDisk.Mode) } diff --git a/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml b/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml index 8e4569a63..335b6a2a5 100644 --- a/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml +++ b/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml @@ -29,7 +29,6 @@ topology_template: relationship: type: tosca.relationships.AttachesTo properties: - device: foo mode: "READ_ONLY" capabilities: endpoint: @@ -50,4 +49,3 @@ topology_template: properties: zone: "europe-west1-b" size: "12 GB" - device: foo From 38cc6ba7a91ac42ba3d8cb9b3ad31b0a8ce51715 Mon Sep 17 00:00:00 2001 From: benoist-s Date: Thu, 18 Oct 2018 17:12:27 +0200 Subject: [PATCH 09/19] Still need to find a way to retrieve logical device in file with tf --- data/tosca/yorc-google-types.yml | 6 ++ prov/terraform/google/compute_instance.go | 119 ++++++++++++---------- 2 files changed, 73 insertions(+), 52 deletions(-) diff --git a/data/tosca/yorc-google-types.yml b/data/tosca/yorc-google-types.yml index 216157cf9..d98440040 100644 --- a/data/tosca/yorc-google-types.yml +++ b/data/tosca/yorc-google-types.yml @@ -46,6 +46,12 @@ relationship_types: description: > The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode. required: false + attributes: + device: + type: string + description: > + The logical name of the device as exposed to the instance. + Note: A runtime property that gets set when the model gets instantiated by the orchestrator. node_types: yorc.nodes.google.Compute: diff --git a/prov/terraform/google/compute_instance.go b/prov/terraform/google/compute_instance.go index bac3fc89e..bcb5b3ec9 100644 --- a/prov/terraform/google/compute_instance.go +++ b/prov/terraform/google/compute_instance.go @@ -271,36 +271,68 @@ func (g *googleGenerator) generateComputeInstance(ctx context.Context, kv *api.K commons.AddResource(infrastructure, "null_resource", instance.Name+"-ConnectionCheck", &nullResource) // Retrieve devices - if len(devices) > 0 { - for _, dev := range devices { - devResource := commons.Resource{} - - // Remote exec to retrieve the logical device for google device ID and to redirect stdout to file - re := commons.RemoteExec{Inline: []string{fmt.Sprintf("readlink -f /dev/disk/by-id/%s > out-%s", dev, dev)}, - Connection: &commons.Connection{User: user, Host: accessIP, - PrivateKey: `${file("` + privateKeyFilePath + `")}`}} - devResource.Provisioners = make([]map[string]interface{}, 0) - provMap := make(map[string]interface{}) - provMap["remote-exec"] = re - devResource.Provisioners = append(devResource.Provisioners, provMap) - devResource.DependsOn = []string{fmt.Sprintf("null_resource.%s", instance.Name+"-ConnectionCheck")} - commons.AddResource(infrastructure, "null_resource", fmt.Sprintf("%s-GetDevice-%s", instance.Name, dev), &devResource) - - // local exec to scp the stdout file locally - scpCommand := fmt.Sprintf("scp -i %s %s@%s:out-%s out-%s", privateKeyFilePath, user, accessIP, dev, dev) - loc := commons.LocalExec{Command: scpCommand} - locMap := make(map[string]interface{}) - locMap["local-exec"] = loc - locResource := commons.Resource{} - locResource.Provisioners = append(locResource.Provisioners, locMap) - locResource.DependsOn = []string{fmt.Sprintf("null_resource.%s", fmt.Sprintf("%s-GetDevice-%s", instance.Name, dev))} - commons.AddResource(infrastructure, 
"null_resource", fmt.Sprintf("%s-CopyOut-%s", instance.Name, dev), &locResource) - } - } + handleDeviceAttributes(infrastructure, &instance, devices, user, privateKeyFilePath, accessIP) return nil } +func handleDeviceAttributes(infrastructure *commons.Infrastructure, instance *ComputeInstance, devices map[string][]string, user, privateKeyFilePath, accessIP string) { + // Retrieve devices { + for dev, keyPaths := range devices { + devResource := commons.Resource{} + + // Remote exec to retrieve the logical device for google device ID and to redirect stdout to file + re := commons.RemoteExec{Inline: []string{fmt.Sprintf("readlink -f /dev/disk/by-id/%s > %s", dev, dev)}, + Connection: &commons.Connection{User: user, Host: accessIP, + PrivateKey: `${file("` + privateKeyFilePath + `")}`}} + devResource.Provisioners = make([]map[string]interface{}, 0) + provMap := make(map[string]interface{}) + provMap["remote-exec"] = re + devResource.Provisioners = append(devResource.Provisioners, provMap) + devResource.DependsOn = []string{fmt.Sprintf("null_resource.%s", instance.Name+"-ConnectionCheck")} + commons.AddResource(infrastructure, "null_resource", fmt.Sprintf("%s-GetDevice-%s", instance.Name, dev), &devResource) + + // local exec to scp the stdout file locally + scpCommand := fmt.Sprintf("scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s %s@%s:~/%s %s", privateKeyFilePath, user, accessIP, dev, dev) + loc := commons.LocalExec{ + Command: scpCommand, + } + locMap := make(map[string]interface{}) + locMap["local-exec"] = loc + locResource := commons.Resource{} + locResource.Provisioners = append(locResource.Provisioners, locMap) + locResource.DependsOn = []string{fmt.Sprintf("null_resource.%s", fmt.Sprintf("%s-GetDevice-%s", instance.Name, dev))} + commons.AddResource(infrastructure, "null_resource", fmt.Sprintf("%s-CopyOut-%s", instance.Name, dev), &locResource) + + // Remote exec to cleanup created file + cleanResource := commons.Resource{} + re = commons.RemoteExec{Inline: []string{fmt.Sprintf("rm -f %s", dev)}, + Connection: &commons.Connection{User: user, Host: accessIP, + PrivateKey: `${file("` + privateKeyFilePath + `")}`}} + cleanResource.Provisioners = make([]map[string]interface{}, 0) + m := make(map[string]interface{}) + m["remote-exec"] = re + cleanResource.Provisioners = append(devResource.Provisioners, m) + cleanResource.DependsOn = []string{fmt.Sprintf("null_resource.%s", fmt.Sprintf("%s-CopyOut-%s", instance.Name, dev))} + commons.AddResource(infrastructure, "null_resource", fmt.Sprintf("%s-cleanup-%s", instance.Name, dev), &cleanResource) + + consulKeys := commons.ConsulKeys{Keys: []commons.ConsulKey{}} + consulKeys.DependsOn = []string{fmt.Sprintf("null_resource.%s", fmt.Sprintf("%s-CopyOut-%s", instance.Name, dev))} + //FIXME still need to get device from file ! 
+ for _, keyPath := range keyPaths { + k := commons.ConsulKey{ + Path: keyPath, + //Value: "${data.external.getContent.result.content}", + Value: "toBeDone", + } + log.Debugf(k.Path) + consulKeys.Keys = append(consulKeys.Keys, k) + //commons.AddOutput(infrastructure, dev, &commons.Output{Value: fmt.Sprintf("${file(\"%s\")}", dev)}) + } + commons.AddResource(infrastructure, "consul_keys", dev, &consulKeys) + } +} + func attributeLookup(ctx context.Context, kv *api.KV, deploymentID, instanceName, nodeName, attribute string) (string, error) { log.Debugf("Attribute:%q lookup for deploymentID:%q, node name:%q, instance:%q", attribute, deploymentID, nodeName, instanceName) res := make(chan string, 1) @@ -329,8 +361,8 @@ func attributeLookup(ctx context.Context, kv *api.KV, deploymentID, instanceName } } -func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, deploymentID, nodeName, instanceName, computeName string, infrastructure *commons.Infrastructure) ([]string, error) { - devices := make([]string, 0) +func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, deploymentID, nodeName, instanceName, computeName string, infrastructure *commons.Infrastructure) (map[string][]string, error) { + devices := make(map[string][]string, 0) storageKeys, err := deployments.GetRequirementsKeysByTypeForNode(kv, deploymentID, nodeName, "local_storage") if err != nil { @@ -386,32 +418,15 @@ func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, attachedDisk.DeviceName = attachName commons.AddResource(infrastructure, "google_compute_attached_disk", attachName, attachedDisk) - // Add specific output for logical device - //commons.AddOutput(infrastructure, attachName, &commons.Output{Value: fmt.Sprintf("${file(\"%s\")}", "out-"+attachName)}) - // Add device - devices = append(devices, fmt.Sprintf("google-%s", attachName)) - - // Provide Consul Keys - consulKeys := commons.ConsulKeys{Keys: []commons.ConsulKey{}} + // Provide relative consul key paths instancesPrefix := path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "instances") - - volumeDevConsulKey := commons.ConsulKey{ - Path: path.Join(instancesPrefix, volumeNodeName, instanceName, "attributes/device"), - //Value: fmt.Sprintf("${local.%s}", attachName), - Value: "test", - } - relDevConsulKey := commons.ConsulKey{ - Path: path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", nodeName, requirementIndex, instanceName, "attributes/device"), - //Value: fmt.Sprintf("${local.%s}", attachName), - Value: "test", - } - relVolDevConsulKey := commons.ConsulKey{ - Path: path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", volumeNodeName, requirementIndex, instanceName, "attributes/device"), - //Value: fmt.Sprintf("${local.%s}", attachName), - Value: "test", + keyPaths := []string{ + path.Join(instancesPrefix, volumeNodeName, instanceName, "attributes/device"), + path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", nodeName, requirementIndex, instanceName, "attributes/device"), + path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", volumeNodeName, requirementIndex, instanceName, "attributes/device"), } - consulKeys.Keys = append(consulKeys.Keys, volumeDevConsulKey, relDevConsulKey, relVolDevConsulKey) - commons.AddResource(infrastructure, "consul_keys", attachName, &consulKeys) + // Add device and related consul keys + 
devices[fmt.Sprintf("google-%s", attachName)] = keyPaths } return devices, nil } From 795915c9213ed15e650f39d1666da7d460554173 Mon Sep 17 00:00:00 2001 From: stephane benoist <29753229+stebenoist@users.noreply.github.com> Date: Fri, 19 Oct 2018 13:47:22 +0200 Subject: [PATCH 10/19] Retrieve logical volume device and set related attributes --- prov/terraform/commons/generator.go | 3 ++ prov/terraform/executor.go | 40 ++++++++++++++++++- prov/terraform/google/compute_instance.go | 38 ++++++------------ .../terraform/google/compute_instance_test.go | 14 ++++++- 4 files changed, 68 insertions(+), 27 deletions(-) diff --git a/prov/terraform/commons/generator.go b/prov/terraform/commons/generator.go index 9b5b80a24..c16635a48 100644 --- a/prov/terraform/commons/generator.go +++ b/prov/terraform/commons/generator.go @@ -21,6 +21,9 @@ import ( "github.com/ystia/yorc/config" ) +// FileOutputPrefix is the prefix to identify file output +const FileOutputPrefix = "file:" + // A Generator is used to generate the Terraform infrastructure for a given TOSCA node type Generator interface { // GenerateTerraformInfraForNode generates the Terraform infrastructure file for the given node. diff --git a/prov/terraform/executor.go b/prov/terraform/executor.go index def6a93b5..c80ca8b98 100644 --- a/prov/terraform/executor.go +++ b/prov/terraform/executor.go @@ -28,10 +28,13 @@ import ( "github.com/ystia/yorc/events" "github.com/ystia/yorc/helper/consulutil" "github.com/ystia/yorc/helper/executil" + "github.com/ystia/yorc/log" "github.com/ystia/yorc/prov" "github.com/ystia/yorc/prov/terraform/commons" "github.com/ystia/yorc/tasks" "github.com/ystia/yorc/tosca" + "io/ioutil" + "path" ) type defaultExecutor struct { @@ -165,6 +168,17 @@ func (e *defaultExecutor) retrieveOutputs(ctx context.Context, kv *api.KV, infra if len(outputs) == 0 { return nil } + + // Filter and handle file output + filteredOutputs, err := e.handleFileOutputs(ctx, kv, infraPath, outputs) + if err != nil { + return err + } + + if len(filteredOutputs) == 0 { + return nil + } + type tfJSONOutput struct { Sensitive bool `json:"sensitive,omitempty"` Type string `json:"type,omitempty"` @@ -183,7 +197,7 @@ func (e *defaultExecutor) retrieveOutputs(ctx context.Context, kv *api.KV, infra if err != nil { return errors.Wrap(err, "Failed to retrieve the infrastructure outputs via terraform") } - for outPath, outName := range outputs { + for outPath, outName := range filteredOutputs { output, ok := outputsList[outName] if !ok { return errors.Errorf("failed to retrieve output %q in terraform result", outName) @@ -197,6 +211,30 @@ func (e *defaultExecutor) retrieveOutputs(ctx context.Context, kv *api.KV, infra return nil } +// File outputs are outputs that terraform can't resolve and which need to be retrieved in local files +func (e *defaultExecutor) handleFileOutputs(ctx context.Context, kv *api.KV, infraPath string, outputs map[string]string) (map[string]string, error) { + filteredOutputs := make(map[string]string, 0) + for k, v := range outputs { + if strings.HasPrefix(v, commons.FileOutputPrefix) { + file := strings.TrimPrefix(v, commons.FileOutputPrefix) + log.Debugf("Handle file output:%q", file) + content, err := ioutil.ReadFile(path.Join(infraPath, file)) + if err != nil { + return nil, errors.Wrapf(err, "Failed to retrieve file output from file:%q", file) + } + contentStr := strings.Trim(string(content), "\r\n") + // Store keyValue in Consul + _, err = kv.Put(&api.KVPair{Key: k, Value: []byte(contentStr)}, nil) + if err != nil { + return nil, 
errors.Wrap(err, consulutil.ConsulGenericErrMsg) + } + } else { + filteredOutputs[k] = v + } + } + return filteredOutputs, nil +} + func (e *defaultExecutor) applyInfrastructure(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string, outputs map[string]string, env []string) error { // Remote Configuration for Terraform State to store it in the Consul KV store diff --git a/prov/terraform/google/compute_instance.go b/prov/terraform/google/compute_instance.go index bcb5b3ec9..3c0a5eea1 100644 --- a/prov/terraform/google/compute_instance.go +++ b/prov/terraform/google/compute_instance.go @@ -207,7 +207,7 @@ func (g *googleGenerator) generateComputeInstance(ctx context.Context, kv *api.K commons.AddResource(infrastructure, "google_compute_instance", instance.Name, &instance) // Attach Persistent disks - devices, err := addAttachedDisks(ctx, cfg, kv, deploymentID, nodeName, instanceName, instance.Name, infrastructure) + devices, err := addAttachedDisks(ctx, cfg, kv, deploymentID, nodeName, instanceName, instance.Name, infrastructure, outputs) if err != nil { return err } @@ -276,9 +276,9 @@ func (g *googleGenerator) generateComputeInstance(ctx context.Context, kv *api.K return nil } -func handleDeviceAttributes(infrastructure *commons.Infrastructure, instance *ComputeInstance, devices map[string][]string, user, privateKeyFilePath, accessIP string) { +func handleDeviceAttributes(infrastructure *commons.Infrastructure, instance *ComputeInstance, devices []string, user, privateKeyFilePath, accessIP string) { // Retrieve devices { - for dev, keyPaths := range devices { + for _, dev := range devices { devResource := commons.Resource{} // Remote exec to retrieve the logical device for google device ID and to redirect stdout to file @@ -318,18 +318,6 @@ func handleDeviceAttributes(infrastructure *commons.Infrastructure, instance *Co consulKeys := commons.ConsulKeys{Keys: []commons.ConsulKey{}} consulKeys.DependsOn = []string{fmt.Sprintf("null_resource.%s", fmt.Sprintf("%s-CopyOut-%s", instance.Name, dev))} - //FIXME still need to get device from file ! 
- for _, keyPath := range keyPaths { - k := commons.ConsulKey{ - Path: keyPath, - //Value: "${data.external.getContent.result.content}", - Value: "toBeDone", - } - log.Debugf(k.Path) - consulKeys.Keys = append(consulKeys.Keys, k) - //commons.AddOutput(infrastructure, dev, &commons.Output{Value: fmt.Sprintf("${file(\"%s\")}", dev)}) - } - commons.AddResource(infrastructure, "consul_keys", dev, &consulKeys) } } @@ -361,8 +349,8 @@ func attributeLookup(ctx context.Context, kv *api.KV, deploymentID, instanceName } } -func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, deploymentID, nodeName, instanceName, computeName string, infrastructure *commons.Infrastructure) (map[string][]string, error) { - devices := make(map[string][]string, 0) +func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, deploymentID, nodeName, instanceName, computeName string, infrastructure *commons.Infrastructure, outputs map[string]string) ([]string, error) { + devices := make([]string, 0) storageKeys, err := deployments.GetRequirementsKeysByTypeForNode(kv, deploymentID, nodeName, "local_storage") if err != nil { @@ -418,15 +406,15 @@ func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, attachedDisk.DeviceName = attachName commons.AddResource(infrastructure, "google_compute_attached_disk", attachName, attachedDisk) - // Provide relative consul key paths + // Provide file outputs for device attributes which can't be resolved with Terraform + device := fmt.Sprintf("google-%s", attachName) + outputDeviceVal := commons.FileOutputPrefix + device instancesPrefix := path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "instances") - keyPaths := []string{ - path.Join(instancesPrefix, volumeNodeName, instanceName, "attributes/device"), - path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", nodeName, requirementIndex, instanceName, "attributes/device"), - path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", volumeNodeName, requirementIndex, instanceName, "attributes/device"), - } - // Add device and related consul keys - devices[fmt.Sprintf("google-%s", attachName)] = keyPaths + outputs[path.Join(instancesPrefix, volumeNodeName, instanceName, "attributes/device")] = outputDeviceVal + outputs[path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", nodeName, requirementIndex, instanceName, "attributes/device")] = outputDeviceVal + outputs[path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", volumeNodeName, requirementIndex, instanceName, "attributes/device")] = outputDeviceVal + // Add device + devices = append(devices, device) } return devices, nil } diff --git a/prov/terraform/google/compute_instance_test.go b/prov/terraform/google/compute_instance_test.go index a8bdcae61..484a4b9c9 100644 --- a/prov/terraform/google/compute_instance_test.go +++ b/prov/terraform/google/compute_instance_test.go @@ -141,7 +141,8 @@ func testSimpleComputeInstanceWithPersistentDisk(t *testing.T, kv *api.KV, srv1 infrastructure := commons.Infrastructure{} g := googleGenerator{} - err := g.generateComputeInstance(context.Background(), kv, cfg, deploymentID, "Compute", "0", 0, &infrastructure, make(map[string]string)) + outputs := make(map[string]string, 0) + err := g.generateComputeInstance(context.Background(), kv, cfg, deploymentID, "Compute", "0", 0, &infrastructure, outputs) require.NoError(t, err, 
"Unexpected error attempting to generate compute instance for %s", deploymentID) require.Len(t, infrastructure.Resource["google_compute_instance"], 1, "Expected one compute instance") @@ -168,4 +169,15 @@ func testSimpleComputeInstanceWithPersistentDisk(t *testing.T, kv *api.KV, srv1 assert.Equal(t, "europe-west1-b", attachedDisk.Zone) assert.Equal(t, "bs1-0-to-compute-0", attachedDisk.DeviceName) assert.Equal(t, "READ_ONLY", attachedDisk.Mode) + + require.Contains(t, infrastructure.Resource, "null_resource") + require.Len(t, infrastructure.Resource["null_resource"], 4) + + require.Len(t, outputs, 3, "three outputs are expected") + require.Contains(t, outputs, path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/instances/", "BS1", "0", "attributes/device"), "expected instances attribute output") + require.Contains(t, outputs, path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/relationship_instances/", "Compute", "0", "0", "attributes/device"), "expected relationship attribute output for Compute") + require.Contains(t, outputs, path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/relationship_instances/", "BS1", "0", "0", "attributes/device"), "expected relationship attribute output for Block storage") + require.Equal(t, "file:google-bs1-0-to-compute-0", outputs[path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/instances/", "BS1", "0", "attributes/device")], "output file value expected") + require.Equal(t, "file:google-bs1-0-to-compute-0", outputs[path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/relationship_instances/", "Compute", "0", "0", "attributes/device")], "output file value expected") + require.Equal(t, "file:google-bs1-0-to-compute-0", outputs[path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/relationship_instances/", "BS1", "0", "0", "attributes/device")], "output file value expected") } From 90b53b5a6f995c223bc70ce79ef1a5b844329ef9 Mon Sep 17 00:00:00 2001 From: Albertin Loic Date: Tue, 23 Oct 2018 15:26:06 +0200 Subject: [PATCH 11/19] Allow to run multiple workflows and custom commands in //Now terraform and ansible recipes are stored under a path containingthe execution task id.By default those files are deleted after the execution by config optionsare available to keep them --- commands/server.go | 6 +++ config/config.go | 2 + doc/configuration.rst | 34 ++++++++++++----- prov/ansible/execution.go | 19 ++++++++-- prov/terraform/aws/generator.go | 11 ++---- prov/terraform/commons/generator.go | 4 +- prov/terraform/executor.go | 53 +++++++++++++++++---------- prov/terraform/google/generator.go | 11 ++---- prov/terraform/openstack/generator.go | 11 ++---- prov/terraform/openstack/init.go | 2 +- tasks/tasks.go | 30 ++++++++------- tasks/tasks_test.go | 14 +++---- 12 files changed, 116 insertions(+), 81 deletions(-) diff --git a/commands/server.go b/commands/server.go index 59fb2bf16..a6502d426 100644 --- a/commands/server.go +++ b/commands/server.go @@ -63,6 +63,7 @@ var ansibleConfiguration = map[string]interface{}{ "ansible.keep_operation_remote_path": config.DefaultKeepOperationRemotePath, "ansible.archive_artifacts": config.DefaultArchiveArtifacts, "ansible.cache_facts": config.DefaultCacheFacts, + "ansible.keep_generated_recipes": false, } var consulConfiguration = map[string]interface{}{ @@ -84,6 +85,7 @@ var terraformConfiguration = map[string]interface{}{ "terraform.aws_plugin_version_constraint": tfAWSPluginVersionConstraint, "terraform.google_plugin_version_constraint": 
tfGooglePluginVersionConstraint, "terraform.openstack_plugin_version_constraint": tfOpenStackPluginVersionConstraint, + "terraform.keep_generated_files": false, } var cfgFile string @@ -252,6 +254,10 @@ func setConfig() { serverCmd.PersistentFlags().Bool("keep_operation_remote_path", config.DefaultKeepOperationRemotePath, "Define wether the path created to store artifacts on the nodes will be removed at the end of workflow executions.") serverCmd.PersistentFlags().Bool("ansible_archive_artifacts", config.DefaultArchiveArtifacts, "Define wether artifacts should be ./archived before being copied on remote nodes (requires tar to be installed on remote nodes).") serverCmd.PersistentFlags().Bool("ansible_cache_facts", config.DefaultCacheFacts, "Define wether Ansible facts (useful variables about remote hosts) should be cached.") + serverCmd.PersistentFlags().Bool("ansible_keep_generated_recipes", false, "Define if Yorc should not delete generated Ansible recipes") + + //Flags definition for Terraform + serverCmd.PersistentFlags().Bool("terraform_keep_generated_files", false, "Define if Yorc should not delete generated Terraform infrastructure files") //Flags definition for Terraform serverCmd.PersistentFlags().StringP("terraform_plugins_dir", "", "", "The directory where to find Terraform plugins") diff --git a/config/config.go b/config/config.go index ae8fd39ac..36bb12e64 100644 --- a/config/config.go +++ b/config/config.go @@ -115,6 +115,7 @@ type Ansible struct { ConnectionRetries int `mapstructure:"connection_retries"` OperationRemoteBaseDir string `mapstructure:"operation_remote_base_dir"` KeepOperationRemotePath bool `mapstructure:"keep_operation_remote_path"` + KeepGeneratedRecipes bool `mapstructure:"keep_generated_recipes"` ArchiveArtifacts bool `mapstructure:"archive_artifacts"` CacheFacts bool `mapstructure:"cache_facts"` HostedOperations HostedOperations `mapstructure:"hosted_operations"` @@ -151,6 +152,7 @@ type Terraform struct { AWSPluginVersionConstraint string `mapstructure:"aws_plugin_version_constraint"` GooglePluginVersionConstraint string `mapstructure:"google_plugin_version_constraint"` OpenStackPluginVersionConstraint string `mapstructure:"openstack_plugin_version_constraint"` + KeepGeneratedFiles bool `mapstructure:"keep_generated_files"` } // DynamicMap allows to store configuration parameters that are not known in advance. diff --git a/doc/configuration.rst b/doc/configuration.rst index 1346837d9..89948cc8e 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -46,6 +46,10 @@ Globals Command-line options * ``--ansible_archive_artifacts``: If set to true, archives operation bash/python scripts locally, copies this archive and unarchives it on remote hosts (requires tar to be installed on remote hosts), to avoid multiple time consuming remote copy operations of individual scripts (false by default: no archive). +.. _option_ansible_keep_generated_recipes_cmd: + + * ``--ansible_keep_generated_recipes``: If set to true, generated Ansible recipes on the Yorc server are not deleted. (false by default: generated recipes are deleted). + .. _option_operation_remote_base_dir_cmd: * ``--operation_remote_base_dir``: Specify an alternative working directory for Ansible on provisioned Compute. @@ -109,6 +113,9 @@ Globals Command-line options .. _option_terraform_openstack_plugin_version_constraint_cmd: * ``--terraform_openstack_plugin_version_constraint``: Specify the Terraform OpenStack plugin version constraint. Default one compatible with our source code is ``"~> 1.9"``. 
If you choose another, it's at your own risk. See https://www.terraform.io/docs/configuration/providers.html#provider-versions for more information. +.. _option_terraform_keep_generated_files_cmd: + + * ``--terraform_keep_generated_files``: If set to true, generated Terraform infrastructure files on the Yorc server are not deleted. (false by default: generated files are deleted). .. _option_pub_routines_cmd: @@ -336,6 +343,10 @@ All available configuration options for Ansible are: * ``keep_operation_remote_path``: Equivalent to :ref:`--keep_operation_remote_path ` command-line flag. +.. _option_ansible_keep_generated_recipes_cfg: + + * ``keep_generated_recipes``: Equivalent to :ref:`--ansible_keep_generated_recipes ` command-line flag. + .. _option_ansible_sandbox_hosted_ops_cfg: * ``hosted_operations``: This is a complex structure that allow to define the behavior of a Yorc server when it executes an hosted operation. @@ -478,7 +489,7 @@ Below is an example of configuration file with Terraform configuration options. } } -All available configuration options for Consul are: +All available configuration options for Terraform are: .. _option_plugins_dir_cfg: @@ -500,6 +511,11 @@ All available configuration options for Consul are: * ``openstack_plugin_version_constraint``: Equivalent to :ref:`--terraform_openstack_plugin_version_constraint ` command-line flag. +.. _option_terraform_keep_generated_files_cfg: + + * ``keep_generated_files``: Equivalent to :ref:`--terraform_keep_generated_files ` command-line flag. + + .. _yorc_config_file_telemetry_section: Telemetry configuration @@ -647,6 +663,10 @@ Environment variables * ``YORC_ANSIBLE_ARCHIVE_ARTIFACTS``: Equivalent to :ref:`--ansible_archive_artifacts ` command-line flag. +.. _option_ansible_keep_generated_recipes_env: + + * ``YORC_ANSIBLE_KEEP_GENERATED_RECIPES``: Equivalent to :ref:`--ansible_keep_generated_recipes ` command-line flag. + .. _option_operation_remote_base_dir_env: * ``YORC_OPERATION_REMOTE_BASE_DIR``: Equivalent to :ref:`--operation_remote_base_dir ` command-line flag. @@ -751,14 +771,6 @@ Environment variables * ``YORC_LOG``: If set to ``1`` or ``DEBUG``, enables debug logging for Yorc. -.. _option_aws_access_key: - - * ``YORC_INFRA_AWS_ACCESS_KEY``: The AWS access key credential. - -.. _option_aws_secret_key: - - * ``YORC_INFRA_AWS_SECRET_KEY``: The AWS secret key credential. - .. _option_terraform_plugins_dir_env: * ``YORC_TERRAFORM_PLUGINS_DIR``: Equivalent to :ref:`--terraform_plugins_dir ` command-line flag. @@ -779,6 +791,10 @@ Environment variables * ``YORC_TERRAFORM_OPENSTACK_PLUGIN_VERSION_CONSTRAINT``: Equivalent to :ref:`--terraform_openstack_plugin_version_constraint ` command-line flag. +.. _option_terraform_keep_generated_files_env: + + * ``YORC_TERRAFORM_KEEP_GENERATED_FILES``: Equivalent to :ref:`--terraform_keep_generated_files ` command-line flag. 
+ Infrastructures configuration ----------------------------- diff --git a/prov/ansible/execution.go b/prov/ansible/execution.go index 4fe8d62a5..9f05df749 100644 --- a/prov/ansible/execution.go +++ b/prov/ansible/execution.go @@ -621,7 +621,7 @@ func (e *executionCommon) resolveOperationOutputPath() error { } else { //If we are with an expression type {get_operation_output : [ SELF, ...]} in a relationship we store the result in the corresponding relationship instance if oof.Operands[0].String() == "SELF" && e.operation.RelOp.IsRelationshipOperation { - relationShipPrefix := filepath.Join("relationship_instances", e.NodeName, e.operation.RelOp.RequirementIndex, instanceID) + relationShipPrefix := path.Join("relationship_instances", e.NodeName, e.operation.RelOp.RequirementIndex, instanceID) e.Outputs[outputVariableName+"_"+fmt.Sprint(b)] = path.Join(relationShipPrefix, "outputs", interfaceName, operationName, outputVariableName) } else if oof.Operands[0].String() == "HOST" { // In this case we continue because the parsing has change this type on {get_operation_output : [ SELF, ...]} on the host node @@ -799,11 +799,11 @@ func (e *executionCommon) executeWithCurrentInstance(ctx context.Context, retry return err } - var ansibleRecipePath string + ansibleRecipePath := filepath.Join(ansiblePath, e.taskID, e.NodeName) if e.operation.RelOp.IsRelationshipOperation { - ansibleRecipePath = filepath.Join(ansiblePath, e.NodeName, e.relationshipType, e.operation.RelOp.TargetRelationship, e.operation.Name, currentInstance) + ansibleRecipePath = filepath.Join(ansibleRecipePath, e.relationshipType, e.operation.RelOp.TargetRelationship, e.operation.Name, currentInstance) } else { - ansibleRecipePath = filepath.Join(ansiblePath, e.NodeName, e.operation.Name, currentInstance) + ansibleRecipePath = filepath.Join(ansibleRecipePath, e.operation.Name, currentInstance) } if err = os.RemoveAll(ansibleRecipePath); err != nil { @@ -812,6 +812,17 @@ func (e *executionCommon) executeWithCurrentInstance(ctx context.Context, retry events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelERROR, e.deploymentID).RegisterAsString(err.Error()) return err } + + defer func() { + if !e.cfg.Ansible.KeepGeneratedRecipes { + err := os.RemoveAll(ansibleRecipePath) + if err != nil { + err = errors.Wrapf(err, "Failed to remove ansible recipe directory %q for node %q operation %q", ansibleRecipePath, e.NodeName, e.operation.Name) + log.Debugf("%+v", err) + events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelERROR, e.deploymentID).RegisterAsString(err.Error()) + } + } + }() ansibleHostVarsPath := filepath.Join(ansibleRecipePath, "host_vars") if err = os.MkdirAll(ansibleHostVarsPath, 0775); err != nil { events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelERROR, e.deploymentID).RegisterAsString(err.Error()) diff --git a/prov/terraform/aws/generator.go b/prov/terraform/aws/generator.go index 5c82b2ab2..03620c85d 100644 --- a/prov/terraform/aws/generator.go +++ b/prov/terraform/aws/generator.go @@ -19,7 +19,6 @@ import ( "encoding/json" "fmt" "io/ioutil" - "os" "path" "path/filepath" @@ -37,7 +36,7 @@ const infrastructureName = "aws" type awsGenerator struct { } -func (g *awsGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName string) (bool, map[string]string, []string, error) { +func (g *awsGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string) (bool, 
map[string]string, []string, error) { log.Debugf("Generating infrastructure for deployment with id %s", deploymentID) cClient, err := cfg.GetConsulClient() if err != nil { @@ -141,13 +140,9 @@ func (g *awsGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg co if err != nil { return false, nil, nil, errors.Wrap(err, "Failed to generate JSON of terraform Infrastructure description") } - infraPath := filepath.Join(cfg.WorkingDirectory, "deployments", fmt.Sprint(deploymentID), "infra", nodeName) - if err = os.MkdirAll(infraPath, 0775); err != nil { - return false, nil, nil, errors.Wrapf(err, "Failed to create infrastructure working directory %q", infraPath) - } - if err = ioutil.WriteFile(filepath.Join(infraPath, "infra.tf.json"), jsonInfra, 0664); err != nil { - return false, nil, nil, errors.Wrapf(err, "Failed to write file %q", filepath.Join(infraPath, "infra.tf.json")) + if err = ioutil.WriteFile(filepath.Join(infrastructurePath, "infra.tf.json"), jsonInfra, 0664); err != nil { + return false, nil, nil, errors.Wrapf(err, "Failed to write file %q", filepath.Join(infrastructurePath, "infra.tf.json")) } log.Debugf("Infrastructure generated for deployment with id %s", deploymentID) diff --git a/prov/terraform/commons/generator.go b/prov/terraform/commons/generator.go index 9b5b80a24..00ae3343d 100644 --- a/prov/terraform/commons/generator.go +++ b/prov/terraform/commons/generator.go @@ -29,8 +29,8 @@ type Generator interface { // GenerateTerraformInfraForNode can also return a map of outputs names indexed by consul keys into which the outputs results should be stored. // And a list of environment variables in form "key=value" to be added to the current process environment when running terraform commands. // This is particularly useful for adding secrets that should not be in tf files. - GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName string) (bool, map[string]string, []string, error) + GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string) (bool, map[string]string, []string, error) } // PreDestroyInfraCallback is a function that is call before destroying an infrastructure. If it returns false the node will not be destroyed. 
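// Editor's note: the infrastructurePath parameter added below points to the
// per-execution directory (created by the executor in this patch) that holds the
// generated Terraform files for the node being destroyed.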
-type PreDestroyInfraCallback func(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string) (bool, error) +type PreDestroyInfraCallback func(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string) (bool, error) diff --git a/prov/terraform/executor.go b/prov/terraform/executor.go index def6a93b5..580837158 100644 --- a/prov/terraform/executor.go +++ b/prov/terraform/executor.go @@ -28,6 +28,7 @@ import ( "github.com/ystia/yorc/events" "github.com/ystia/yorc/helper/consulutil" "github.com/ystia/yorc/helper/executil" + "github.com/ystia/yorc/log" "github.com/ystia/yorc/prov" "github.com/ystia/yorc/prov/terraform/commons" "github.com/ystia/yorc/tasks" @@ -65,32 +66,46 @@ func (e *defaultExecutor) ExecDelegate(ctx context.Context, cfg config.Configura if err != nil { return err } - + infrastructurePath := filepath.Join(cfg.WorkingDirectory, "deployments", deploymentID, "terraform", taskID, nodeName) + if err = os.MkdirAll(infrastructurePath, 0775); err != nil { + return errors.Wrapf(err, "Failed to create infrastructure working directory %q", infrastructurePath) + } + defer func() { + if !cfg.Terraform.KeepGeneratedFiles { + err := os.RemoveAll(infrastructurePath) + if err != nil { + err = errors.Wrapf(err, "Failed to remove Terraform infrastructure directory %q for node %q operation %q", infrastructurePath, nodeName, delegateOperation) + log.Debugf("%+v", err) + events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelERROR, deploymentID).RegisterAsString(err.Error()) + } + } + }() op := strings.ToLower(delegateOperation) switch { case op == "install": - err = e.installNode(ctx, kv, cfg, deploymentID, nodeName, instances) + err = e.installNode(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath, instances) case op == "uninstall": - err = e.uninstallNode(ctx, kv, cfg, deploymentID, nodeName, instances) + err = e.uninstallNode(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath, instances) default: return errors.Errorf("Unsupported operation %q", delegateOperation) } return err } -func (e *defaultExecutor) installNode(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string, instances []string) error { +func (e *defaultExecutor) installNode(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string, instances []string) error { for _, instance := range instances { err := deployments.SetInstanceStateWithContextualLogs(events.AddLogOptionalFields(ctx, events.LogOptionalFields{events.InstanceID: instance}), kv, deploymentID, nodeName, instance, tosca.NodeStateCreating) if err != nil { return err } } - infraGenerated, outputs, env, err := e.generator.GenerateTerraformInfraForNode(ctx, cfg, deploymentID, nodeName) + + infraGenerated, outputs, env, err := e.generator.GenerateTerraformInfraForNode(ctx, cfg, deploymentID, nodeName, infrastructurePath) if err != nil { return err } if infraGenerated { - if err = e.applyInfrastructure(ctx, kv, cfg, deploymentID, nodeName, outputs, env); err != nil { + if err = e.applyInfrastructure(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath, outputs, env); err != nil { return err } } @@ -103,19 +118,19 @@ func (e *defaultExecutor) installNode(ctx context.Context, kv *api.KV, cfg confi return nil } -func (e *defaultExecutor) uninstallNode(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string, instances []string) error { +func (e 
*defaultExecutor) uninstallNode(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string, instances []string) error { for _, instance := range instances { err := deployments.SetInstanceStateWithContextualLogs(events.AddLogOptionalFields(ctx, events.LogOptionalFields{events.InstanceID: instance}), kv, deploymentID, nodeName, instance, tosca.NodeStateDeleting) if err != nil { return err } } - infraGenerated, outputs, env, err := e.generator.GenerateTerraformInfraForNode(ctx, cfg, deploymentID, nodeName) + infraGenerated, outputs, env, err := e.generator.GenerateTerraformInfraForNode(ctx, cfg, deploymentID, nodeName, infrastructurePath) if err != nil { return err } if infraGenerated { - if err = e.destroyInfrastructure(ctx, kv, cfg, deploymentID, nodeName, outputs, env); err != nil { + if err = e.destroyInfrastructure(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath, outputs, env); err != nil { return err } } @@ -128,9 +143,8 @@ func (e *defaultExecutor) uninstallNode(ctx context.Context, kv *api.KV, cfg con return nil } -func (e *defaultExecutor) remoteConfigInfrastructure(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string, env []string) error { +func (e *defaultExecutor) remoteConfigInfrastructure(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string, env []string) error { events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, deploymentID).RegisterAsString("Remote configuring the infrastructure") - infraPath := filepath.Join(cfg.WorkingDirectory, "deployments", deploymentID, "infra", nodeName) var cmd *executil.Cmd // Use pre-installed Terraform providers plugins if plugins directory exists // https://www.terraform.io/guides/running-terraform-in-automation.html#pre-installed-plugins @@ -140,7 +154,7 @@ func (e *defaultExecutor) remoteConfigInfrastructure(ctx context.Context, kv *ap cmd = executil.Command(ctx, "terraform", "init") } - cmd.Dir = infraPath + cmd.Dir = infrastructurePath cmd.Env = mergeEnvironments(env) errbuf := events.NewBufferedLogEntryWriter() out := events.NewBufferedLogEntryWriter() @@ -197,17 +211,16 @@ func (e *defaultExecutor) retrieveOutputs(ctx context.Context, kv *api.KV, infra return nil } -func (e *defaultExecutor) applyInfrastructure(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string, outputs map[string]string, env []string) error { +func (e *defaultExecutor) applyInfrastructure(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string, outputs map[string]string, env []string) error { // Remote Configuration for Terraform State to store it in the Consul KV store - if err := e.remoteConfigInfrastructure(ctx, kv, cfg, deploymentID, nodeName, env); err != nil { + if err := e.remoteConfigInfrastructure(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath, env); err != nil { return err } events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, deploymentID).RegisterAsString("Applying the infrastructure") - infraPath := filepath.Join(cfg.WorkingDirectory, "deployments", deploymentID, "infra", nodeName) cmd := executil.Command(ctx, "terraform", "apply", "-input=false", "-auto-approve") - cmd.Dir = infraPath + cmd.Dir = infrastructurePath cmd.Env = mergeEnvironments(env) errbuf := events.NewBufferedLogEntryWriter() out := events.NewBufferedLogEntryWriter() @@ -225,20 +238,20 @@ func (e 
*defaultExecutor) applyInfrastructure(ctx context.Context, kv *api.KV, c return errors.Wrap(err, "Failed to apply the infrastructure changes via terraform") } - return e.retrieveOutputs(ctx, kv, infraPath, outputs) + return e.retrieveOutputs(ctx, kv, infrastructurePath, outputs) } -func (e *defaultExecutor) destroyInfrastructure(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string, outputs map[string]string, env []string) error { +func (e *defaultExecutor) destroyInfrastructure(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string, outputs map[string]string, env []string) error { if e.preDestroyCheck != nil { - check, err := e.preDestroyCheck(ctx, kv, cfg, deploymentID, nodeName) + check, err := e.preDestroyCheck(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath) if err != nil || !check { return err } } - return e.applyInfrastructure(ctx, kv, cfg, deploymentID, nodeName, outputs, env) + return e.applyInfrastructure(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath, outputs, env) } // mergeEnvironments merges given env with current process env diff --git a/prov/terraform/google/generator.go b/prov/terraform/google/generator.go index 14949a7f1..5772217ea 100644 --- a/prov/terraform/google/generator.go +++ b/prov/terraform/google/generator.go @@ -19,7 +19,6 @@ import ( "encoding/json" "fmt" "io/ioutil" - "os" "path" "path/filepath" "strings" @@ -38,7 +37,7 @@ const infrastructureName = "google" type googleGenerator struct { } -func (g *googleGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName string) (bool, map[string]string, []string, error) { +func (g *googleGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string) (bool, map[string]string, []string, error) { log.Debugf("Generating infrastructure for deployment with id %s", deploymentID) cClient, err := cfg.GetConsulClient() if err != nil { @@ -158,13 +157,9 @@ func (g *googleGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg if err != nil { return false, nil, nil, errors.Wrap(err, "Failed to generate JSON of terraform Infrastructure description") } - infraPath := filepath.Join(cfg.WorkingDirectory, "deployments", fmt.Sprint(deploymentID), "infra", nodeName) - if err = os.MkdirAll(infraPath, 0775); err != nil { - return false, nil, nil, errors.Wrapf(err, "Failed to create infrastructure working directory %q", infraPath) - } - if err = ioutil.WriteFile(filepath.Join(infraPath, "infra.tf.json"), jsonInfra, 0664); err != nil { - return false, nil, nil, errors.Wrapf(err, "Failed to write file %q", filepath.Join(infraPath, "infra.tf.json")) + if err = ioutil.WriteFile(filepath.Join(infrastructurePath, "infra.tf.json"), jsonInfra, 0664); err != nil { + return false, nil, nil, errors.Wrapf(err, "Failed to write file %q", filepath.Join(infrastructurePath, "infra.tf.json")) } log.Debugf("Infrastructure generated for deployment with id %s", deploymentID) diff --git a/prov/terraform/openstack/generator.go b/prov/terraform/openstack/generator.go index 6525cfb3a..3c9cf2c97 100644 --- a/prov/terraform/openstack/generator.go +++ b/prov/terraform/openstack/generator.go @@ -19,7 +19,6 @@ import ( "encoding/json" "fmt" "io/ioutil" - "os" "path" "path/filepath" "strconv" @@ -50,7 +49,7 @@ func (g *osGenerator) getStringFormConsul(kv *api.KV, baseURL, property string) return string(getResult.Value), nil } 
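// Editor's note: a sketch of the calling convention now shared by the AWS, Google
// and OpenStack generators, mirroring the executor change earlier in this patch
// (illustrative rather than authoritative):
//
//	infrastructurePath := filepath.Join(cfg.WorkingDirectory, "deployments", deploymentID, "terraform", taskID, nodeName)
//	if err := os.MkdirAll(infrastructurePath, 0775); err != nil {
//		return errors.Wrapf(err, "Failed to create infrastructure working directory %q", infrastructurePath)
//	}
//	infraGenerated, outputs, env, err := generator.GenerateTerraformInfraForNode(ctx, cfg, deploymentID, nodeName, infrastructurePath)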
-func (g *osGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName string) (bool, map[string]string, []string, error) { +func (g *osGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string) (bool, map[string]string, []string, error) { log.Debugf("Generating infrastructure for deployment with id %s", deploymentID) cClient, err := cfg.GetConsulClient() if err != nil { @@ -273,13 +272,9 @@ func (g *osGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg con if err != nil { return false, nil, nil, errors.Wrap(err, "Failed to generate JSON of terraform Infrastructure description") } - infraPath := filepath.Join(cfg.WorkingDirectory, "deployments", deploymentID, "infra", nodeName) - if err = os.MkdirAll(infraPath, 0775); err != nil { - return false, nil, nil, errors.Wrapf(err, "Failed to create infrastructure working directory %q", infraPath) - } - if err = ioutil.WriteFile(filepath.Join(infraPath, "infra.tf.json"), jsonInfra, 0664); err != nil { - return false, nil, nil, errors.Wrapf(err, "Failed to write file %q", filepath.Join(infraPath, "infra.tf.json")) + if err = ioutil.WriteFile(filepath.Join(infrastructurePath, "infra.tf.json"), jsonInfra, 0664); err != nil { + return false, nil, nil, errors.Wrapf(err, "Failed to write file %q", filepath.Join(infrastructurePath, "infra.tf.json")) } log.Debugf("Infrastructure generated for deployment with id %s", deploymentID) diff --git a/prov/terraform/openstack/init.go b/prov/terraform/openstack/init.go index 3f1780c48..69a974240 100644 --- a/prov/terraform/openstack/init.go +++ b/prov/terraform/openstack/init.go @@ -33,7 +33,7 @@ func init() { reg.RegisterDelegates([]string{`yorc\.nodes\.openstack\..*`}, terraform.NewExecutor(&osGenerator{}, preDestroyInfraCallback), registry.BuiltinOrigin) } -func preDestroyInfraCallback(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string) (bool, error) { +func preDestroyInfraCallback(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string) (bool, error) { nodeType, err := deployments.GetNodeType(kv, deploymentID, nodeName) if err != nil { return false, err diff --git a/tasks/tasks.go b/tasks/tasks.go index f57dbffd7..a5f3cb3ac 100644 --- a/tasks/tasks.go +++ b/tasks/tasks.go @@ -216,32 +216,34 @@ func DeleteTask(kv *api.KV, taskID string) error { } // TargetHasLivingTasks checks if a targetID has associated tasks in status INITIAL or RUNNING and returns the id and status of the first one found +// +// Only Deploy, UnDeploy, ScaleOut, ScaleIn and Purge task type are considered. 
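// In other words, a running workflow or custom command execution no longer blocks a
// new operation on the same target; only deployment, undeployment, scaling and purge
// tasks in INITIAL or RUNNING state do.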
 func TargetHasLivingTasks(kv *api.KV, targetID string) (bool, string, string, error) {
 	tasksKeys, _, err := kv.Keys(consulutil.TasksPrefix+"/", "/", nil)
 	if err != nil {
 		return false, "", "", errors.Wrap(err, consulutil.ConsulGenericErrMsg)
 	}
 	for _, taskKey := range tasksKeys {
-		kvp, _, err := kv.Get(path.Join(taskKey, "targetId"), nil)
+		taskID := path.Base(taskKey)
+		ttID, err := GetTaskTarget(kv, taskID)
 		if err != nil {
-			return false, "", "", errors.Wrap(err, consulutil.ConsulGenericErrMsg)
+			return false, "", "", err
 		}
-		if kvp != nil && len(kvp.Value) > 0 && string(kvp.Value) == targetID {
-			kvp, _, err := kv.Get(path.Join(taskKey, "status"), nil)
-			taskID := path.Base(taskKey)
+		if ttID == targetID {
+			tStatus, err := GetTaskStatus(kv, taskID)
 			if err != nil {
-				return false, "", "", errors.Wrap(err, consulutil.ConsulGenericErrMsg)
+				return false, "", "", err
 			}
-			if kvp == nil || len(kvp.Value) == 0 {
-				return false, "", "", errors.Errorf("Missing status for task with id %q", taskID)
-			}
-			statusInt, err := strconv.Atoi(string(kvp.Value))
+			tType, err := GetTaskType(kv, taskID)
 			if err != nil {
-				return false, "", "", errors.Wrap(err, "Invalid task status")
+				return false, "", "", err
 			}
-			switch TaskStatus(statusInt) {
-			case TaskStatusINITIAL, TaskStatusRUNNING:
-				return true, taskID, TaskStatus(statusInt).String(), nil
+
+			switch tType {
+			case TaskTypeDeploy, TaskTypeUnDeploy, TaskTypePurge, TaskTypeScaleIn, TaskTypeScaleOut:
+				if tStatus == TaskStatusINITIAL || tStatus == TaskStatusRUNNING {
+					return true, taskID, tStatus.String(), nil
+				}
 			}
 		}
 	}
diff --git a/tasks/tasks_test.go b/tasks/tasks_test.go
index 411dd0149..558309a7e 100644
--- a/tasks/tasks_test.go
+++ b/tasks/tasks_test.go
@@ -15,17 +15,16 @@
 package tasks

 import (
+	"encoding/json"
+	"fmt"
+	"path"
 	"reflect"
 	"testing"

-	"github.com/ystia/yorc/helper/consulutil"
-
-	"path"
-
-	"encoding/json"
-	"fmt"
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/testutil"
+
+	"github.com/ystia/yorc/helper/consulutil"
 )

 func populateKV(t *testing.T, srv *testutil.TestServer) {
@@ -80,7 +79,8 @@ func populateKV(t *testing.T, srv *testutil.TestServer) {
 		consulutil.TasksPrefix + "/t13/type":     []byte("5"),
 		consulutil.TasksPrefix + "/t13/status":   []byte("3"),
 		consulutil.TasksPrefix + "/t14/status":   []byte("3"),
-		consulutil.TasksPrefix + "/t14/type":     []byte("5"),
+		consulutil.TasksPrefix + "/t14/type":     []byte("6"),
+		consulutil.TasksPrefix + "/t14/targetId": []byte("id"),
 		consulutil.TasksPrefix + "/t15/targetId": []byte("xxx"),
 		consulutil.TasksPrefix + "/t15/status":   []byte("2"),

From bb9174f98a91951922d4af2d3f20baa3b07e7ade Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Albertin?=
Date: Tue, 23 Oct 2018 17:44:04 +0200
Subject: [PATCH 12/19] Update CHANGELOG.md

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7e0cc0ca2..70e326b80 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,10 @@

 ## 3.1.0-M4 (October 08, 2018)

+### ENHANCEMENTS
+
+* Concurrent workflows and custom command executions are now allowed except when a deployment/undeployment/scaling operation is in progress ([GH-182](https://github.com/ystia/yorc/issues/182))
+
 ### DEPENDENCIES

 * The orchestrator requires now at least Terraform 0.11.8 and following Terraform plugins (with corresponding version constraints): `Consul (~> 2.1)`, `AWS (~> 1.36)`, `OpenStack (~> 1.9)`, `Google (~ 1.18)` and `null provider (~ 1.0)`.
(Terraform upgrade from 0.9.11 introduced in [GH-82](https://github.com/ystia/yorc/issues/82))

From 1f9dd5b2c129e4893cd05b7238ed0118bdc1b86b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Albertin?=
Date: Tue, 23 Oct 2018 17:44:47 +0200
Subject: [PATCH 13/19] Update CHANGELOG.md

Oops, wrong place :)
---
 CHANGELOG.md | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 70e326b80..ef6a3bda1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,12 +2,13 @@

 ## UNRELEASED

-## 3.1.0-M4 (October 08, 2018)
-
 ### ENHANCEMENTS

 * Concurrent workflows and custom command executions are now allowed except when a deployment/undeployment/scaling operation is in progress ([GH-182](https://github.com/ystia/yorc/issues/182))
+
+## 3.1.0-M4 (October 08, 2018)
+
 ### DEPENDENCIES

 * The orchestrator requires now at least Terraform 0.11.8 and following Terraform plugins (with corresponding version constraints): `Consul (~> 2.1)`, `AWS (~> 1.36)`, `OpenStack (~> 1.9)`, `Google (~ 1.18)` and `null provider (~ 1.0)`.
 (Terraform upgrade from 0.9.11 introduced in [GH-82](https://github.com/ystia/yorc/issues/82))

From 5f345202d75ddc2138a0e79760ed62272c77b3c2 Mon Sep 17 00:00:00 2001
From: Laurent Ganne
Date: Tue, 23 Oct 2018 16:03:39 +0000
Subject: [PATCH 14/19] 'get device name' needs to depend on 'attach disk'

---
 prov/terraform/google/compute_instance.go | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/prov/terraform/google/compute_instance.go b/prov/terraform/google/compute_instance.go
index 3c0a5eea1..8cc3338f3 100644
--- a/prov/terraform/google/compute_instance.go
+++ b/prov/terraform/google/compute_instance.go
@@ -289,7 +289,10 @@ func handleDeviceAttributes(infrastructure *commons.Infrastructure, instance *Co
 		provMap := make(map[string]interface{})
 		provMap["remote-exec"] = re
 		devResource.Provisioners = append(devResource.Provisioners, provMap)
-		devResource.DependsOn = []string{fmt.Sprintf("null_resource.%s", instance.Name+"-ConnectionCheck")}
+		devResource.DependsOn = []string{
+			fmt.Sprintf("null_resource.%s", instance.Name+"-ConnectionCheck"),
+			fmt.Sprintf("google_compute_attached_disk.%s", dev),
+		}
 		commons.AddResource(infrastructure, "null_resource", fmt.Sprintf("%s-GetDevice-%s", instance.Name, dev), &devResource)

 		// local exec to scp the stdout file locally
@@ -404,10 +407,10 @@ func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV,
 		attachName = strings.Replace(attachName, "_", "-", -1)
 		// attachName is used as device name to retrieve device attribute as logical volume name
 		attachedDisk.DeviceName = attachName
-		commons.AddResource(infrastructure, "google_compute_attached_disk", attachName, attachedDisk)

 		// Provide file outputs for device attributes which can't be resolved with Terraform
 		device := fmt.Sprintf("google-%s", attachName)
+		commons.AddResource(infrastructure, "google_compute_attached_disk", device, attachedDisk)
 		outputDeviceVal := commons.FileOutputPrefix + device
 		instancesPrefix := path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "instances")
 		outputs[path.Join(instancesPrefix, volumeNodeName, instanceName, "attributes/device")] = outputDeviceVal

From 17a142d1bd99af496b536db170cb98a67c6bc7f0 Mon Sep 17 00:00:00 2001
From: Laurent Ganne
Date: Tue, 23 Oct 2018 16:25:56 +0000
Subject: [PATCH 15/19] Updated tests after a resource name change

---
 prov/terraform/google/compute_instance_test.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/prov/terraform/google/compute_instance_test.go b/prov/terraform/google/compute_instance_test.go index 484a4b9c9..fd5b97db2 100644 --- a/prov/terraform/google/compute_instance_test.go +++ b/prov/terraform/google/compute_instance_test.go @@ -161,9 +161,9 @@ func testSimpleComputeInstanceWithPersistentDisk(t *testing.T, kv *api.KV, srv1 instancesMap = infrastructure.Resource["google_compute_attached_disk"].(map[string]interface{}) require.Len(t, instancesMap, 1) - require.Contains(t, instancesMap, "bs1-0-to-compute-0") - attachedDisk, ok := instancesMap["bs1-0-to-compute-0"].(*ComputeAttachedDisk) - require.True(t, ok, "bs1-0-to-compute-0 is not a ComputeAttachedDisk") + require.Contains(t, instancesMap, "google-bs1-0-to-compute-0") + attachedDisk, ok := instancesMap["google-bs1-0-to-compute-0"].(*ComputeAttachedDisk) + require.True(t, ok, "google-bs1-0-to-compute-0 is not a ComputeAttachedDisk") assert.Equal(t, "my_vol_id", attachedDisk.Disk) assert.Equal(t, "${google_compute_instance.compute-0.name}", attachedDisk.Instance) assert.Equal(t, "europe-west1-b", attachedDisk.Zone) From edecb828e944df58ad15842233d6f3d180c76e78 Mon Sep 17 00:00:00 2001 From: Laurent Ganne <33217305+laurentganne@users.noreply.github.com> Date: Tue, 23 Oct 2018 22:37:06 +0200 Subject: [PATCH 16/19] Fixed typo --- doc/configuration.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 89948cc8e..3c8e970de 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -48,7 +48,7 @@ Globals Command-line options .. _option_ansible_keep_generated_recipes_cmd: - * ``--ansible_keep_generated_recipes``: If set to true, generated Ansible recipes on Yorc server are not delete. (false by default: generated recipes are deleted). + * ``--ansible_keep_generated_recipes``: If set to true, generated Ansible recipes on Yorc server are not deleted. (false by default: generated recipes are deleted). .. _option_operation_remote_base_dir_cmd: @@ -115,7 +115,7 @@ Globals Command-line options * ``--terraform_openstack_plugin_version_constraint``: Specify the Terraform OpenStack plugin version constraint. Default one compatible with our source code is ``"~> 1.9"``. If you choose another, it's at your own risk. See https://www.terraform.io/docs/configuration/providers.html#provider-versions for more information. .. _option_terraform_keep_generated_files_cmd: - * ``--terraform_keep_generated_files``: If set to true, generated Terraform infrastructures files on Yorc server are not delete. (false by default: generated files are deleted). + * ``--terraform_keep_generated_files``: If set to true, generated Terraform infrastructures files on Yorc server are not deleted. (false by default: generated files are deleted). .. 
_option_pub_routines_cmd: From 76d7cb656a6c99919da5d9fc3c8e799921c175da Mon Sep 17 00:00:00 2001 From: Laurent Ganne Date: Wed, 24 Oct 2018 13:01:57 +0000 Subject: [PATCH 17/19] Fixed a panic on nil map when device is not set and mode is set in storage relationship --- deployments/definition_store.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/deployments/definition_store.go b/deployments/definition_store.go index cea86c649..5c806e6e0 100644 --- a/deployments/definition_store.go +++ b/deployments/definition_store.go @@ -1091,9 +1091,11 @@ func fixAlienBlockStorages(ctx context.Context, kv *api.KV, deploymentID, nodeNa if err != nil { return errors.Wrapf(err, "Failed to fix Alien-specific BlockStorage %q", nodeName) } + + req.RelationshipProps = make(map[string]*tosca.ValueAssignment) + if device != nil { va := &tosca.ValueAssignment{} - req.RelationshipProps = make(map[string]*tosca.ValueAssignment) if device.RawString() != "" { err = yaml.Unmarshal([]byte(device.RawString()), &va) if err != nil { From 039a10ab8a93baddd672265329945ae041bf6bfb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Albertin?= Date: Wed, 24 Oct 2018 16:34:46 +0200 Subject: [PATCH 18/19] Update CHANGELOG.md Added missing entry in 3.1.0-M3 version --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ef6a3bda1..e892fc3b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ ### BUG FIXES * REQ_TARGET keyword into TOSCA functions was broken. This was introduced during the upgrade to Alien4Cloud 2.0 that changed how requirements definition on node templates ([GH-159](https://github.com/ystia/yorc/issues/159)) +* Parse Alien specific way of defining properties on relationships ([GH-155](https://github.com/ystia/yorc/issues/155)) ## 3.1.0-M2 (August 24, 2018) From 86cdb03bb989316a752103a03056671c204aa786 Mon Sep 17 00:00:00 2001 From: Laurent Ganne Date: Wed, 24 Oct 2018 16:12:08 +0000 Subject: [PATCH 19/19] Added property deletable on BlockStorage to delete it or not on undeploy --- data/tosca/yorc-google-types.yml | 5 +++++ prov/terraform/commons/generator.go | 34 ++++++++++++++++++++++++++++ prov/terraform/google/init.go | 9 +++++--- prov/terraform/openstack/init.go | 35 ++--------------------------- 4 files changed, 47 insertions(+), 36 deletions(-) diff --git a/data/tosca/yorc-google-types.yml b/data/tosca/yorc-google-types.yml index d98440040..4d108da58 100644 --- a/data/tosca/yorc-google-types.yml +++ b/data/tosca/yorc-google-types.yml @@ -254,6 +254,11 @@ node_types: description: > A reference to the zone where the disk resides. In any case the disk must be on the same zone as the associated Compute instances. 
required: true + deletable: + type: boolean + description: should this volume be deleted at undeployment + required: false + default: false disk_encryption_key: type: yorc.datatypes.google.EncryptionKey description: > diff --git a/prov/terraform/commons/generator.go b/prov/terraform/commons/generator.go index c16635a48..980b3e90c 100644 --- a/prov/terraform/commons/generator.go +++ b/prov/terraform/commons/generator.go @@ -16,9 +16,14 @@ package commons import ( "context" + "fmt" + "strings" "github.com/hashicorp/consul/api" "github.com/ystia/yorc/config" + "github.com/ystia/yorc/deployments" + "github.com/ystia/yorc/events" + "github.com/ystia/yorc/log" ) // FileOutputPrefix is the prefix to identify file output @@ -37,3 +42,32 @@ type Generator interface { // PreDestroyInfraCallback is a function that is call before destroying an infrastructure. If it returns false the node will not be destroyed. type PreDestroyInfraCallback func(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string) (bool, error) + +// PreDestroyStorageInfraCallback is a callback of type PreDestroyInfraCallback +// checking if a block storage node is deletable on undeployment. +func PreDestroyStorageInfraCallback(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string) (bool, error) { + nodeType, err := deployments.GetNodeType(kv, deploymentID, nodeName) + if err != nil { + return false, err + } + isBlockStorage, err := deployments.IsTypeDerivedFrom(kv, deploymentID, nodeType, "tosca.nodes.BlockStorage") + if err != nil { + return false, err + } + + if isBlockStorage { + + deletable, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, "deletable") + if err != nil { + return false, err + } + if deletable == nil || strings.ToLower(deletable.RawString()) != "true" { + // False by default + msg := fmt.Sprintf("Node %q is a BlockStorage without the property 'deletable', so not destroyed on undeployment...", nodeName) + log.Debug(msg) + events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, deploymentID).RegisterAsString(msg) + return false, nil + } + } + return true, nil +} diff --git a/prov/terraform/google/init.go b/prov/terraform/google/init.go index 8fe2fb756..fd7d46eea 100644 --- a/prov/terraform/google/init.go +++ b/prov/terraform/google/init.go @@ -14,14 +14,17 @@ package google -import "github.com/ystia/yorc/registry" -import "github.com/ystia/yorc/prov/terraform" +import ( + "github.com/ystia/yorc/prov/terraform" + "github.com/ystia/yorc/prov/terraform/commons" + "github.com/ystia/yorc/registry" +) const googleDeploymentArtifact = "yorc.artifacts.google.Deployment" func init() { reg := registry.GetRegistry() - reg.RegisterDelegates([]string{`yorc\.nodes\.google\..*`}, terraform.NewExecutor(&googleGenerator{}, nil), registry.BuiltinOrigin) + reg.RegisterDelegates([]string{`yorc\.nodes\.google\..*`}, terraform.NewExecutor(&googleGenerator{}, commons.PreDestroyStorageInfraCallback), registry.BuiltinOrigin) reg.RegisterOperationExecutor( []string{googleDeploymentArtifact}, &defaultExecutor{}, registry.BuiltinOrigin) } diff --git a/prov/terraform/openstack/init.go b/prov/terraform/openstack/init.go index 3f1780c48..4f976ca1e 100644 --- a/prov/terraform/openstack/init.go +++ b/prov/terraform/openstack/init.go @@ -15,43 +15,12 @@ package openstack import ( - "context" - "fmt" - "strings" - - "github.com/hashicorp/consul/api" - "github.com/ystia/yorc/config" - "github.com/ystia/yorc/deployments" - 
"github.com/ystia/yorc/events" - "github.com/ystia/yorc/log" "github.com/ystia/yorc/prov/terraform" + "github.com/ystia/yorc/prov/terraform/commons" "github.com/ystia/yorc/registry" ) func init() { reg := registry.GetRegistry() - reg.RegisterDelegates([]string{`yorc\.nodes\.openstack\..*`}, terraform.NewExecutor(&osGenerator{}, preDestroyInfraCallback), registry.BuiltinOrigin) -} - -func preDestroyInfraCallback(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string) (bool, error) { - nodeType, err := deployments.GetNodeType(kv, deploymentID, nodeName) - if err != nil { - return false, err - } - // TODO consider making this generic: references to OpenStack should not be found here. - if nodeType == "yorc.nodes.openstack.BlockStorage" { - - deletable, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, "deletable") - if err != nil { - return false, err - } - if deletable == nil || strings.ToLower(deletable.RawString()) != "true" { - // False by default - msg := fmt.Sprintf("Node %q is a BlockStorage without the property 'deletable' do not destroy it...", nodeName) - log.Debug(msg) - events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, deploymentID).RegisterAsString(msg) - return false, nil - } - } - return true, nil + reg.RegisterDelegates([]string{`yorc\.nodes\.openstack\..*`}, terraform.NewExecutor(&osGenerator{}, commons.PreDestroyStorageInfraCallback), registry.BuiltinOrigin) }