diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7e0cc0ca2..0226a4f0f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,14 @@

 ## UNRELEASED

+### FEATURES
+
+* Support GCE Block storage. ([GH-82](https://github.com/ystia/yorc/issues/82))
+
+### ENHANCEMENTS
+
+* Concurrent workflow and custom command executions are now allowed, except when a deployment/undeployment/scaling operation is in progress ([GH-182](https://github.com/ystia/yorc/issues/182))
+
 ## 3.1.0-M4 (October 08, 2018)

 ### DEPENDENCIES
@@ -35,6 +43,7 @@
 ### BUG FIXES

 * REQ_TARGET keyword into TOSCA functions was broken. This was introduced during the upgrade to Alien4Cloud 2.0 that changed how requirements definition on node templates ([GH-159](https://github.com/ystia/yorc/issues/159))
+* Parse the Alien-specific way of defining properties on relationships ([GH-155](https://github.com/ystia/yorc/issues/155))

 ## 3.1.0-M2 (August 24, 2018)

diff --git a/commands/server.go b/commands/server.go
index c47828c86..67f606150 100644
--- a/commands/server.go
+++ b/commands/server.go
@@ -63,6 +63,7 @@ var ansibleConfiguration = map[string]interface{}{
 	"ansible.keep_operation_remote_path": config.DefaultKeepOperationRemotePath,
 	"ansible.archive_artifacts":          config.DefaultArchiveArtifacts,
 	"ansible.cache_facts":                config.DefaultCacheFacts,
+	"ansible.keep_generated_recipes":     false,
 }

 var consulConfiguration = map[string]interface{}{
@@ -84,6 +85,7 @@ var terraformConfiguration = map[string]interface{}{
 	"terraform.aws_plugin_version_constraint":       tfAWSPluginVersionConstraint,
 	"terraform.google_plugin_version_constraint":    tfGooglePluginVersionConstraint,
 	"terraform.openstack_plugin_version_constraint": tfOpenStackPluginVersionConstraint,
+	"terraform.keep_generated_files":                false,
 }

 var cfgFile string
@@ -257,6 +259,10 @@ func setConfig() {
 	serverCmd.PersistentFlags().Bool("keep_operation_remote_path", config.DefaultKeepOperationRemotePath, "Define wether the path created to store artifacts on the nodes will be removed at the end of workflow executions.")
 	serverCmd.PersistentFlags().Bool("ansible_archive_artifacts", config.DefaultArchiveArtifacts, "Define wether artifacts should be archived before being copied on remote nodes (requires tar to be installed on remote nodes).")
 	serverCmd.PersistentFlags().Bool("ansible_cache_facts", config.DefaultCacheFacts, "Define wether Ansible facts (useful variables about remote hosts) should be cached.")
+	serverCmd.PersistentFlags().Bool("ansible_keep_generated_recipes", false, "Define if Yorc should not delete generated Ansible recipes")
+
+	//Flags definition for Terraform
+	serverCmd.PersistentFlags().Bool("terraform_keep_generated_files", false, "Define if Yorc should not delete generated Terraform infrastructure files")

 	//Flags definition for Terraform
 	serverCmd.PersistentFlags().StringP("terraform_plugins_dir", "", "", "The directory where to find Terraform plugins")
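The two new options above are plain booleans carried through `mapstructure` tags in the config structs of config/config.go just below. As a quick illustration — not part of this change — here is a minimal sketch of how such a tagged struct decodes, using github.com/mitchellh/mapstructure directly; Yorc itself resolves these values through viper in commands/server.go, so the direct Decode call is an assumption for demonstration only.

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Ansible mirrors a subset of the struct from config/config.go below.
type Ansible struct {
	KeepGeneratedRecipes bool `mapstructure:"keep_generated_recipes"`
}

func main() {
	// A raw configuration map, as it could come out of a config file.
	raw := map[string]interface{}{"keep_generated_recipes": true}

	var cfg Ansible
	if err := mapstructure.Decode(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.KeepGeneratedRecipes) // true
}
```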
diff --git a/config/config.go b/config/config.go
index ae8fd39ac..36bb12e64 100644
--- a/config/config.go
+++ b/config/config.go
@@ -115,6 +115,7 @@ type Ansible struct {
 	ConnectionRetries       int              `mapstructure:"connection_retries"`
 	OperationRemoteBaseDir  string           `mapstructure:"operation_remote_base_dir"`
 	KeepOperationRemotePath bool             `mapstructure:"keep_operation_remote_path"`
+	KeepGeneratedRecipes    bool             `mapstructure:"keep_generated_recipes"`
 	ArchiveArtifacts        bool             `mapstructure:"archive_artifacts"`
 	CacheFacts              bool             `mapstructure:"cache_facts"`
 	HostedOperations        HostedOperations `mapstructure:"hosted_operations"`
@@ -151,6 +152,7 @@ type Terraform struct {
 	AWSPluginVersionConstraint       string `mapstructure:"aws_plugin_version_constraint"`
 	GooglePluginVersionConstraint    string `mapstructure:"google_plugin_version_constraint"`
 	OpenStackPluginVersionConstraint string `mapstructure:"openstack_plugin_version_constraint"`
+	KeepGeneratedFiles               bool   `mapstructure:"keep_generated_files"`
 }

 // DynamicMap allows to store configuration parameters that are not known in advance.
diff --git a/data/tosca/yorc-google-types.yml b/data/tosca/yorc-google-types.yml
index 3669d71d0..4d108da58 100644
--- a/data/tosca/yorc-google-types.yml
+++ b/data/tosca/yorc-google-types.yml
@@ -12,6 +12,47 @@ artifact_types:
   yorc.artifacts.google.Deployment:
     derived_from: tosca.artifacts.Deployment

+data_types:
+  yorc.datatypes.google.EncryptionKey:
+    derived_from: tosca.datatypes.Root
+    properties:
+      raw_key:
+        type: string
+        required: false
+        description: Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64, used to either encrypt or decrypt this resource.
+      sha256:
+        type: string
+        required: false
+        description: The RFC 4648 base64-encoded SHA-256 hash of the customer-supplied encryption key that protects this resource.
+
+  yorc.datatypes.google.ScratchDisk:
+    derived_from: tosca.datatypes.Root
+    properties:
+      interface:
+        type: string
+        required: false
+        description: The disk interface to use for attaching the scratch disks; either SCSI or NVME. Defaults to SCSI.
+
+relationship_types:
+  yorc.relationships.google.AttachesTo:
+    derived_from: tosca.relationships.AttachesTo
+    description: >
+      This type represents an attachment relationship between two nodes.
+      For example, an AttachesTo relationship type would be used for attaching a storage node to a Compute node.
+    valid_target_types: [ tosca.capabilities.Attachment ]
+    properties:
+      mode:
+        type: string
+        description: >
+          The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode.
+        required: false
+    attributes:
+      device:
+        type: string
+        description: >
+          The logical name of the device as exposed to the instance.
+          Note: a runtime attribute that gets set when the model gets instantiated by the orchestrator.
+
 node_types:
   yorc.nodes.google.Compute:
     derived_from: yorc.nodes.Compute
@@ -103,6 +144,15 @@ node_types:
           Comma-separated list of tags to apply to the instances for identifying the instances to which network firewall rules will apply.
         required: false
+      scratch_disks:
+        type: list
+        description: Additional scratch disks to attach to the instance. Maximum allowed is 8.
+        required: false
+        entry_schema:
+          type: yorc.datatypes.google.ScratchDisk
+        constraints:
+          - min_length: 0
+          - max_length: 8
     requirements:
       - assignment:
           capability: yorc.capabilities.Assignable
@@ -150,8 +200,6 @@ node_types:
         description: >
           Comma-separated list of label KEY=VALUE pairs to assign to the Compute Address.
         required: false
-        entry_schema:
-          type: string
       region:
         type: string
        description: >
@@ -180,3 +228,66 @@ node_types:
       type: string
       description: The compute IP address.
+
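The `scratch_disks` property defined above reaches the generator as a generic YAML-decoded value. Below is a standalone sketch of the shape involved, mirroring the type assertions that `generateComputeInstance` performs later in this diff; the raw value shape (`[]interface{}` of single-key `map[string]interface{}`) is an assumption based on that code.

```go
package main

import "fmt"

// ScratchDisk mirrors the Terraform-side struct the generator fills in.
type ScratchDisk struct {
	Interface string
}

func main() {
	// A TOSCA list of yorc.datatypes.google.ScratchDisk entries, as it
	// typically decodes from YAML (assumed shape, for illustration).
	raw := []interface{}{
		map[string]interface{}{"interface": "SCSI"},
		map[string]interface{}{"interface": "NVME"},
	}

	disks := make([]ScratchDisk, 0, len(raw))
	for _, n := range raw {
		m, ok := n.(map[string]interface{})
		if !ok {
			panic("unexpected scratch disk entry type")
		}
		for _, v := range m {
			s, ok := v.(string)
			if !ok {
				panic("unexpected scratch disk interface type")
			}
			disks = append(disks, ScratchDisk{Interface: s})
		}
	}
	fmt.Printf("%+v\n", disks) // [{Interface:SCSI} {Interface:NVME}]
}
```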
+  yorc.nodes.google.PersistentDisk:
+    derived_from: tosca.nodes.BlockStorage
+    properties:
+      # See https://www.terraform.io/docs/providers/google/r/compute_disk.html
+      description:
+        type: string
+        description: >
+          An optional description of this resource.
+        required: false
+      type:
+        type: string
+        description: >
+          URL of the disk type resource describing which disk type to use to create the disk.
+          If this field is not specified, it is assumed to be pd-standard for Standard Persistent Disk HDD.
+          pd-ssd is for solid-state drives (SSD).
+        required: false
+      labels:
+        type: string
+        description: >
+          Comma-separated list of label KEY=VALUE pairs to assign to the Compute Disk.
+        required: false
+      zone:
+        type: string
+        description: >
+          A reference to the zone where the disk resides. In any case the disk must be in the same zone as the associated Compute instances.
+        required: true
+      deletable:
+        type: boolean
+        description: Should this volume be deleted at undeployment
+        required: false
+        default: false
+      disk_encryption_key:
+        type: yorc.datatypes.google.EncryptionKey
+        description: >
+          Encrypts the disk using a customer-supplied encryption key. After you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later
+          (e.g. to create a disk snapshot or an image, or to attach the disk to a virtual machine).
+          Customer-supplied encryption keys do not protect access to metadata of the disk.
+          If you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later.
+        required: false
+      image_id:
+        type: string
+        description: >
+          The image from which to initialize this disk. This can be one of: the image's self_link, projects/{project}/global/images/{image}, projects/{project}/global/images/family/{family}, global/images/{image}, global/images/family/{family}, family/{family}, {project}/{family}, {project}/{image}, {family}, or {image}.
+          If referred to by family, the image names must include the family name. If they don't, use the google_compute_image data source. For instance, the image centos-6-v20180104 includes its family name centos-6. These images can be referred to by family name here.
+        required: false
+      image_encryption_key:
+        type: yorc.datatypes.google.EncryptionKey
+        description: >
+          The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key.
+        required: false
+      snapshot_id:
+        type: string
+        description: >
+          The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource.
+          For example, https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot, projects/project/global/snapshots/snapshot, global/snapshots/snapshot and snapshot are valid values.
+        required: false
+      snapshot_encryption_key:
+        type: yorc.datatypes.google.EncryptionKey
+        description: >
+          The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key.
+        required: false
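The definition-store change below lists every key stored under a requirement's `properties` prefix in Consul and parses each value as YAML. Here is a self-contained sketch of that list-and-unmarshal pattern against a local Consul agent; the key prefix is hypothetical (not Yorc's actual KV layout), and gopkg.in/yaml.v2 stands in for whichever YAML package Yorc vendors.

```go
package main

import (
	"fmt"
	"path"

	"github.com/hashicorp/consul/api"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	kv := client.KV()

	// List every key under the requirement's "properties" prefix
	// (hypothetical prefix, for illustration only).
	prefix := "yorc/deployments/dep1/requirements/0/properties"
	kvps, _, err := kv.List(prefix, nil)
	if err != nil {
		panic(err)
	}

	props := make(map[string]interface{}, len(kvps))
	for _, kvp := range kvps {
		var v interface{}
		if err := yaml.Unmarshal(kvp.Value, &v); err != nil {
			panic(err)
		}
		// The last path element is the property name, as in the diff below.
		props[path.Base(kvp.Key)] = v
	}
	fmt.Println(props)
}
```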
diff --git a/deployments/consul_test.go b/deployments/consul_test.go
index 4e235c5f2..1a6cb7fe7 100644
--- a/deployments/consul_test.go
+++ b/deployments/consul_test.go
@@ -96,5 +96,8 @@ func TestRunConsulDeploymentsPackageTests(t *testing.T) {
 		t.Run("TestOperationHost", func(t *testing.T) {
 			testOperationHost(t, kv)
 		})
+		t.Run("testIssueGetEmptyPropOnRelationship", func(t *testing.T) {
+			testIssueGetEmptyPropOnRelationship(t, kv)
+		})
 	})
 }
diff --git a/deployments/definition_store.go b/deployments/definition_store.go
index b8fb6a117..5c806e6e0 100644
--- a/deployments/definition_store.go
+++ b/deployments/definition_store.go
@@ -1091,9 +1091,11 @@ func fixAlienBlockStorages(ctx context.Context, kv *api.KV, deploymentID, nodeNa
 		if err != nil {
 			return errors.Wrapf(err, "Failed to fix Alien-specific BlockStorage %q", nodeName)
 		}
+
+		req.RelationshipProps = make(map[string]*tosca.ValueAssignment)
+
 		if device != nil {
 			va := &tosca.ValueAssignment{}
-			req.RelationshipProps = make(map[string]*tosca.ValueAssignment)
 			if device.RawString() != "" {
 				err = yaml.Unmarshal([]byte(device.RawString()), &va)
 				if err != nil {
@@ -1103,6 +1105,19 @@ func fixAlienBlockStorages(ctx context.Context, kv *api.KV, deploymentID, nodeNa
 			req.RelationshipProps["device"] = va
 		}
+		// Get all requirement properties
+		kvps, _, err := kv.List(path.Join(attachReq, "properties"), nil)
+		if err != nil {
+			return errors.Wrapf(err, "Failed to fix Alien-specific BlockStorage %q", nodeName)
+		}
+		for _, kvp := range kvps {
+			va := &tosca.ValueAssignment{}
+			err := yaml.Unmarshal(kvp.Value, va)
+			if err != nil {
+				return errors.Wrapf(err, "Failed to fix Alien-specific BlockStorage %q", nodeName)
+			}
+			req.RelationshipProps[path.Base(kvp.Key)] = va
+		}
 		newReqID, err := GetNbRequirementsForNode(kv, deploymentID, computeNodeName)
 		if err != nil {
 			return err
@@ -1179,7 +1194,7 @@ func createMissingBlockStorageForNode(consulStore consulutil.ConsulStore, kv *ap
 }

 /**
-This function check if a nodes need a floating IP, and return the name of Floating IP node.
+This function checks if a node needs block storage, and returns the names of the BlockStorage nodes.
*/ func checkBlockStorage(kv *api.KV, deploymentID, nodeName string) (bool, []string, error) { requirementsKey, err := GetRequirementsKeysByTypeForNode(kv, deploymentID, nodeName, "local_storage") diff --git a/deployments/definition_store_test.go b/deployments/definition_store_test.go index 2e58a76f1..4f2dd7039 100644 --- a/deployments/definition_store_test.go +++ b/deployments/definition_store_test.go @@ -765,6 +765,27 @@ func testIssueGetEmptyPropRel(t *testing.T, kv *api.KV) { require.Equal(t, "", results[0].Value) } +func testIssueGetEmptyPropOnRelationship(t *testing.T, kv *api.KV) { + // t.Parallel() + deploymentID := strings.Replace(t.Name(), "/", "_", -1) + err := StoreDeploymentDefinition(context.Background(), kv, deploymentID, "testdata/issue_get_empty_prop_rel.yaml") + require.Nil(t, err) + // First test operation outputs detection + + results, err := GetOperationInput(kv, deploymentID, "ValueAssignmentNode2", prov.Operation{ + Name: "configure.pre_configure_source", + ImplementedInType: "yorc.tests.relationships.ValueAssignmentConnectsTo", + ImplementationArtifact: "", + RelOp: prov.RelationshipOperation{ + IsRelationshipOperation: true, + RequirementIndex: "1", + TargetNodeName: "ValueAssignmentNode1", + }}, "input_empty_prop") + require.Nil(t, err) + require.Len(t, results, 1) + require.Equal(t, "", results[0].Value) +} + func testRelationshipWorkflow(t *testing.T, kv *api.KV) { // t.Parallel() deploymentID := strings.Replace(t.Name(), "/", "_", -1) diff --git a/deployments/testdata/issue_get_empty_prop_rel.yaml b/deployments/testdata/issue_get_empty_prop_rel.yaml index b4c995375..709e48163 100644 --- a/deployments/testdata/issue_get_empty_prop_rel.yaml +++ b/deployments/testdata/issue_get_empty_prop_rel.yaml @@ -166,6 +166,11 @@ node_types: relationship_types: yorc.tests.relationships.ValueAssignmentConnectsTo: derived_from: tosca.relationships.ConnectsTo + properties: + empty_prop: + type: string + required: false + default: "" interfaces: Configure: pre_configure_source: @@ -174,6 +179,9 @@ relationship_types: input_list: ["l1", "l2"] input_list_ex: - "le1" + + + - "le2" input_map: {"key1": "value1", "key2": "value2"} input_map_ex: @@ -188,6 +196,7 @@ relationship_types: input_propList_all: { get_property: [SOURCE, list] } input_propList_0_alien: { get_property: [SOURCE, "list[0]"] } input_propList_0_tosca: { get_property: [SOURCE, list, 0] } + input_empty_prop: { get_property: [SELF, empty_prop] } implementation: scripts/show_inputs.sh post_configure_source: inputs: diff --git a/doc/configuration.rst b/doc/configuration.rst index 1346837d9..3c8e970de 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -46,6 +46,10 @@ Globals Command-line options * ``--ansible_archive_artifacts``: If set to true, archives operation bash/python scripts locally, copies this archive and unarchives it on remote hosts (requires tar to be installed on remote hosts), to avoid multiple time consuming remote copy operations of individual scripts (false by default: no archive). +.. _option_ansible_keep_generated_recipes_cmd: + + * ``--ansible_keep_generated_recipes``: If set to true, generated Ansible recipes on Yorc server are not deleted. (false by default: generated recipes are deleted). + .. _option_operation_remote_base_dir_cmd: * ``--operation_remote_base_dir``: Specify an alternative working directory for Ansible on provisioned Compute. @@ -109,6 +113,9 @@ Globals Command-line options .. 
_option_terraform_openstack_plugin_version_constraint_cmd:

   * ``--terraform_openstack_plugin_version_constraint``: Specify the Terraform OpenStack plugin version constraint. Default one compatible with our source code is ``"~> 1.9"``. If you choose another, it's at your own risk. See https://www.terraform.io/docs/configuration/providers.html#provider-versions for more information.

+.. _option_terraform_keep_generated_files_cmd:
+
+  * ``--terraform_keep_generated_files``: If set to true, generated Terraform infrastructure files on Yorc server are not deleted. (false by default: generated files are deleted).

 .. _option_pub_routines_cmd:
@@ -336,6 +343,10 @@ All available configuration options for Ansible are:

   * ``keep_operation_remote_path``: Equivalent to :ref:`--keep_operation_remote_path <option_keep_operation_remote_path_cmd>` command-line flag.

+.. _option_ansible_keep_generated_recipes_cfg:
+
+  * ``keep_generated_recipes``: Equivalent to :ref:`--ansible_keep_generated_recipes <option_ansible_keep_generated_recipes_cmd>` command-line flag.
+
 .. _option_ansible_sandbox_hosted_ops_cfg:

   * ``hosted_operations``: This is a complex structure that allow to define the behavior of a Yorc server when it executes an hosted operation.
@@ -478,7 +489,7 @@ Below is an example of configuration file with Terraform configuration options.
     }
   }

-All available configuration options for Consul are:
+All available configuration options for Terraform are:

 .. _option_plugins_dir_cfg:
@@ -500,6 +511,11 @@ All available configuration options for Consul are:

   * ``openstack_plugin_version_constraint``: Equivalent to :ref:`--terraform_openstack_plugin_version_constraint <option_terraform_openstack_plugin_version_constraint_cmd>` command-line flag.

+.. _option_terraform_keep_generated_files_cfg:
+
+  * ``keep_generated_files``: Equivalent to :ref:`--terraform_keep_generated_files <option_terraform_keep_generated_files_cmd>` command-line flag.
+
+
 .. _yorc_config_file_telemetry_section:

 Telemetry configuration
@@ -647,6 +663,10 @@ Environment variables

   * ``YORC_ANSIBLE_ARCHIVE_ARTIFACTS``: Equivalent to :ref:`--ansible_archive_artifacts <option_ansible_archive_artifacts_cmd>` command-line flag.

+.. _option_ansible_keep_generated_recipes_env:
+
+  * ``YORC_ANSIBLE_KEEP_GENERATED_RECIPES``: Equivalent to :ref:`--ansible_keep_generated_recipes <option_ansible_keep_generated_recipes_cmd>` command-line flag.
+
 .. _option_operation_remote_base_dir_env:

   * ``YORC_OPERATION_REMOTE_BASE_DIR``: Equivalent to :ref:`--operation_remote_base_dir <option_operation_remote_base_dir_cmd>` command-line flag.
@@ -751,14 +771,6 @@ Environment variables

   * ``YORC_LOG``: If set to ``1`` or ``DEBUG``, enables debug logging for Yorc.

-.. _option_aws_access_key:
-
- * ``YORC_INFRA_AWS_ACCESS_KEY``: The AWS access key credential.
-
-.. _option_aws_secret_key:
-
- * ``YORC_INFRA_AWS_SECRET_KEY``: The AWS secret key credential.
-
 .. _option_terraform_plugins_dir_env:

   * ``YORC_TERRAFORM_PLUGINS_DIR``: Equivalent to :ref:`--terraform_plugins_dir <option_terraform_plugins_dir_cmd>` command-line flag.
@@ -779,6 +791,10 @@ Environment variables

   * ``YORC_TERRAFORM_OPENSTACK_PLUGIN_VERSION_CONSTRAINT``: Equivalent to :ref:`--terraform_openstack_plugin_version_constraint <option_terraform_openstack_plugin_version_constraint_cmd>` command-line flag.

+.. _option_terraform_keep_generated_files_env:
+
+  * ``YORC_TERRAFORM_KEEP_GENERATED_FILES``: Equivalent to :ref:`--terraform_keep_generated_files <option_terraform_keep_generated_files_cmd>` command-line flag.
+
 Infrastructures configuration
 -----------------------------
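For illustration, a minimal sketch of how a flag default, a config key and the documented environment variable can resolve to one boolean with viper (which commands/server.go uses); the exact binding below is an assumption for demonstration, not Yorc's code.

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	// Default mirrors the terraformConfiguration map in commands/server.go.
	v.SetDefault("terraform.keep_generated_files", false)
	// Environment lookup: "terraform.keep_generated_files" becomes
	// YORC_TERRAFORM_KEEP_GENERATED_FILES, as documented above.
	v.SetEnvPrefix("yorc")
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	v.AutomaticEnv()

	os.Setenv("YORC_TERRAFORM_KEEP_GENERATED_FILES", "true")
	fmt.Println(v.GetBool("terraform.keep_generated_files")) // true
}
```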
diff --git a/helper/sizeutil/sizeutil.go b/helper/sizeutil/sizeutil.go
new file mode 100644
index 000000000..728f1c193
--- /dev/null
+++ b/helper/sizeutil/sizeutil.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sizeutil
+
+import (
+	"strconv"
+
+	"github.com/dustin/go-humanize"
+	"github.com/pkg/errors"
+	"github.com/ystia/yorc/helper/mathutil"
+)
+
+// ConvertToGB allows to convert a MB size as "42" or a human readable size as "42MB" or "42 KB" into GB
+func ConvertToGB(size string) (int, error) {
+	// Default size unit is MB
+	mSize, err := strconv.Atoi(size)
+	// Not an int value, so maybe a human readable size: we try to retrieve bytes
+	if err != nil {
+		var bsize uint64
+		bsize, err = humanize.ParseBytes(size)
+		if err != nil {
+			return 0, errors.Errorf("Can't convert size to bytes value: %v", err)
+		}
+		gSize := float64(bsize) / humanize.GByte
+		gSize = mathutil.Round(gSize, 0, 0)
+		return int(gSize), nil
+	}
+
+	gSize := float64(mSize) / 1000
+	gSize = mathutil.Round(gSize, 0, 0)
+	return int(gSize), nil
+}
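A short usage sketch of the helper above; the expected results follow the rounding behavior exercised by the tests just below (bare numbers are treated as MB, humanized values go through humanize.ParseBytes, and fractional GB values round up).

```go
package main

import (
	"fmt"

	"github.com/ystia/yorc/helper/sizeutil"
)

func main() {
	// Expected results taken from the unit tests below:
	// "1" => 1, "1500 M" => 2, "1 GiB" => 2, "1 tb" => 1000
	for _, s := range []string{"1", "1500 M", "1 GiB", "1 tb"} {
		gb, err := sizeutil.ConvertToGB(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s => %d GB\n", s, gb)
	}
}
```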
diff --git a/helper/sizeutil/sizeutil_test.go b/helper/sizeutil/sizeutil_test.go
new file mode 100644
index 000000000..f056208b5
--- /dev/null
+++ b/helper/sizeutil/sizeutil_test.go
@@ -0,0 +1,50 @@
+// Copyright 2018 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sizeutil
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestConvertToGB(t *testing.T) {
+	var testData = []struct {
+		test          string
+		inputSize     string
+		expectedSize  int
+		expectedError bool
+	}{
+		{"volume1", "1", 1, false},
+		{"volume10000000", "100", 1, false},
+		{"volume10000000", "1500 M", 2, false},
+		{"volume1GB", "1GB", 1, false},
+		{"volume1GBS", "1 GB", 1, false},
+		{"volume1GiB", "1 GiB", 2, false},
+		{"volume2GIB", "2 GIB", 3, false},
+		{"volume1TB", "1 tb", 1000, false},
+		{"volume1TiB", "1 TiB", 1100, false},
+		{"error", "1 deca", 0, true},
+	}
+	for _, tt := range testData {
+		s, err := ConvertToGB(tt.inputSize)
+		if !tt.expectedError {
+			assert.Nil(t, err)
+			assert.Equal(t, tt.expectedSize, s)
+		} else {
+			assert.Error(t, err, "Expected an error")
+		}
+	}
+}
diff --git a/prov/ansible/execution.go b/prov/ansible/execution.go
index 4fe8d62a5..9f05df749 100644
--- a/prov/ansible/execution.go
+++ b/prov/ansible/execution.go
@@ -621,7 +621,7 @@ func (e *executionCommon) resolveOperationOutputPath() error {
 			} else {
 				//If we are with an expression type {get_operation_output : [ SELF, ...]} in a relationship we store the result in the corresponding relationship instance
 				if oof.Operands[0].String() == "SELF" && e.operation.RelOp.IsRelationshipOperation {
-					relationShipPrefix := filepath.Join("relationship_instances", e.NodeName, e.operation.RelOp.RequirementIndex, instanceID)
+					relationShipPrefix := path.Join("relationship_instances", e.NodeName, e.operation.RelOp.RequirementIndex, instanceID)
 					e.Outputs[outputVariableName+"_"+fmt.Sprint(b)] = path.Join(relationShipPrefix, "outputs", interfaceName, operationName, outputVariableName)
 				} else if oof.Operands[0].String() == "HOST" {
 					// In this case we continue because the parsing has change this type on {get_operation_output : [ SELF, ...]} on the host node
@@ -799,11 +799,11 @@ func (e *executionCommon) executeWithCurrentInstance(ctx context.Context, retry
 		return err
 	}

-	var ansibleRecipePath string
+	ansibleRecipePath := filepath.Join(ansiblePath, e.taskID, e.NodeName)
 	if e.operation.RelOp.IsRelationshipOperation {
-		ansibleRecipePath = filepath.Join(ansiblePath, e.NodeName, e.relationshipType, e.operation.RelOp.TargetRelationship, e.operation.Name, currentInstance)
+		ansibleRecipePath = filepath.Join(ansibleRecipePath, e.relationshipType, e.operation.RelOp.TargetRelationship, e.operation.Name, currentInstance)
 	} else {
-		ansibleRecipePath = filepath.Join(ansiblePath, e.NodeName, e.operation.Name, currentInstance)
+		ansibleRecipePath = filepath.Join(ansibleRecipePath, e.operation.Name, currentInstance)
 	}

 	if err = os.RemoveAll(ansibleRecipePath); err != nil {
@@ -812,6 +812,17 @@ func (e *executionCommon) executeWithCurrentInstance(ctx context.Context, retry
 		events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelERROR, e.deploymentID).RegisterAsString(err.Error())
 		return err
 	}
+
+	defer func() {
+		if !e.cfg.Ansible.KeepGeneratedRecipes {
+			err := os.RemoveAll(ansibleRecipePath)
+			if err != nil {
+				err = errors.Wrapf(err, "Failed to remove ansible recipe directory %q for node %q operation %q", ansibleRecipePath, e.NodeName, e.operation.Name)
+				log.Debugf("%+v", err)
+				events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelERROR, e.deploymentID).RegisterAsString(err.Error())
+			}
+		}
+	}()
 	ansibleHostVarsPath := filepath.Join(ansibleRecipePath, "host_vars")
 	if err = os.MkdirAll(ansibleHostVarsPath, 0775); err != nil {
events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelERROR, e.deploymentID).RegisterAsString(err.Error()) diff --git a/prov/terraform/aws/generator.go b/prov/terraform/aws/generator.go index 5c82b2ab2..03620c85d 100644 --- a/prov/terraform/aws/generator.go +++ b/prov/terraform/aws/generator.go @@ -19,7 +19,6 @@ import ( "encoding/json" "fmt" "io/ioutil" - "os" "path" "path/filepath" @@ -37,7 +36,7 @@ const infrastructureName = "aws" type awsGenerator struct { } -func (g *awsGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName string) (bool, map[string]string, []string, error) { +func (g *awsGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string) (bool, map[string]string, []string, error) { log.Debugf("Generating infrastructure for deployment with id %s", deploymentID) cClient, err := cfg.GetConsulClient() if err != nil { @@ -141,13 +140,9 @@ func (g *awsGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg co if err != nil { return false, nil, nil, errors.Wrap(err, "Failed to generate JSON of terraform Infrastructure description") } - infraPath := filepath.Join(cfg.WorkingDirectory, "deployments", fmt.Sprint(deploymentID), "infra", nodeName) - if err = os.MkdirAll(infraPath, 0775); err != nil { - return false, nil, nil, errors.Wrapf(err, "Failed to create infrastructure working directory %q", infraPath) - } - if err = ioutil.WriteFile(filepath.Join(infraPath, "infra.tf.json"), jsonInfra, 0664); err != nil { - return false, nil, nil, errors.Wrapf(err, "Failed to write file %q", filepath.Join(infraPath, "infra.tf.json")) + if err = ioutil.WriteFile(filepath.Join(infrastructurePath, "infra.tf.json"), jsonInfra, 0664); err != nil { + return false, nil, nil, errors.Wrapf(err, "Failed to write file %q", filepath.Join(infrastructurePath, "infra.tf.json")) } log.Debugf("Infrastructure generated for deployment with id %s", deploymentID) diff --git a/prov/terraform/commons/generator.go b/prov/terraform/commons/generator.go index 9b5b80a24..4fd289c64 100644 --- a/prov/terraform/commons/generator.go +++ b/prov/terraform/commons/generator.go @@ -16,11 +16,19 @@ package commons import ( "context" + "fmt" + "strings" "github.com/hashicorp/consul/api" "github.com/ystia/yorc/config" + "github.com/ystia/yorc/deployments" + "github.com/ystia/yorc/events" + "github.com/ystia/yorc/log" ) +// FileOutputPrefix is the prefix to identify file output +const FileOutputPrefix = "file:" + // A Generator is used to generate the Terraform infrastructure for a given TOSCA node type Generator interface { // GenerateTerraformInfraForNode generates the Terraform infrastructure file for the given node. @@ -29,8 +37,37 @@ type Generator interface { // GenerateTerraformInfraForNode can also return a map of outputs names indexed by consul keys into which the outputs results should be stored. // And a list of environment variables in form "key=value" to be added to the current process environment when running terraform commands. // This is particularly useful for adding secrets that should not be in tf files. 
-	GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName string) (bool, map[string]string, []string, error)
+	GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string) (bool, map[string]string, []string, error)
 }

 // PreDestroyInfraCallback is a function that is call before destroying an infrastructure. If it returns false the node will not be destroyed.
-type PreDestroyInfraCallback func(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string) (bool, error)
+type PreDestroyInfraCallback func(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string) (bool, error)
+
+// PreDestroyStorageInfraCallback is a callback of type PreDestroyInfraCallback
+// checking if a block storage node is deletable on undeployment.
+func PreDestroyStorageInfraCallback(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string) (bool, error) {
+	nodeType, err := deployments.GetNodeType(kv, deploymentID, nodeName)
+	if err != nil {
+		return false, err
+	}
+	isBlockStorage, err := deployments.IsTypeDerivedFrom(kv, deploymentID, nodeType, "tosca.nodes.BlockStorage")
+	if err != nil {
+		return false, err
+	}
+
+	if isBlockStorage {
+		deletable, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, "deletable")
+		if err != nil {
+			return false, err
+		}
+		if deletable == nil || strings.ToLower(deletable.RawString()) != "true" {
+			// False by default
+			msg := fmt.Sprintf("Node %q is a BlockStorage whose property 'deletable' is unset or false, so it is not destroyed on undeployment...", nodeName)
+			log.Debug(msg)
+			events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, deploymentID).RegisterAsString(msg)
+			return false, nil
+		}
+	}
+	return true, nil
+}
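`PreDestroyStorageInfraCallback` above is handed to the Terraform executor at registration time (the google init.go change later in this diff does exactly that). As an aside, here is a minimal sketch of a custom callback with the same signature; the always-allow behavior is purely illustrative.

```go
package main

import (
	"context"

	"github.com/hashicorp/consul/api"
	"github.com/ystia/yorc/config"
	"github.com/ystia/yorc/prov/terraform/commons"
)

// alwaysDestroy is a trivial callback with the PreDestroyInfraCallback
// signature from the diff: returning false would veto destroying the node.
func alwaysDestroy(ctx context.Context, kv *api.KV, cfg config.Configuration,
	deploymentID, nodeName, infrastructurePath string) (bool, error) {
	return true, nil
}

// The assignment below only proves the signature matches; a real provider
// passes such a callback to terraform.NewExecutor (see init.go further down).
var _ commons.PreDestroyInfraCallback = alwaysDestroy

func main() {}
```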
diff --git a/prov/terraform/commons/resources.go b/prov/terraform/commons/resources.go
index 2e3b96486..4ded222bc 100644
--- a/prov/terraform/commons/resources.go
+++ b/prov/terraform/commons/resources.go
@@ -82,6 +82,14 @@ type RemoteExec struct {
 	Scripts []string `json:"scripts,omitempty"`
 }

+// LocalExec allows to invoke a local executable after a resource is created. This invokes a process on the machine running Terraform, not on the resource.
+type LocalExec struct {
+	Command     string `json:"command"`
+	WorkingDir  string `json:"working_dir,omitempty"`
+	Interpreter string `json:"interpreter,omitempty"`
+	Environment string `json:"environment,omitempty"`
+}
+
 // A Connection allows to overwrite the way Terraform connects to a resource
 type Connection struct {
 	ConnType string `json:"type,omitempty"`
diff --git a/prov/terraform/executor.go b/prov/terraform/executor.go
index def6a93b5..acfd51cde 100644
--- a/prov/terraform/executor.go
+++ b/prov/terraform/executor.go
@@ -28,10 +28,13 @@ import (
 	"github.com/ystia/yorc/events"
 	"github.com/ystia/yorc/helper/consulutil"
 	"github.com/ystia/yorc/helper/executil"
+	"github.com/ystia/yorc/log"
 	"github.com/ystia/yorc/prov"
 	"github.com/ystia/yorc/prov/terraform/commons"
 	"github.com/ystia/yorc/tasks"
 	"github.com/ystia/yorc/tosca"
+	"io/ioutil"
+	"path"
 )

 type defaultExecutor struct {
@@ -65,32 +68,46 @@ func (e *defaultExecutor) ExecDelegate(ctx context.Context, cfg config.Configura
 	if err != nil {
 		return err
 	}
-
+	infrastructurePath := filepath.Join(cfg.WorkingDirectory, "deployments", deploymentID, "terraform", taskID, nodeName)
+	if err = os.MkdirAll(infrastructurePath, 0775); err != nil {
+		return errors.Wrapf(err, "Failed to create infrastructure working directory %q", infrastructurePath)
+	}
+	defer func() {
+		if !cfg.Terraform.KeepGeneratedFiles {
+			err := os.RemoveAll(infrastructurePath)
+			if err != nil {
+				err = errors.Wrapf(err, "Failed to remove Terraform infrastructure directory %q for node %q operation %q", infrastructurePath, nodeName, delegateOperation)
+				log.Debugf("%+v", err)
+				events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelERROR, deploymentID).RegisterAsString(err.Error())
+			}
+		}
+	}()
 	op := strings.ToLower(delegateOperation)
 	switch {
 	case op == "install":
-		err = e.installNode(ctx, kv, cfg, deploymentID, nodeName, instances)
+		err = e.installNode(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath, instances)
 	case op == "uninstall":
-		err = e.uninstallNode(ctx, kv, cfg, deploymentID, nodeName, instances)
+		err = e.uninstallNode(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath, instances)
 	default:
 		return errors.Errorf("Unsupported operation %q", delegateOperation)
 	}
 	return err
 }

-func (e *defaultExecutor) installNode(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string, instances []string) error {
+func (e *defaultExecutor) installNode(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string, instances []string) error {
 	for _, instance := range instances {
 		err := deployments.SetInstanceStateWithContextualLogs(events.AddLogOptionalFields(ctx, events.LogOptionalFields{events.InstanceID: instance}), kv, deploymentID, nodeName, instance, tosca.NodeStateCreating)
 		if err != nil {
 			return err
 		}
 	}
-	infraGenerated, outputs, env, err := e.generator.GenerateTerraformInfraForNode(ctx, cfg, deploymentID, nodeName)
+
+	infraGenerated, outputs, env, err := e.generator.GenerateTerraformInfraForNode(ctx, cfg, deploymentID, nodeName, infrastructurePath)
 	if err != nil {
 		return err
 	}
 	if infraGenerated {
-		if err = e.applyInfrastructure(ctx, kv, cfg, deploymentID, nodeName, outputs, env); err != nil {
+		if err = e.applyInfrastructure(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath, outputs, env); err != nil {
 			return err
 		}
 	}
@@ -103,19 +120,19 @@ func (e *defaultExecutor) installNode(ctx
context.Context, kv *api.KV, cfg confi return nil } -func (e *defaultExecutor) uninstallNode(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string, instances []string) error { +func (e *defaultExecutor) uninstallNode(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string, instances []string) error { for _, instance := range instances { err := deployments.SetInstanceStateWithContextualLogs(events.AddLogOptionalFields(ctx, events.LogOptionalFields{events.InstanceID: instance}), kv, deploymentID, nodeName, instance, tosca.NodeStateDeleting) if err != nil { return err } } - infraGenerated, outputs, env, err := e.generator.GenerateTerraformInfraForNode(ctx, cfg, deploymentID, nodeName) + infraGenerated, outputs, env, err := e.generator.GenerateTerraformInfraForNode(ctx, cfg, deploymentID, nodeName, infrastructurePath) if err != nil { return err } if infraGenerated { - if err = e.destroyInfrastructure(ctx, kv, cfg, deploymentID, nodeName, outputs, env); err != nil { + if err = e.destroyInfrastructure(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath, outputs, env); err != nil { return err } } @@ -128,9 +145,8 @@ func (e *defaultExecutor) uninstallNode(ctx context.Context, kv *api.KV, cfg con return nil } -func (e *defaultExecutor) remoteConfigInfrastructure(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string, env []string) error { +func (e *defaultExecutor) remoteConfigInfrastructure(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string, env []string) error { events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, deploymentID).RegisterAsString("Remote configuring the infrastructure") - infraPath := filepath.Join(cfg.WorkingDirectory, "deployments", deploymentID, "infra", nodeName) var cmd *executil.Cmd // Use pre-installed Terraform providers plugins if plugins directory exists // https://www.terraform.io/guides/running-terraform-in-automation.html#pre-installed-plugins @@ -140,7 +156,7 @@ func (e *defaultExecutor) remoteConfigInfrastructure(ctx context.Context, kv *ap cmd = executil.Command(ctx, "terraform", "init") } - cmd.Dir = infraPath + cmd.Dir = infrastructurePath cmd.Env = mergeEnvironments(env) errbuf := events.NewBufferedLogEntryWriter() out := events.NewBufferedLogEntryWriter() @@ -165,6 +181,17 @@ func (e *defaultExecutor) retrieveOutputs(ctx context.Context, kv *api.KV, infra if len(outputs) == 0 { return nil } + + // Filter and handle file output + filteredOutputs, err := e.handleFileOutputs(ctx, kv, infraPath, outputs) + if err != nil { + return err + } + + if len(filteredOutputs) == 0 { + return nil + } + type tfJSONOutput struct { Sensitive bool `json:"sensitive,omitempty"` Type string `json:"type,omitempty"` @@ -183,7 +210,7 @@ func (e *defaultExecutor) retrieveOutputs(ctx context.Context, kv *api.KV, infra if err != nil { return errors.Wrap(err, "Failed to retrieve the infrastructure outputs via terraform") } - for outPath, outName := range outputs { + for outPath, outName := range filteredOutputs { output, ok := outputsList[outName] if !ok { return errors.Errorf("failed to retrieve output %q in terraform result", outName) @@ -197,17 +224,40 @@ func (e *defaultExecutor) retrieveOutputs(ctx context.Context, kv *api.KV, infra return nil } -func (e *defaultExecutor) applyInfrastructure(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string, 
outputs map[string]string, env []string) error { +// File outputs are outputs that terraform can't resolve and which need to be retrieved in local files +func (e *defaultExecutor) handleFileOutputs(ctx context.Context, kv *api.KV, infraPath string, outputs map[string]string) (map[string]string, error) { + filteredOutputs := make(map[string]string, 0) + for k, v := range outputs { + if strings.HasPrefix(v, commons.FileOutputPrefix) { + file := strings.TrimPrefix(v, commons.FileOutputPrefix) + log.Debugf("Handle file output:%q", file) + content, err := ioutil.ReadFile(path.Join(infraPath, file)) + if err != nil { + return nil, errors.Wrapf(err, "Failed to retrieve file output from file:%q", file) + } + contentStr := strings.Trim(string(content), "\r\n") + // Store keyValue in Consul + _, err = kv.Put(&api.KVPair{Key: k, Value: []byte(contentStr)}, nil) + if err != nil { + return nil, errors.Wrap(err, consulutil.ConsulGenericErrMsg) + } + } else { + filteredOutputs[k] = v + } + } + return filteredOutputs, nil +} + +func (e *defaultExecutor) applyInfrastructure(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string, outputs map[string]string, env []string) error { // Remote Configuration for Terraform State to store it in the Consul KV store - if err := e.remoteConfigInfrastructure(ctx, kv, cfg, deploymentID, nodeName, env); err != nil { + if err := e.remoteConfigInfrastructure(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath, env); err != nil { return err } events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, deploymentID).RegisterAsString("Applying the infrastructure") - infraPath := filepath.Join(cfg.WorkingDirectory, "deployments", deploymentID, "infra", nodeName) cmd := executil.Command(ctx, "terraform", "apply", "-input=false", "-auto-approve") - cmd.Dir = infraPath + cmd.Dir = infrastructurePath cmd.Env = mergeEnvironments(env) errbuf := events.NewBufferedLogEntryWriter() out := events.NewBufferedLogEntryWriter() @@ -225,20 +275,20 @@ func (e *defaultExecutor) applyInfrastructure(ctx context.Context, kv *api.KV, c return errors.Wrap(err, "Failed to apply the infrastructure changes via terraform") } - return e.retrieveOutputs(ctx, kv, infraPath, outputs) + return e.retrieveOutputs(ctx, kv, infrastructurePath, outputs) } -func (e *defaultExecutor) destroyInfrastructure(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string, outputs map[string]string, env []string) error { +func (e *defaultExecutor) destroyInfrastructure(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string, outputs map[string]string, env []string) error { if e.preDestroyCheck != nil { - check, err := e.preDestroyCheck(ctx, kv, cfg, deploymentID, nodeName) + check, err := e.preDestroyCheck(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath) if err != nil || !check { return err } } - return e.applyInfrastructure(ctx, kv, cfg, deploymentID, nodeName, outputs, env) + return e.applyInfrastructure(ctx, kv, cfg, deploymentID, nodeName, infrastructurePath, outputs, env) } // mergeEnvironments merges given env with current process env diff --git a/prov/terraform/google/compute_instance.go b/prov/terraform/google/compute_instance.go index 18100eb5d..8cc3338f3 100644 --- a/prov/terraform/google/compute_instance.go +++ b/prov/terraform/google/compute_instance.go @@ -119,7 +119,7 @@ func (g *googleGenerator) generateComputeInstance(ctx context.Context, kv *api.K 
// External IP address can be static if required if hasStaticAddressReq { // Address Lookup - externalAddress, err = addressLookup(ctx, kv, deploymentID, instanceName, addressNode) + externalAddress, err = attributeLookup(ctx, kv, deploymentID, instanceName, addressNode, "ip_address") if err != nil { return err } @@ -175,9 +175,43 @@ func (g *googleGenerator) generateComputeInstance(ctx context.Context, kv *api.K return err } + // Add additional Scratch disks + scratchDisks, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, "scratch_disks") + if err != nil { + return err + } + + if scratchDisks != nil && scratchDisks.RawString() != "" { + list, ok := scratchDisks.Value.([]interface{}) + if !ok { + return errors.New("failed to retrieve scratch disk Tosca Value: not expected type") + } + instance.ScratchDisks = make([]ScratchDisk, 0) + for _, n := range list { + v, ok := n.(map[string]interface{}) + if !ok { + return errors.New("failed to retrieve scratch disk map: not expected type") + } + for _, val := range v { + i, ok := val.(string) + if !ok { + return errors.New("failed to retrieve scratch disk interface value: not expected type") + } + scratch := ScratchDisk{Interface: i} + instance.ScratchDisks = append(instance.ScratchDisks, scratch) + } + } + } + // Add the compute instance commons.AddResource(infrastructure, "google_compute_instance", instance.Name, &instance) + // Attach Persistent disks + devices, err := addAttachedDisks(ctx, cfg, kv, deploymentID, nodeName, instanceName, instance.Name, infrastructure, outputs) + if err != nil { + return err + } + // Provide Consul Keys consulKeys := commons.ConsulKeys{Keys: []commons.ConsulKey{}} @@ -236,18 +270,70 @@ func (g *googleGenerator) generateComputeInstance(ctx context.Context, kv *api.K commons.AddResource(infrastructure, "null_resource", instance.Name+"-ConnectionCheck", &nullResource) + // Retrieve devices + handleDeviceAttributes(infrastructure, &instance, devices, user, privateKeyFilePath, accessIP) + return nil } -func addressLookup(ctx context.Context, kv *api.KV, deploymentID, instanceName, addressNodeName string) (string, error) { - log.Debugf("Address lookup for deploymentID:%q, address node name:%q, instance:%q", deploymentID, addressNodeName, instanceName) - var address string +func handleDeviceAttributes(infrastructure *commons.Infrastructure, instance *ComputeInstance, devices []string, user, privateKeyFilePath, accessIP string) { + // Retrieve devices { + for _, dev := range devices { + devResource := commons.Resource{} + + // Remote exec to retrieve the logical device for google device ID and to redirect stdout to file + re := commons.RemoteExec{Inline: []string{fmt.Sprintf("readlink -f /dev/disk/by-id/%s > %s", dev, dev)}, + Connection: &commons.Connection{User: user, Host: accessIP, + PrivateKey: `${file("` + privateKeyFilePath + `")}`}} + devResource.Provisioners = make([]map[string]interface{}, 0) + provMap := make(map[string]interface{}) + provMap["remote-exec"] = re + devResource.Provisioners = append(devResource.Provisioners, provMap) + devResource.DependsOn = []string{ + fmt.Sprintf("null_resource.%s", instance.Name+"-ConnectionCheck"), + fmt.Sprintf("google_compute_attached_disk.%s", dev), + } + commons.AddResource(infrastructure, "null_resource", fmt.Sprintf("%s-GetDevice-%s", instance.Name, dev), &devResource) + + // local exec to scp the stdout file locally + scpCommand := fmt.Sprintf("scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s %s@%s:~/%s %s", privateKeyFilePath, 
user, accessIP, dev, dev)
+		loc := commons.LocalExec{
+			Command: scpCommand,
+		}
+		locMap := make(map[string]interface{})
+		locMap["local-exec"] = loc
+		locResource := commons.Resource{}
+		locResource.Provisioners = append(locResource.Provisioners, locMap)
+		locResource.DependsOn = []string{fmt.Sprintf("null_resource.%s", fmt.Sprintf("%s-GetDevice-%s", instance.Name, dev))}
+		commons.AddResource(infrastructure, "null_resource", fmt.Sprintf("%s-CopyOut-%s", instance.Name, dev), &locResource)
+
+		// Remote exec to cleanup created file
+		cleanResource := commons.Resource{}
+		re = commons.RemoteExec{Inline: []string{fmt.Sprintf("rm -f %s", dev)},
+			Connection: &commons.Connection{User: user, Host: accessIP,
+				PrivateKey: `${file("` + privateKeyFilePath + `")}`}}
+		cleanResource.Provisioners = make([]map[string]interface{}, 0)
+		m := make(map[string]interface{})
+		m["remote-exec"] = re
+		cleanResource.Provisioners = append(cleanResource.Provisioners, m)
+		cleanResource.DependsOn = []string{fmt.Sprintf("null_resource.%s", fmt.Sprintf("%s-CopyOut-%s", instance.Name, dev))}
+		commons.AddResource(infrastructure, "null_resource", fmt.Sprintf("%s-cleanup-%s", instance.Name, dev), &cleanResource)
+
+		consulKeys := commons.ConsulKeys{Keys: []commons.ConsulKey{}}
+		consulKeys.DependsOn = []string{fmt.Sprintf("null_resource.%s", fmt.Sprintf("%s-CopyOut-%s", instance.Name, dev))}
+	}
+}
+
+func attributeLookup(ctx context.Context, kv *api.KV, deploymentID, instanceName, nodeName, attribute string) (string, error) {
+	log.Debugf("Attribute:%q lookup for deploymentID:%q, node name:%q, instance:%q", attribute, deploymentID, nodeName, instanceName)
 	res := make(chan string, 1)
 	go func() {
 		for {
-			if address, _ := deployments.GetInstanceAttributeValue(kv, deploymentID, addressNodeName, instanceName, "ip_address"); address != nil && address.RawString() != "" {
-				res <- address.RawString()
-				return
+			if attr, _ := deployments.GetInstanceAttributeValue(kv, deploymentID, nodeName, instanceName, attribute); attr != nil && attr.RawString() != "" {
+				res <- attr.RawString()
+				return
 			}

 			select {
@@ -259,9 +345,79 @@ func addressLookup(ctx context.Context, kv *api.KV, deploymentID, instanceName,
 	}()

 	select {
-	case address = <-res:
-		return address, nil
+	case val := <-res:
+		return val, nil
 	case <-ctx.Done():
 		return "", ctx.Err()
 	}
 }
+
+func addAttachedDisks(ctx context.Context, cfg config.Configuration, kv *api.KV, deploymentID, nodeName, instanceName, computeName string, infrastructure *commons.Infrastructure, outputs map[string]string) ([]string, error) {
+	devices := make([]string, 0)
+
+	storageKeys, err := deployments.GetRequirementsKeysByTypeForNode(kv, deploymentID, nodeName, "local_storage")
+	if err != nil {
+		return nil, err
+	}
+	for _, storagePrefix := range storageKeys {
+		requirementIndex := deployments.GetRequirementIndexFromRequirementKey(storagePrefix)
+		volumeNodeName, err := deployments.GetTargetNodeForRequirement(kv, deploymentID, nodeName, requirementIndex)
+		if err != nil {
+			return nil, err
+		}
+
+		log.Debugf("Volume attachment required for Volume named %s", volumeNodeName)
+
+		zone, err := deployments.GetStringNodeProperty(kv, deploymentID, volumeNodeName, "zone", true)
+		if err != nil {
+			return nil, err
+		}
+
+		modeValue, err := deployments.GetRelationshipPropertyValueFromRequirement(kv, deploymentID, nodeName, requirementIndex, "mode")
+		if err != nil {
+			return nil, err
+		}
+
+		volumeIDValue, err := deployments.GetNodePropertyValue(kv,
deploymentID, volumeNodeName, "volume_id") + if err != nil { + return nil, err + } + var volumeID string + if volumeIDValue == nil || volumeIDValue.RawString() == "" { + // Lookup for attribute volume_id + volumeID, err = attributeLookup(ctx, kv, deploymentID, instanceName, volumeNodeName, "volume_id") + if err != nil { + return nil, err + } + + } else { + volumeID = volumeIDValue.RawString() + } + + attachedDisk := &ComputeAttachedDisk{ + Disk: volumeID, + Instance: fmt.Sprintf("${google_compute_instance.%s.name}", computeName), + Zone: zone, + } + if modeValue != nil && modeValue.RawString() != "" { + attachedDisk.Mode = modeValue.RawString() + } + + attachName := strings.ToLower(cfg.ResourcesPrefix + volumeNodeName + "-" + instanceName + "-to-" + nodeName + "-" + instanceName) + attachName = strings.Replace(attachName, "_", "-", -1) + // attachName is used as device name to retrieve device attribute as logical volume name + attachedDisk.DeviceName = attachName + + // Provide file outputs for device attributes which can't be resolved with Terraform + device := fmt.Sprintf("google-%s", attachName) + commons.AddResource(infrastructure, "google_compute_attached_disk", device, attachedDisk) + outputDeviceVal := commons.FileOutputPrefix + device + instancesPrefix := path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "instances") + outputs[path.Join(instancesPrefix, volumeNodeName, instanceName, "attributes/device")] = outputDeviceVal + outputs[path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", nodeName, requirementIndex, instanceName, "attributes/device")] = outputDeviceVal + outputs[path.Join(consulutil.DeploymentKVPrefix, deploymentID, "topology", "relationship_instances", volumeNodeName, requirementIndex, instanceName, "attributes/device")] = outputDeviceVal + // Add device + devices = append(devices, device) + } + return devices, nil +} diff --git a/prov/terraform/google/compute_instance_test.go b/prov/terraform/google/compute_instance_test.go index 290bfd54d..fd5b97db2 100644 --- a/prov/terraform/google/compute_instance_test.go +++ b/prov/terraform/google/compute_instance_test.go @@ -81,6 +81,10 @@ func testSimpleComputeInstance(t *testing.T, kv *api.KV, cfg config.Configuratio require.True(t, ok) assert.Equal(t, "centos", rex.Connection.User) assert.Equal(t, `${file("~/.ssh/yorc.pem")}`, rex.Connection.PrivateKey) + + require.Len(t, compute.ScratchDisks, 2, "Expected 2 scratch disks") + assert.Equal(t, "SCSI", compute.ScratchDisks[0].Interface, "SCSI interface expected for 1st scratch") + assert.Equal(t, "NVME", compute.ScratchDisks[1].Interface, "NVME interface expected for 2nd scratch") } func testSimpleComputeInstanceMissingMandatoryParameter(t *testing.T, kv *api.KV, cfg config.Configuration) { @@ -124,3 +128,56 @@ func testSimpleComputeInstanceWithAddress(t *testing.T, kv *api.KV, srv1 *testut require.Len(t, compute.NetworkInterfaces, 1, "Expected one network interface for external access") assert.Equal(t, "1.2.3.4", compute.NetworkInterfaces[0].AccessConfigs[0].NatIP, "Unexpected external IP address") } + +func testSimpleComputeInstanceWithPersistentDisk(t *testing.T, kv *api.KV, srv1 *testutil.TestServer, cfg config.Configuration) { + t.Parallel() + deploymentID := loadTestYaml(t, kv) + + // Simulate the google persistent disk "volume_id" attribute registration + srv1.PopulateKV(t, map[string][]byte{ + path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/nodes/BS1/type"): 
[]byte("yorc.nodes.google.PersistentDisk"), + path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/instances/BS1/0/attributes/volume_id"): []byte("my_vol_id"), + }) + + infrastructure := commons.Infrastructure{} + g := googleGenerator{} + outputs := make(map[string]string, 0) + err := g.generateComputeInstance(context.Background(), kv, cfg, deploymentID, "Compute", "0", 0, &infrastructure, outputs) + require.NoError(t, err, "Unexpected error attempting to generate compute instance for %s", deploymentID) + + require.Len(t, infrastructure.Resource["google_compute_instance"], 1, "Expected one compute instance") + instancesMap := infrastructure.Resource["google_compute_instance"].(map[string]interface{}) + require.Len(t, instancesMap, 1) + require.Contains(t, instancesMap, "compute-0") + + compute, ok := instancesMap["compute-0"].(*ComputeInstance) + require.True(t, ok, "compute-0 is not a ComputeInstance") + assert.Equal(t, "n1-standard-1", compute.MachineType) + assert.Equal(t, "europe-west1-b", compute.Zone) + require.NotNil(t, compute.BootDisk, 1, "Expected boot disk") + assert.Equal(t, "centos-cloud/centos-7", compute.BootDisk.InitializeParams.Image, "Unexpected boot disk image") + + require.Len(t, infrastructure.Resource["google_compute_attached_disk"], 1, "Expected one attached disk") + instancesMap = infrastructure.Resource["google_compute_attached_disk"].(map[string]interface{}) + require.Len(t, instancesMap, 1) + + require.Contains(t, instancesMap, "google-bs1-0-to-compute-0") + attachedDisk, ok := instancesMap["google-bs1-0-to-compute-0"].(*ComputeAttachedDisk) + require.True(t, ok, "google-bs1-0-to-compute-0 is not a ComputeAttachedDisk") + assert.Equal(t, "my_vol_id", attachedDisk.Disk) + assert.Equal(t, "${google_compute_instance.compute-0.name}", attachedDisk.Instance) + assert.Equal(t, "europe-west1-b", attachedDisk.Zone) + assert.Equal(t, "bs1-0-to-compute-0", attachedDisk.DeviceName) + assert.Equal(t, "READ_ONLY", attachedDisk.Mode) + + require.Contains(t, infrastructure.Resource, "null_resource") + require.Len(t, infrastructure.Resource["null_resource"], 4) + + require.Len(t, outputs, 3, "three outputs are expected") + require.Contains(t, outputs, path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/instances/", "BS1", "0", "attributes/device"), "expected instances attribute output") + require.Contains(t, outputs, path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/relationship_instances/", "Compute", "0", "0", "attributes/device"), "expected relationship attribute output for Compute") + require.Contains(t, outputs, path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/relationship_instances/", "BS1", "0", "0", "attributes/device"), "expected relationship attribute output for Block storage") + require.Equal(t, "file:google-bs1-0-to-compute-0", outputs[path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/instances/", "BS1", "0", "attributes/device")], "output file value expected") + require.Equal(t, "file:google-bs1-0-to-compute-0", outputs[path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/relationship_instances/", "Compute", "0", "0", "attributes/device")], "output file value expected") + require.Equal(t, "file:google-bs1-0-to-compute-0", outputs[path.Join(consulutil.DeploymentKVPrefix, deploymentID+"/topology/relationship_instances/", "BS1", "0", "0", "attributes/device")], "output file value expected") +} diff --git a/prov/terraform/google/consul_test.go b/prov/terraform/google/consul_test.go index 
7102177dd..4d5187c5c 100644 --- a/prov/terraform/google/consul_test.go +++ b/prov/terraform/google/consul_test.go @@ -48,5 +48,11 @@ func TestRunConsulGooglePackageTests(t *testing.T) { t.Run("simpleComputeInstanceWithAddress", func(t *testing.T) { testSimpleComputeInstanceWithAddress(t, kv, srv, cfg) }) + t.Run("simplePersistentDisk", func(t *testing.T) { + testSimplePersistentDisk(t, kv, cfg) + }) + t.Run("simpleComputeInstanceWithPersistentDisk", func(t *testing.T) { + testSimpleComputeInstanceWithPersistentDisk(t, kv, srv, cfg) + }) }) } diff --git a/prov/terraform/google/generator.go b/prov/terraform/google/generator.go index 14949a7f1..7cbc1d63f 100644 --- a/prov/terraform/google/generator.go +++ b/prov/terraform/google/generator.go @@ -19,7 +19,6 @@ import ( "encoding/json" "fmt" "io/ioutil" - "os" "path" "path/filepath" "strings" @@ -38,7 +37,7 @@ const infrastructureName = "google" type googleGenerator struct { } -func (g *googleGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName string) (bool, map[string]string, []string, error) { +func (g *googleGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string) (bool, map[string]string, []string, error) { log.Debugf("Generating infrastructure for deployment with id %s", deploymentID) cClient, err := cfg.GetConsulClient() if err != nil { @@ -148,6 +147,11 @@ func (g *googleGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg if err != nil { return false, nil, nil, err } + case "yorc.nodes.google.PersistentDisk": + err = g.generatePersistentDisk(ctx, kv, cfg, deploymentID, nodeName, instanceName, instNb, &infrastructure, outputs) + if err != nil { + return false, nil, nil, err + } default: return false, nil, nil, errors.Errorf("Unsupported node type '%s' for node '%s' in deployment '%s'", nodeType, nodeName, deploymentID) } @@ -158,13 +162,9 @@ func (g *googleGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg if err != nil { return false, nil, nil, errors.Wrap(err, "Failed to generate JSON of terraform Infrastructure description") } - infraPath := filepath.Join(cfg.WorkingDirectory, "deployments", fmt.Sprint(deploymentID), "infra", nodeName) - if err = os.MkdirAll(infraPath, 0775); err != nil { - return false, nil, nil, errors.Wrapf(err, "Failed to create infrastructure working directory %q", infraPath) - } - if err = ioutil.WriteFile(filepath.Join(infraPath, "infra.tf.json"), jsonInfra, 0664); err != nil { - return false, nil, nil, errors.Wrapf(err, "Failed to write file %q", filepath.Join(infraPath, "infra.tf.json")) + if err = ioutil.WriteFile(filepath.Join(infrastructurePath, "infra.tf.json"), jsonInfra, 0664); err != nil { + return false, nil, nil, errors.Wrapf(err, "Failed to write file %q", filepath.Join(infrastructurePath, "infra.tf.json")) } log.Debugf("Infrastructure generated for deployment with id %s", deploymentID) diff --git a/prov/terraform/google/init.go b/prov/terraform/google/init.go index 8fe2fb756..fd7d46eea 100644 --- a/prov/terraform/google/init.go +++ b/prov/terraform/google/init.go @@ -14,14 +14,17 @@ package google -import "github.com/ystia/yorc/registry" -import "github.com/ystia/yorc/prov/terraform" +import ( + "github.com/ystia/yorc/prov/terraform" + "github.com/ystia/yorc/prov/terraform/commons" + "github.com/ystia/yorc/registry" +) const googleDeploymentArtifact = "yorc.artifacts.google.Deployment" func init() { reg := registry.GetRegistry() - 
reg.RegisterDelegates([]string{`yorc\.nodes\.google\..*`}, terraform.NewExecutor(&googleGenerator{}, nil), registry.BuiltinOrigin) + reg.RegisterDelegates([]string{`yorc\.nodes\.google\..*`}, terraform.NewExecutor(&googleGenerator{}, commons.PreDestroyStorageInfraCallback), registry.BuiltinOrigin) reg.RegisterOperationExecutor( []string{googleDeploymentArtifact}, &defaultExecutor{}, registry.BuiltinOrigin) } diff --git a/prov/terraform/google/persistent_disk.go b/prov/terraform/google/persistent_disk.go new file mode 100644 index 000000000..beb1dfa3e --- /dev/null +++ b/prov/terraform/google/persistent_disk.go @@ -0,0 +1,150 @@ +// Copyright 2018 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package google + +import ( + "context" + "fmt" + "github.com/hashicorp/consul/api" + "github.com/pkg/errors" + "github.com/ystia/yorc/config" + "github.com/ystia/yorc/deployments" + "github.com/ystia/yorc/helper/consulutil" + "github.com/ystia/yorc/helper/sizeutil" + "github.com/ystia/yorc/log" + "github.com/ystia/yorc/prov/terraform/commons" + "path" + "strings" +) + +func (g *googleGenerator) generatePersistentDisk(ctx context.Context, kv *api.KV, + cfg config.Configuration, deploymentID, nodeName, instanceName string, instanceID int, + infrastructure *commons.Infrastructure, + outputs map[string]string) error { + + nodeType, err := deployments.GetNodeType(kv, deploymentID, nodeName) + if err != nil { + return err + } + if nodeType != "yorc.nodes.google.PersistentDisk" { + return errors.Errorf("Unsupported node type for %q: %s", nodeName, nodeType) + } + + instancesPrefix := path.Join(consulutil.DeploymentKVPrefix, deploymentID, + "topology", "instances") + instancesKey := path.Join(instancesPrefix, nodeName) + + persistentDisk := &PersistentDisk{} + var size, volumes string + stringParams := []struct { + pAttr *string + propertyName string + mandatory bool + }{ + {&volumes, "volume_id", false}, + {&persistentDisk.Description, "description", false}, + {&persistentDisk.SourceSnapshot, "snapshot_id", false}, + {&persistentDisk.Type, "type", false}, + {&persistentDisk.Zone, "zone", false}, + {&persistentDisk.SourceImage, "image_id", false}, + {&size, "size", false}, + } + + for _, stringParam := range stringParams { + if *stringParam.pAttr, err = deployments.GetStringNodeProperty(kv, deploymentID, nodeName, + stringParam.propertyName, stringParam.mandatory); err != nil { + return err + } + } + + var volumeID string + if volumes != "" { + tabVol := strings.Split(volumes, ",") + if len(tabVol) > instanceID { + volumeID = strings.TrimSpace(tabVol[instanceID]) + } + } + + persistentDisk.Labels, err = deployments.GetKeyValuePairsNodeProperty(kv, deploymentID, nodeName, "labels") + if err != nil { + return err + } + + if size != "" { + // Default size unit is MB + log.Debugf("Initial size property value (default is MB): %q", size) + persistentDisk.Size, err = sizeutil.ConvertToGB(size) + if err != 
nil { + return err + } + log.Debugf("Computed size (in GB): %d", persistentDisk.Size) + } + + // Get encryption key if set + persistentDisk.DiskEncryptionKey, err = handleEncryptionKey(kv, deploymentID, nodeName, "disk_encryption_key") + if err != nil { + return err + } + // Get source snapshot encryption key if a source snapshot is defined + if persistentDisk.SourceSnapshot != "" { + persistentDisk.SourceSnapshotEncryptionKey, err = handleEncryptionKey(kv, deploymentID, nodeName, "snapshot_encryption_key") + if err != nil { + return err + } + } + // Get source image encryption key if a source image is defined + if persistentDisk.SourceImage != "" { + persistentDisk.SourceImageEncryptionKey, err = handleEncryptionKey(kv, deploymentID, nodeName, "image_encryption_key") + if err != nil { + return err + } + } + + name := strings.ToLower(cfg.ResourcesPrefix + nodeName + "-" + instanceName) + persistentDisk.Name = strings.Replace(name, "_", "-", -1) + + // Add a google_compute_disk resource only if no existing volume ID is provided + if volumeID == "" { + commons.AddResource(infrastructure, "google_compute_disk", persistentDisk.Name, persistentDisk) + volumeID = fmt.Sprintf("${google_compute_disk.%s.name}", persistentDisk.Name) + } + + // Provide a Consul key for the volume_id attribute + consulKeys := commons.ConsulKeys{Keys: []commons.ConsulKey{}} + consulKeyVolumeID := commons.ConsulKey{ + Path: path.Join(instancesKey, instanceName, "/attributes/volume_id"), + Value: volumeID} + + consulKeys.Keys = append(consulKeys.Keys, consulKeyVolumeID) + commons.AddResource(infrastructure, "consul_keys", persistentDisk.Name, &consulKeys) + return nil +} + +func handleEncryptionKey(kv *api.KV, deploymentID, nodeName, prop string) (*EncryptionKey, error) { + val, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, prop, "raw_key") + if err != nil { + return nil, err + } + if val.RawString() != "" { + hashValue, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, prop, "sha256") + if err != nil { + return nil, err + } + return &EncryptionKey{ + Raw: val.RawString(), + SHA256: hashValue.RawString()}, nil + } + return nil, nil +} diff --git a/prov/terraform/google/persistent_disk_test.go b/prov/terraform/google/persistent_disk_test.go new file mode 100644 index 000000000..899013016 --- /dev/null +++ b/prov/terraform/google/persistent_disk_test.go @@ -0,0 +1,50 @@ +// Copyright 2018 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
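Both the persistent disk generator above and the OpenStack block-storage refactoring later in this changeset delegate size conversion to the new sizeutil.ConvertToGB helper, whose body is not shown here. A minimal sketch of what it presumably implements, reconstructed from the conversion logic removed from bs_volume.go (a bare integer is read as MB, anything else is parsed by go-humanize as a byte quantity, and the result is rounded up to whole GB; math.Ceil stands in for the removed mathutil.Round call):

package sizeutil

import (
	"math"
	"strconv"

	"github.com/dustin/go-humanize"
	"github.com/pkg/errors"
)

// ConvertToGB converts a size string into a whole number of GB, rounding up.
// A bare integer is interpreted as MB; any other value ("12 GB", "1TiB", ...)
// is parsed by go-humanize as a byte quantity.
func ConvertToGB(size string) (int, error) {
	if mSize, err := strconv.Atoi(size); err == nil {
		// Plain number: the default unit is MB.
		return int(math.Ceil(float64(mSize) / 1000)), nil
	}
	bSize, err := humanize.ParseBytes(size)
	if err != nil {
		return 0, errors.Errorf("Can't convert size to bytes value: %v", err)
	}
	// The providers expect GB, so round the byte quantity up.
	return int(math.Ceil(float64(bSize) / humanize.GByte)), nil
}

Under this reading, the "32 GB" value in the test data below converts to 32, which is exactly what testSimplePersistentDisk asserts for persistentDisk.Size.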
+ +package google + +import ( + "context" + "github.com/hashicorp/consul/api" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/ystia/yorc/config" + "github.com/ystia/yorc/prov/terraform/commons" + "testing" +) + +func testSimplePersistentDisk(t *testing.T, kv *api.KV, cfg config.Configuration) { + t.Parallel() + deploymentID := loadTestYaml(t, kv) + infrastructure := commons.Infrastructure{} + g := googleGenerator{} + err := g.generatePersistentDisk(context.Background(), kv, cfg, deploymentID, "PersistentDisk", "0", 0, &infrastructure, make(map[string]string)) + require.NoError(t, err, "Unexpected error attempting to generate persistent disk for %s", deploymentID) + + require.Len(t, infrastructure.Resource["google_compute_disk"], 1, "Expected one persistent disk") + instancesMap := infrastructure.Resource["google_compute_disk"].(map[string]interface{}) + require.Len(t, instancesMap, 1) + require.Contains(t, instancesMap, "persistentdisk-0") + + persistentDisk, ok := instancesMap["persistentdisk-0"].(*PersistentDisk) + require.True(t, ok, "persistentdisk-0 is not a PersistentDisk") + assert.Equal(t, "persistentdisk-0", persistentDisk.Name) + assert.Equal(t, "europe-west1-b", persistentDisk.Zone) + assert.Equal(t, 32, persistentDisk.Size) + assert.Equal(t, "pd-ssd", persistentDisk.Type) + assert.Equal(t, "1234", persistentDisk.DiskEncryptionKey.Raw) + assert.Equal(t, "5678", persistentDisk.DiskEncryptionKey.SHA256) + assert.Equal(t, "my description for persistent disk", persistentDisk.Description) + assert.Equal(t, map[string]string{"key1": "value1", "key2": "value2"}, persistentDisk.Labels) +} diff --git a/prov/terraform/google/resources.go b/prov/terraform/google/resources.go index c42afcc3b..add650018 100644 --- a/prov/terraform/google/resources.go +++ b/prov/terraform/google/resources.go @@ -30,6 +30,7 @@ type ComputeInstance struct { // ServiceAccounts is an array of at most one element ServiceAccounts []ServiceAccount `json:"service_account,omitempty"` Tags []string `json:"tags,omitempty"` + ScratchDisks []ScratchDisk `json:"scratch_disk,omitempty"` } // BootDisk represents the required boot disk for compute instance @@ -84,3 +85,40 @@ type ComputeAddress struct { Labels map[string]string `json:"labels,omitempty"` Project string `json:"project,omitempty"` } + +// EncryptionKey represents a Google encryption key +type EncryptionKey struct { + Raw string `json:"raw_key,omitempty"` + SHA256 string `json:"sha256,omitempty"` +} + +// ScratchDisk represents an additional Compute instance local scratch disk +type ScratchDisk struct { + Interface string `json:"interface,omitempty"` +} + +// PersistentDisk represents a Google persistent disk +// See https://www.terraform.io/docs/providers/google/r/compute_disk.html +type PersistentDisk struct { + Name string `json:"name"` + Size int `json:"size,omitempty"` + Description string `json:"description,omitempty"` + Type string `json:"type,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Zone string `json:"zone,omitempty"` + DiskEncryptionKey *EncryptionKey `json:"disk_encryption_key,omitempty"` + SourceSnapshot string `json:"snapshot,omitempty"` + SourceSnapshotEncryptionKey *EncryptionKey `json:"source_snapshot_encryption_key,omitempty"` + SourceImage string `json:"image,omitempty"` + SourceImageEncryptionKey *EncryptionKey `json:"source_image_encryption_key,omitempty"` +} + +// ComputeAttachedDisk represents a compute instance's attached disk +// See
https://www.terraform.io/docs/providers/google/r/compute_attached_disk.html +type ComputeAttachedDisk struct { + Instance string `json:"instance"` + Disk string `json:"disk"` + DeviceName string `json:"device_name,omitempty"` + Mode string `json:"mode,omitempty"` + Zone string `json:"zone,omitempty"` +} diff --git a/prov/terraform/google/testdata/simpleComputeInstance.yaml b/prov/terraform/google/testdata/simpleComputeInstance.yaml index 387fcf289..557530126 100644 --- a/prov/terraform/google/testdata/simpleComputeInstance.yaml +++ b/prov/terraform/google/testdata/simpleComputeInstance.yaml @@ -22,6 +22,9 @@ topology_template: service_account: "yorc@yorc.net" tags: "tag1, tag2" labels: "key1=value1, key2=value2" + scratch_disks: + - interface: SCSI + - interface: NVME capabilities: scalable: properties: diff --git a/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml b/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml new file mode 100644 index 000000000..335b6a2a5 --- /dev/null +++ b/prov/terraform/google/testdata/simpleComputeInstanceWithPersistentDisk.yaml @@ -0,0 +1,51 @@ +tosca_definitions_version: alien_dsl_2_0_0 + +metadata: + template_name: PersistentDiskTest + template_version: 1.0 + template_author: tester + +description: "" + +imports: + - + - + - + +topology_template: + node_templates: + Compute: + metadata: + type: yorc.nodes.google.Compute + properties: + image_project: "centos-cloud" + image_family: "centos-7" + machine_type: "n1-standard-1" + zone: "europe-west1-b" + requirements: + - local_storage: + node: BS1 + capability: tosca.capabilities.Attachment + relationship: + type: tosca.relationships.AttachesTo + properties: + mode: "READ_ONLY" + capabilities: + endpoint: + properties: + secure: true + protocol: tcp + network_name: PRIVATE + initiator: source + credentials: {user: centos} + scalable: + properties: + min_instances: 1 + max_instances: 1 + default_instances: 1 + BS1: + metadata: + type: yorc.nodes.google.PersistentDisk + properties: + zone: "europe-west1-b" + size: "12 GB" diff --git a/prov/terraform/google/testdata/simplePersistentDisk.yaml b/prov/terraform/google/testdata/simplePersistentDisk.yaml new file mode 100644 index 000000000..31267f129 --- /dev/null +++ b/prov/terraform/google/testdata/simplePersistentDisk.yaml @@ -0,0 +1,34 @@ +tosca_definitions_version: alien_dsl_2_0_0 + +metadata: + template_name: PersistentDiskTest + template_version: 1.0 + template_author: tester + +description: "" + +imports: + - + - + - + +topology_template: + node_templates: + PersistentDisk: + type: yorc.nodes.google.PersistentDisk + properties: + zone: "europe-west1-b" + type: "pd-ssd" + description: "my description for persistent disk" + snapshot_id: "projects/project/global/snapshots/snapshot" + size: "32 GB" + labels: "key1=value1, key2=value2" + disk_encryption_key: + raw_key: "1234" + sha256: "5678" + requirements: + - attachToComputeAttach: + type_requirement: attachment + node: Comp + capability: tosca.capabilities.Attachment + relationship: tosca.relationships.AttachTo diff --git a/prov/terraform/openstack/bs_volume.go b/prov/terraform/openstack/bs_volume.go index 5192803b6..eb97c75a5 100644 --- a/prov/terraform/openstack/bs_volume.go +++ b/prov/terraform/openstack/bs_volume.go @@ -15,14 +15,11 @@ package openstack import ( - "strconv" - - "github.com/dustin/go-humanize" "github.com/hashicorp/consul/api" "github.com/pkg/errors" "github.com/ystia/yorc/config" - "github.com/ystia/yorc/helper/mathutil" +
"github.com/ystia/yorc/helper/sizeutil" "github.com/ystia/yorc/log" ) @@ -50,28 +47,11 @@ func (g *osGenerator) generateOSBSVolume(kv *api.KV, cfg config.Configuration, u } // Default size unit is MB log.Debugf("Size form consul is %q", size) - mSize, err := strconv.Atoi(size) + volume.Size, err = sizeutil.ConvertToGB(size) if err != nil { - var bsize uint64 - bsize, err = humanize.ParseBytes(size) - if err != nil { - return volume, errors.Errorf("Can't convert size to bytes value: %v", err) - } - // OpenStack needs the size in GB so we round it up. - gSize := float64(bsize) / humanize.GByte - log.Debugf("Computed size in GB: %f", gSize) - gSize = mathutil.Round(gSize, 0, 0) - log.Debugf("Computed size rounded in GB: %d", int(gSize)) - volume.Size = int(gSize) - } else { - log.Debugf("Size in MB: %d", mSize) - // OpenStack needs the size in GB so we round it up. - gSize := float64(mSize) / 1000 - log.Debugf("Computed size in GB: %f", gSize) - gSize = mathutil.Round(gSize, 0, 0) - log.Debugf("Computed size rounded in GB: %d", int(gSize)) - volume.Size = int(gSize) + return volume, err } + log.Debugf("Computed size rounded in GB: %d", volume.Size) region, err := g.getStringFormConsul(kv, url, "properties/region") if err != nil { diff --git a/prov/terraform/openstack/generator.go b/prov/terraform/openstack/generator.go index 6525cfb3a..3c9cf2c97 100644 --- a/prov/terraform/openstack/generator.go +++ b/prov/terraform/openstack/generator.go @@ -19,7 +19,6 @@ import ( "encoding/json" "fmt" "io/ioutil" - "os" "path" "path/filepath" "strconv" @@ -50,7 +49,7 @@ func (g *osGenerator) getStringFormConsul(kv *api.KV, baseURL, property string) return string(getResult.Value), nil } -func (g *osGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName string) (bool, map[string]string, []string, error) { +func (g *osGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg config.Configuration, deploymentID, nodeName, infrastructurePath string) (bool, map[string]string, []string, error) { log.Debugf("Generating infrastructure for deployment with id %s", deploymentID) cClient, err := cfg.GetConsulClient() if err != nil { @@ -273,13 +272,9 @@ func (g *osGenerator) GenerateTerraformInfraForNode(ctx context.Context, cfg con if err != nil { return false, nil, nil, errors.Wrap(err, "Failed to generate JSON of terraform Infrastructure description") } - infraPath := filepath.Join(cfg.WorkingDirectory, "deployments", deploymentID, "infra", nodeName) - if err = os.MkdirAll(infraPath, 0775); err != nil { - return false, nil, nil, errors.Wrapf(err, "Failed to create infrastructure working directory %q", infraPath) - } - if err = ioutil.WriteFile(filepath.Join(infraPath, "infra.tf.json"), jsonInfra, 0664); err != nil { - return false, nil, nil, errors.Wrapf(err, "Failed to write file %q", filepath.Join(infraPath, "infra.tf.json")) + if err = ioutil.WriteFile(filepath.Join(infrastructurePath, "infra.tf.json"), jsonInfra, 0664); err != nil { + return false, nil, nil, errors.Wrapf(err, "Failed to write file %q", filepath.Join(infrastructurePath, "infra.tf.json")) } log.Debugf("Infrastructure generated for deployment with id %s", deploymentID) diff --git a/prov/terraform/openstack/init.go b/prov/terraform/openstack/init.go index 3f1780c48..4f976ca1e 100644 --- a/prov/terraform/openstack/init.go +++ b/prov/terraform/openstack/init.go @@ -15,43 +15,12 @@ package openstack import ( - "context" - "fmt" - "strings" - - "github.com/hashicorp/consul/api" - 
"github.com/ystia/yorc/config" - "github.com/ystia/yorc/deployments" - "github.com/ystia/yorc/events" - "github.com/ystia/yorc/log" "github.com/ystia/yorc/prov/terraform" + "github.com/ystia/yorc/prov/terraform/commons" "github.com/ystia/yorc/registry" ) func init() { reg := registry.GetRegistry() - reg.RegisterDelegates([]string{`yorc\.nodes\.openstack\..*`}, terraform.NewExecutor(&osGenerator{}, preDestroyInfraCallback), registry.BuiltinOrigin) -} - -func preDestroyInfraCallback(ctx context.Context, kv *api.KV, cfg config.Configuration, deploymentID, nodeName string) (bool, error) { - nodeType, err := deployments.GetNodeType(kv, deploymentID, nodeName) - if err != nil { - return false, err - } - // TODO consider making this generic: references to OpenStack should not be found here. - if nodeType == "yorc.nodes.openstack.BlockStorage" { - - deletable, err := deployments.GetNodePropertyValue(kv, deploymentID, nodeName, "deletable") - if err != nil { - return false, err - } - if deletable == nil || strings.ToLower(deletable.RawString()) != "true" { - // False by default - msg := fmt.Sprintf("Node %q is a BlockStorage without the property 'deletable' do not destroy it...", nodeName) - log.Debug(msg) - events.WithContextOptionalFields(ctx).NewLogEntry(events.LogLevelINFO, deploymentID).RegisterAsString(msg) - return false, nil - } - } - return true, nil + reg.RegisterDelegates([]string{`yorc\.nodes\.openstack\..*`}, terraform.NewExecutor(&osGenerator{}, commons.PreDestroyStorageInfraCallback), registry.BuiltinOrigin) } diff --git a/tasks/tasks.go b/tasks/tasks.go index f57dbffd7..a5f3cb3ac 100644 --- a/tasks/tasks.go +++ b/tasks/tasks.go @@ -216,32 +216,34 @@ func DeleteTask(kv *api.KV, taskID string) error { } // TargetHasLivingTasks checks if a targetID has associated tasks in status INITIAL or RUNNING and returns the id and status of the first one found +// +// Only Deploy, UnDeploy, ScaleOut, ScaleIn and Purge task type are considered. 
func TargetHasLivingTasks(kv *api.KV, targetID string) (bool, string, string, error) { tasksKeys, _, err := kv.Keys(consulutil.TasksPrefix+"/", "/", nil) if err != nil { return false, "", "", errors.Wrap(err, consulutil.ConsulGenericErrMsg) } for _, taskKey := range tasksKeys { - kvp, _, err := kv.Get(path.Join(taskKey, "targetId"), nil) + taskID := path.Base(taskKey) + ttID, err := GetTaskTarget(kv, taskID) if err != nil { - return false, "", "", errors.Wrap(err, consulutil.ConsulGenericErrMsg) + return false, "", "", err } - if kvp != nil && len(kvp.Value) > 0 && string(kvp.Value) == targetID { - kvp, _, err := kv.Get(path.Join(taskKey, "status"), nil) - taskID := path.Base(taskKey) + if ttID == targetID { + tStatus, err := GetTaskStatus(kv, taskID) if err != nil { - return false, "", "", errors.Wrap(err, consulutil.ConsulGenericErrMsg) + return false, "", "", err } - if kvp == nil || len(kvp.Value) == 0 { - return false, "", "", errors.Errorf("Missing status for task with id %q", taskID) - } - statusInt, err := strconv.Atoi(string(kvp.Value)) + tType, err := GetTaskType(kv, taskID) if err != nil { - return false, "", "", errors.Wrap(err, "Invalid task status") + return false, "", "", err } - switch TaskStatus(statusInt) { - case TaskStatusINITIAL, TaskStatusRUNNING: - return true, taskID, TaskStatus(statusInt).String(), nil + + switch tType { + case TaskTypeDeploy, TaskTypeUnDeploy, TaskTypePurge, TaskTypeScaleIn, TaskTypeScaleOut: + if tStatus == TaskStatusINITIAL || tStatus == TaskStatusRUNNING { + return true, taskID, tStatus.String(), nil + } } } } diff --git a/tasks/tasks_test.go b/tasks/tasks_test.go index 411dd0149..558309a7e 100644 --- a/tasks/tasks_test.go +++ b/tasks/tasks_test.go @@ -15,17 +15,16 @@ package tasks import ( + "encoding/json" + "fmt" + "path" "reflect" "testing" - "github.com/ystia/yorc/helper/consulutil" - - "path" - - "encoding/json" - "fmt" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/testutil" + + "github.com/ystia/yorc/helper/consulutil" ) func populateKV(t *testing.T, srv *testutil.TestServer) { @@ -80,7 +79,8 @@ func populateKV(t *testing.T, srv *testutil.TestServer) { consulutil.TasksPrefix + "/t13/type": []byte("5"), consulutil.TasksPrefix + "/t13/status": []byte("3"), consulutil.TasksPrefix + "/t14/status": []byte("3"), - consulutil.TasksPrefix + "/t14/type": []byte("5"), + consulutil.TasksPrefix + "/t14/type": []byte("6"), + consulutil.TasksPrefix + "/t14/targetId": []byte("id"), consulutil.TasksPrefix + "/t15/targetId": []byte("xxx"), consulutil.TasksPrefix + "/t15/status": []byte("2"),
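To close the loop on the Google additions earlier in this changeset: the structs added in resources.go serialize directly into the infra.tf.json document that GenerateTerraformInfraForNode now writes under infrastructurePath. A self-contained sketch, with the struct copied from the resources.go hunk and field values mirroring the expectations of testSimpleComputeInstanceWithPersistentDisk, showing the Terraform JSON fragment produced for the attached disk:

package main

import (
	"encoding/json"
	"fmt"
)

// ComputeAttachedDisk is copied from the resources.go addition in this changeset.
type ComputeAttachedDisk struct {
	Instance   string `json:"instance"`
	Disk       string `json:"disk"`
	DeviceName string `json:"device_name,omitempty"`
	Mode       string `json:"mode,omitempty"`
	Zone       string `json:"zone,omitempty"`
}

func main() {
	// Values mirror the attached-disk assertions in the compute-with-disk test.
	d := ComputeAttachedDisk{
		Instance:   "${google_compute_instance.compute-0.name}",
		Disk:       "my_vol_id",
		DeviceName: "bs1-0-to-compute-0",
		Mode:       "READ_ONLY",
		Zone:       "europe-west1-b",
	}
	out, err := json.MarshalIndent(d, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints the resource body that ends up under
	// resource.google_compute_attached_disk.google-bs1-0-to-compute-0 in infra.tf.json.
	fmt.Println(string(out))
}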