From 7081b2fc6091ebf65e912aca47a6d21680170eb6 Mon Sep 17 00:00:00 2001 From: Abhishekism9450 <32683845+Abhishekism9450@users.noreply.github.com> Date: Wed, 11 Jan 2023 12:44:29 +0530 Subject: [PATCH] Feat/m era ha (#518) --- client/era/era_service.go | 2 +- client/era/era_structs.go | 120 +++++---- examples/ndb/database_instance/main.tf | 170 ++++++++++++ nutanix/common_era_schema.go | 241 +++++++++++++++--- nutanix/data_source_nutanix_ndb_database.go | 33 ++- nutanix/resource_nutanix_nbd_database_test.go | 203 ++++++++++++++- nutanix/resource_nutanix_ndb_database.go | 219 +++++++++++++--- 7 files changed, 860 insertions(+), 128 deletions(-) diff --git a/client/era/era_service.go b/client/era/era_service.go index 2d79cbd40..f71356ff9 100644 --- a/client/era/era_service.go +++ b/client/era/era_service.go @@ -254,7 +254,7 @@ func (sc ServiceClient) GetOperation(req GetOperationRequest) (*GetOperationResp } func (sc ServiceClient) GetDatabaseInstance(ctx context.Context, dbInstanceID string) (*GetDatabaseResponse, error) { - httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/databases/%s?detailed=true&load-dbserver-cluster=true", dbInstanceID), nil) + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/databases/%s?detailed=false&load-dbserver-cluster=false", dbInstanceID), nil) if err != nil { return nil, err } diff --git a/client/era/era_structs.go b/client/era/era_structs.go index 5fe5ccc3b..3c3a845f6 100644 --- a/client/era/era_structs.go +++ b/client/era/era_structs.go @@ -232,35 +232,44 @@ type Dailyschedule struct { } type Schedule struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - UniqueName string `json:"uniqueName"` - OwnerID string `json:"ownerId"` - SystemPolicy bool `json:"systemPolicy"` - GlobalPolicy bool `json:"globalPolicy"` - Datecreated string `json:"dateCreated"` - Datemodified string `json:"dateModified"` - Snapshottimeofday *Snapshottimeofday `json:"snapshotTimeOfDay"` - Continuousschedule *Continuousschedule `json:"continuousSchedule"` - Weeklyschedule *Weeklyschedule `json:"weeklySchedule"` - Dailyschedule *Dailyschedule `json:"dailySchedule"` - Monthlyschedule *Monthlyschedule `json:"monthlySchedule"` - Quartelyschedule *Quartelyschedule `json:"quartelySchedule"` - Yearlyschedule *Yearlyschedule `json:"yearlySchedule"` - ReferenceCount int `json:"referenceCount"` - StartTime string `json:"startTime"` - TimeZone string `json:"timeZone"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + UniqueName *string `json:"uniqueName,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + StartTime *string `json:"startTime,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` + Datecreated *string `json:"dateCreated,omitempty"` + Datemodified *string `json:"dateModified,omitempty"` + ReferenceCount *int `json:"referenceCount,omitempty"` + SystemPolicy bool `json:"systemPolicy,omitempty"` + GlobalPolicy bool `json:"globalPolicy,omitempty"` + Snapshottimeofday *Snapshottimeofday `json:"snapshotTimeOfDay,omitempty"` + Continuousschedule *Continuousschedule `json:"continuousSchedule,omitempty"` + Weeklyschedule *Weeklyschedule `json:"weeklySchedule,omitempty"` + Dailyschedule *Dailyschedule `json:"dailySchedule,omitempty"` + Monthlyschedule *Monthlyschedule `json:"monthlySchedule,omitempty"` + Quartelyschedule *Quartelyschedule `json:"quartelySchedule,omitempty"` + Yearlyschedule *Yearlyschedule 
`json:"yearlySchedule,omitempty"` +} + +type PrimarySLA struct { + SLAID *string `json:"slaId,omitempty"` + NxClusterIds []*string `json:"nxClusterIds,omitempty"` +} + +type SLADetails struct { + PrimarySLA *PrimarySLA `json:"primarySla,omitempty"` } type Timemachineinfo struct { - Name string `json:"name"` - Description string `json:"description"` - Slaid string `json:"slaId"` - Schedule Schedule `json:"schedule"` - Tags []*Tags `json:"tags,omitempty"` - - Autotunelogdrive bool `json:"autoTuneLogDrive"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Slaid string `json:"slaId,omitempty"` + Schedule Schedule `json:"schedule,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + Autotunelogdrive bool `json:"autoTuneLogDrive,omitempty"` + SLADetails *SLADetails `json:"slaDetails,omitempty"` } type Actionarguments struct { @@ -268,11 +277,24 @@ type Actionarguments struct { Value interface{} `json:"value"` } +type NodesProperties struct { + Name string `json:"name"` + Value interface{} `json:"value"` +} + +type IPInfos struct { + IPType *string `json:"ipType,omitempty"` + IPAddresses []*string `json:"ipAddresses,omitempty"` +} + type Nodes struct { - Properties []interface{} `json:"properties"` - Vmname string `json:"vmName,omitempty"` - Networkprofileid string `json:"networkProfileId,omitempty"` - DatabaseServerID string `json:"dbserverId,omitempty"` + Properties []*NodesProperties `json:"properties"` + Vmname *string `json:"vmName,omitempty"` + Networkprofileid *string `json:"networkProfileId,omitempty"` + DatabaseServerID *string `json:"dbserverId,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + ComputeProfileID *string `json:"computeProfileId,omitempty"` + IPInfos []*IPInfos `json:"ipInfos,omitempty"` } // ProvisionDatabaseResponse structs @@ -831,25 +853,25 @@ type Protectiondomain struct { AssocEntities []string `json:"assocEntities,omitempty"` } type Databasenodes struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - Ownerid string `json:"ownerId"` - Datecreated string `json:"dateCreated"` - Datemodified string `json:"dateModified"` - AccessLevel interface{} `json:"accessLevel,omitempty"` - Properties []interface{} `json:"properties"` - Tags []*Tags `json:"tags"` - Databaseid string `json:"databaseId"` - Status string `json:"status"` - Databasestatus string `json:"databaseStatus"` - Primary bool `json:"primary"` - Dbserverid string `json:"dbserverId"` - Softwareinstallationid string `json:"softwareInstallationId"` - Protectiondomainid string `json:"protectionDomainId"` - Info Info `json:"info"` - Metadata interface{} `json:"metadata"` - Protectiondomain *Protectiondomain `json:"protectionDomain"` + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Ownerid string `json:"ownerId"` + Datecreated string `json:"dateCreated"` + Datemodified string `json:"dateModified"` + AccessLevel interface{} `json:"accessLevel,omitempty"` + Properties []*DBInstanceProperties `json:"properties"` + Tags []*Tags `json:"tags"` + Databaseid string `json:"databaseId"` + Status string `json:"status"` + Databasestatus string `json:"databaseStatus"` + Primary bool `json:"primary"` + Dbserverid string `json:"dbserverId"` + Softwareinstallationid string `json:"softwareInstallationId"` + Protectiondomainid string `json:"protectionDomainId"` + Info Info `json:"info"` + Metadata interface{} `json:"metadata"` + Protectiondomain *Protectiondomain 
`json:"protectionDomain"`
 	// Valideastate bool `json:"validEaState"`
 }
diff --git a/examples/ndb/database_instance/main.tf b/examples/ndb/database_instance/main.tf
index ec09f1da0..826a99923 100644
--- a/examples/ndb/database_instance/main.tf
+++ b/examples/ndb/database_instance/main.tf
@@ -98,3 +98,173 @@ resource "nutanix_ndb_database" "dbp" {
     }
   }
 }
+
+
+## provision HA instance
+
+resource "nutanix_ndb_database" "dbp_ha" {
+    // database type
+    databasetype = "postgres_database"
+
+    // database name & description
+    name = "test-pg-inst-HA-tf"
+    description = "adding description"
+
+    // profile details
+    softwareprofileid = "{{ software_profile_id }}"
+    softwareprofileversionid = "{{ software_profile_version_id }}"
+    computeprofileid = "{{ compute_profile_id }}"
+    networkprofileid = "{{ network_profile_id }}"
+    dbparameterprofileid = "{{ db_parameter_profile_id }}"
+
+    // required for HA instance
+    createdbserver = true
+    clustered = true
+
+    // node count (including the HAProxy server node)
+    nodecount= 4
+
+    // minimum details required for provisioning an HA instance
+    postgresql_info{
+      listener_port = "5432"
+
+      database_size= "200"
+
+      db_password = "{{ database_password }}"
+
+      database_names= "testdb1"
+
+      ha_instance{
+        proxy_read_port= "5001"
+
+        proxy_write_port = "5000"
+
+        cluster_name= "{{ cluster_name }}"
+
+        patroni_cluster_name = "{{ patroni_cluster_name }}"
+      }
+    }
+
+    nxclusterid= "1c42ca25-32f4-42d9-a2bd-6a21f925b725"
+    sshpublickey= "{{ ssh_public_key }}"
+
+    // nodes are required.
+
+    // HA proxy node
+    nodes{
+      properties{
+        name = "node_type"
+        value = "haproxy"
+      }
+      vmname = "{{ vm name }}"
+      nx_cluster_id = "{{ nx_cluster_id }}"
+    }
+
+    // Primary node for read/write ops
+    nodes{
+      properties{
+        name= "role"
+        value= "Primary"
+      }
+      properties{
+        name= "failover_mode"
+        value= "Automatic"
+      }
+      properties{
+        name= "node_type"
+        value= "database"
+      }
+
+      vmname = "{{ name of vm }}"
+      networkprofileid="{{ network_profile_id }}"
+      computeprofileid= "{{ compute_profile_id }}"
+      nx_cluster_id= "{{ nx_cluster_id }}"
+    }
+
+    // Secondary nodes for read ops
+    nodes{
+      properties{
+        name= "role"
+        value= "Secondary"
+      }
+      properties{
+        name= "failover_mode"
+        value= "Automatic"
+      }
+      properties{
+        name= "node_type"
+        value= "database"
+      }
+      vmname = "{{ name of vm }}"
+      networkprofileid="{{ network_profile_id }}"
+      computeprofileid= "{{ compute_profile_id }}"
+      nx_cluster_id= "{{ nx_cluster_id }}"
+    }
+    nodes{
+      properties{
+        name= "role"
+        value= "Secondary"
+      }
+      properties{
+        name= "failover_mode"
+        value= "Automatic"
+      }
+      properties{
+        name= "node_type"
+        value= "database"
+      }
+
+      vmname = "{{ name of vm }}"
+      networkprofileid="{{ network_profile_id }}"
+      computeprofileid= "{{ compute_profile_id }}"
+      nx_cluster_id= "{{ nx_cluster_id }}"
+    }
+
+    // time machine required
+    timemachineinfo {
+      name= "test-pg-inst-HA"
+      description=""
+      sla_details{
+        primary_sla{
+          sla_id= "{{ required SLA }}"
+          nx_cluster_ids= [
+            "{{ nx_cluster_id }}"
+          ]
+        }
+      }
+      // schedule fields are optional
+      schedule {
+        snapshottimeofday{
+          hours= 16
+          minutes= 0
+          seconds= 0
+        }
+        continuousschedule{
+          enabled=true
+          logbackupinterval= 30
+          snapshotsperday=1
+        }
+        weeklyschedule{
+          enabled=true
+          dayofweek= "WEDNESDAY"
+        }
+        monthlyschedule{
+          enabled = true
+          dayofmonth= "27"
+        }
+        quartelyschedule{
+          enabled=true
+          startmonth="JANUARY"
+          dayofmonth= 27
+        }
+        yearlyschedule{
+          enabled= false
+          dayofmonth= 31
+          month="DECEMBER"
+        }
+      }
+    }
+
+    vm_password= "{{ vm_password }}"
+    autotunestagingdrive= 
true +} \ No newline at end of file diff --git a/nutanix/common_era_schema.go b/nutanix/common_era_schema.go index 538fe0786..5e7b1ca60 100644 --- a/nutanix/common_era_schema.go +++ b/nutanix/common_era_schema.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" era "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" ) type dbID string @@ -46,9 +47,37 @@ func timeMachineInfoSchema() *schema.Schema { }, "slaid": { Type: schema.TypeString, - Required: true, + Optional: true, Description: "description of SLA ID.", }, + "sla_details": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "primary_sla": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sla_id": { + Type: schema.TypeString, + Required: true, + Description: "description of SLA ID.", + }, + "nx_cluster_ids": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, "autotunelogdrive": { Type: schema.TypeBool, Optional: true, @@ -308,14 +337,40 @@ func buildTimeMachineSchedule(set *schema.Set) *era.Schedule { func buildTimeMachineFromResourceData(set *schema.Set) *era.Timemachineinfo { d := set.List() tMap := d[0].(map[string]interface{}) - return &era.Timemachineinfo{ - Name: tMap["name"].(string), - Description: tMap["description"].(string), - Slaid: tMap["slaid"].(string), - Schedule: *buildTimeMachineSchedule(tMap["schedule"].(*schema.Set)), // NULL Pointer check - Tags: expandTags(tMap["tags"].([]interface{})), - Autotunelogdrive: tMap["autotunelogdrive"].(bool), + + out := &era.Timemachineinfo{} + + if tMap != nil { + if name, ok := tMap["name"]; ok && len(name.(string)) > 0 { + out.Name = name.(string) + } + + if des, ok := tMap["description"]; ok && len(des.(string)) > 0 { + out.Description = des.(string) + } + + if slaid, ok := tMap["slaid"]; ok && len(slaid.(string)) > 0 { + out.Slaid = slaid.(string) + } + + if schedule, ok := tMap["schedule"]; ok && len(schedule.(*schema.Set).List()) > 0 { + out.Schedule = *buildTimeMachineSchedule(schedule.(*schema.Set)) + } + + if tags, ok := tMap["tags"]; ok && len(tags.([]interface{})) > 0 { + out.Tags = expandTags(tags.([]interface{})) + } + + if autotunelogdrive, ok := tMap["autotunelogdrive"]; ok && autotunelogdrive.(bool) { + out.Autotunelogdrive = autotunelogdrive.(bool) + } + + if slaDetails, ok := tMap["sla_details"]; ok && len(slaDetails.([]interface{})) > 0 { + out.SLADetails = buildSLADetails(slaDetails.([]interface{})) + } + return out } + return nil } func nodesSchema() *schema.Schema { @@ -328,9 +383,8 @@ func nodesSchema() *schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "properties": { - Type: schema.TypeSet, - Optional: true, - ConfigMode: schema.SchemaConfigModeAttr, + Type: schema.TypeSet, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { @@ -345,21 +399,44 @@ func nodesSchema() *schema.Schema { }, }, "vmname": { - Type: schema.TypeString, - Required: true, - ConfigMode: schema.SchemaConfigModeAttr, + Type: schema.TypeString, + Required: true, }, "networkprofileid": { - Type: schema.TypeString, - Required: true, - ConfigMode: schema.SchemaConfigModeAttr, + Type: schema.TypeString, + Optional: true, + }, + "ip_infos": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + 
Schema: map[string]*schema.Schema{ + "ip_type": { + Type: schema.TypeString, + Optional: true, + }, + "ip_addresses": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "computeprofileid": { + Type: schema.TypeString, + Optional: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Optional: true, }, "dbserverid": { // When createDbServer is false, we can use this field to set the target db server. - Type: schema.TypeString, - Description: "", - Optional: true, - ConfigMode: schema.SchemaConfigModeAttr, - Default: "", + Type: schema.TypeString, + Optional: true, + Default: "", }, }, }, @@ -368,17 +445,36 @@ func nodesSchema() *schema.Schema { func buildNodesFromResourceData(d *schema.Set) []*era.Nodes { argSet := d.List() - args := []*era.Nodes{} + nodes := []*era.Nodes{} for _, arg := range argSet { - args = append(args, &era.Nodes{ - Properties: arg.(map[string]interface{})["properties"].(*schema.Set).List(), - Vmname: arg.(map[string]interface{})["vmname"].(string), - Networkprofileid: arg.(map[string]interface{})["networkprofileid"].(string), - DatabaseServerID: arg.(map[string]interface{})["dbserverid"].(string), - }) + val := arg.(map[string]interface{}) + node := &era.Nodes{} + + if prop, ok := val["properties"]; ok { + node.Properties = expandNodesProperties(prop.(*schema.Set)) + } + if vmName, ok := val["vmname"]; ok && len(vmName.(string)) > 0 { + node.Vmname = utils.StringPtr(vmName.(string)) + } + if networkProfile, ok := val["networkprofileid"]; ok && len(networkProfile.(string)) > 0 { + node.Networkprofileid = utils.StringPtr(networkProfile.(string)) + } + if dbServer, ok := val["dbserverid"]; ok && len(dbServer.(string)) > 0 { + node.DatabaseServerID = utils.StringPtr(dbServer.(string)) + } + if nxCls, ok := val["nx_cluster_id"]; ok && len(nxCls.(string)) > 0 { + node.NxClusterID = utils.StringPtr(nxCls.(string)) + } + if computeProfile, ok := val["computeprofileid"]; ok && len(computeProfile.(string)) > 0 { + node.ComputeProfileID = utils.StringPtr(computeProfile.(string)) + } + if infos, ok := val["ip_infos"]; ok && len(infos.([]interface{})) > 0 { + node.IPInfos = expandIPInfos(infos.([]interface{})) + } + nodes = append(nodes, node) } - return args + return nodes } func actionArgumentsSchema() *schema.Schema { @@ -429,3 +525,88 @@ func buildActionArgumentsFromResourceData(d *schema.Set, args []*era.Actionargum } return args } + +func buildSLADetails(pr []interface{}) *era.SLADetails { + if len(pr) > 0 { + res := &era.SLADetails{} + + for _, v := range pr { + val := v.(map[string]interface{}) + + if priSLA, pok := val["primary_sla"]; pok { + res.PrimarySLA = expandPrimarySLA(priSLA.([]interface{})) + } + } + return res + } + return nil +} + +func expandPrimarySLA(pr []interface{}) *era.PrimarySLA { + if len(pr) > 0 { + out := &era.PrimarySLA{} + + for _, v := range pr { + val := v.(map[string]interface{}) + + if slaid, ok := val["sla_id"]; ok { + out.SLAID = utils.StringPtr(slaid.(string)) + } + + if nxcls, ok := val["nx_cluster_ids"]; ok { + res := make([]*string, 0) + nxclster := nxcls.([]interface{}) + + for _, v := range nxclster { + res = append(res, utils.StringPtr(v.(string))) + } + out.NxClusterIds = res + } + } + return out + } + return nil +} + +func expandNodesProperties(pr *schema.Set) []*era.NodesProperties { + argSet := pr.List() + + out := make([]*era.NodesProperties, 0) + for _, arg := range argSet { + var val interface{} + val = arg.(map[string]interface{})["value"] + b, ok := 
tryToConvertBool(val)
+		if ok {
+			val = b
+		}
+
+		out = append(out, &era.NodesProperties{
+			Name:  arg.(map[string]interface{})["name"].(string),
+			Value: val,
+		})
+	}
+	return out
+}
+
+func expandIPInfos(pr []interface{}) []*era.IPInfos {
+	if len(pr) > 0 {
+		IPInfos := make([]*era.IPInfos, 0)
+
+		for _, v := range pr {
+			val := v.(map[string]interface{})
+			IPInfo := &era.IPInfos{}
+
+			if ipType, ok := val["ip_type"]; ok && len(ipType.(string)) > 0 {
+				IPInfo.IPType = utils.StringPtr(ipType.(string))
+			}
+
+			for _, ip := range val["ip_addresses"].([]interface{}) { // schema lists decode as []interface{}, not []string
+				IPInfo.IPAddresses = append(IPInfo.IPAddresses, utils.StringPtr(ip.(string)))
+			}
+
+			IPInfos = append(IPInfos, IPInfo)
+		}
+		return IPInfos
+	}
+	return nil
+}
diff --git a/nutanix/data_source_nutanix_ndb_database.go b/nutanix/data_source_nutanix_ndb_database.go
index be5e84d4f..398eb2c4d 100644
--- a/nutanix/data_source_nutanix_ndb_database.go
+++ b/nutanix/data_source_nutanix_ndb_database.go
@@ -287,8 +287,10 @@ func dataSourceNutanixEraDatabaseRead(ctx context.Context, d *schema.ResourceDat
 		return diag.FromErr(err)
 	}
 
-	if err := d.Set("dbserver_logical_cluster", resp.Dbserverlogicalcluster); err != nil {
-		return diag.FromErr(err)
+	if resp.Dbserverlogicalcluster != nil {
+		if err := d.Set("dbserver_logical_cluster", resp.Dbserverlogicalcluster); err != nil {
+			return diag.FromErr(err)
+		}
 	}
 
 	if err := d.Set("database_nodes", flattenDBNodes(resp.Databasenodes)); err != nil {
@@ -381,7 +383,7 @@ func flattenDBNodes(pr []Era.Databasenodes) []map[string]interface{} {
 			db["name"] = v.Name
 			db["owner_id"] = v.Ownerid
 			db["primary"] = v.Primary
-			db["properties"] = v.Properties
+			db["properties"] = flattenDBInstanceProperties(v.Properties)
 			db["protection_domain"] = flattenDBProtectionDomain(v.Protectiondomain)
 			db["protection_domain_id"] = v.Protectiondomainid
 			db["software_installation_id"] = v.Softwareinstallationid
@@ -1760,8 +1762,29 @@ func dataSourceEraDatabaseNodes() *schema.Schema {
 				"properties": {
 					Type:     schema.TypeList,
 					Computed: true,
-					Elem: &schema.Schema{
-						Type: schema.TypeString,
+					Elem: &schema.Resource{
+						Schema: map[string]*schema.Schema{
+							"name": {
+								Type:     schema.TypeString,
+								Computed: true,
+							},
+							"value": {
+								Type:     schema.TypeString,
+								Computed: true,
+							},
+							"ref_id": {
+								Type:     schema.TypeString,
+								Computed: true,
+							},
+							"secure": {
+								Type:     schema.TypeBool,
+								Computed: true,
+							},
+							"description": {
+								Type:     schema.TypeString,
+								Computed: true,
+							},
+						},
 					},
 				},
 				"tags": dataSourceEraDBInstanceTags(),
diff --git a/nutanix/resource_nutanix_nbd_database_test.go b/nutanix/resource_nutanix_nbd_database_test.go
index 93fb86cd7..36d74a5ed 100644
--- a/nutanix/resource_nutanix_nbd_database_test.go
+++ b/nutanix/resource_nutanix_nbd_database_test.go
@@ -23,6 +23,32 @@ func TestAccEra_basic(t *testing.T) {
 			Check: resource.ComposeTestCheckFunc(
 				resource.TestCheckResourceAttr(resourceNameDB, "name", name),
 				resource.TestCheckResourceAttr(resourceNameDB, "description", desc),
+				resource.TestCheckResourceAttr(resourceNameDB, "databasetype", "postgres_database"),
+				resource.TestCheckResourceAttr(resourceNameDB, "database_nodes.#", "1"),
+				resource.TestCheckResourceAttrSet(resourceNameDB, "time_machine_id"),
 			),
 		},
 	},
 	})
 }
+
+func TestAccEraDatabaseProvisionHA(t *testing.T) {
+	name := "test-pg-inst-HA-tf"
+	desc := "this is desc"
+	sshKey := testVars.SSHKey
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccEraPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccEraDatabaseHAConfig(name, desc, 
sshKey), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameDB, "name", name), + resource.TestCheckResourceAttr(resourceNameDB, "description", desc), + resource.TestCheckResourceAttr(resourceNameDB, "databasetype", "postgres_database"), + resource.TestCheckResourceAttr(resourceNameDB, "database_nodes.#", "3"), + resource.TestCheckResourceAttr(resourceNameDB, "linked_databases.#", "4"), + resource.TestCheckResourceAttrSet(resourceNameDB, "time_machine_id"), ), }, }, @@ -86,7 +112,7 @@ func testAccEraDatabaseConfig(name, desc, vmName, sshKey string) string { networkprofileid= local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id } timemachineinfo { - name= "test-pg-inst" + name= "test-pg-inst-12" description="" slaid=local.slas["DEFAULT_OOB_BRONZE_SLA"].id schedule { @@ -123,3 +149,178 @@ func testAccEraDatabaseConfig(name, desc, vmName, sshKey string) string { } `, name, desc, vmName, sshKey) } + +func testAccEraDatabaseHAConfig(name, desc, sshKey string) string { + return fmt.Sprintf(` + data "nutanix_ndb_profiles" "p"{ + } + data "nutanix_ndb_slas" "slas"{} + data "nutanix_ndb_clusters" "clusters"{} + + locals { + profiles_by_type = { + for p in data.nutanix_ndb_profiles.p.profiles : p.type => p... + } + storage_profiles = { + for p in local.profiles_by_type.Storage: p.name => p + } + compute_profiles = { + for p in local.profiles_by_type.Compute: p.name => p + } + network_profiles = { + for p in local.profiles_by_type.Network: p.name => p + } + database_parameter_profiles = { + for p in local.profiles_by_type.Database_Parameter: p.name => p + } + software_profiles = { + for p in local.profiles_by_type.Software: p.name => p + } + slas = { + for p in data.nutanix_ndb_slas.slas.slas: p.name => p + } + clusters = { + for p in data.nutanix_ndb_clusters.clusters.clusters: p.name => p + } + } + + resource "nutanix_ndb_database" "acctest-managed" { + databasetype = "postgres_database" + name = "%[1]s" + description = "%[2]s" + softwareprofileid = local.software_profiles["POSTGRES_10.4_OOB"].id + softwareprofileversionid = local.software_profiles["POSTGRES_10.4_OOB"].latest_version_id + computeprofileid = local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id + networkprofileid = local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + dbparameterprofileid = local.database_parameter_profiles.DEFAULT_POSTGRES_PARAMS.id + + createdbserver = true + nodecount= 4 + clustered = true + + postgresql_info{ + listener_port = "5432" + database_size= "200" + db_password = "password" + database_names= "testdb1" + ha_instance{ + proxy_read_port= "5001" + + proxy_write_port = "5000" + + cluster_name= "ha-cls" + + patroni_cluster_name = "ha-patroni-cluster" + } + } + nxclusterid= local.clusters.EraCluster.id + sshpublickey= "%[3]s" + nodes{ + properties{ + name = "node_type" + value = "haproxy" + } + vmname = "ha-cls_haproxy1" + nx_cluster_id = local.clusters.EraCluster.id + } + nodes{ + properties{ + name= "role" + value= "Primary" + } + properties{ + name= "failover_mode" + value= "Automatic" + } + properties{ + name= "node_type" + value= "database" + } + vmname = "ha-cls-1" + networkprofileid=local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + computeprofileid= local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id + nx_cluster_id= local.clusters.EraCluster.id + } + nodes{ + properties{ + name= "role" + value= "Secondary" + } + properties{ + name= "failover_mode" + value= "Automatic" + } + properties{ + name= "node_type" + value= "database" + } + vmname = 
"ha-cls-2" + networkprofileid=local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + computeprofileid= local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id + nx_cluster_id= local.clusters.EraCluster.id + } + + nodes{ + properties{ + name= "role" + value= "Secondary" + } + properties{ + name= "failover_mode" + value= "Automatic" + } + properties{ + name= "node_type" + value= "database" + } + vmname = "ha-cls-3" + networkprofileid=local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + computeprofileid= local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id + nx_cluster_id= local.clusters.EraCluster.id + } + timemachineinfo { + name= "test-pg-inst" + description="" + + sla_details{ + primary_sla{ + sla_id= local.slas["DEFAULT_OOB_BRONZE_SLA"].id + nx_cluster_ids= [ + local.clusters.EraCluster.id + ] + } + } + schedule { + snapshottimeofday{ + hours= 16 + minutes= 0 + seconds= 0 + } + continuousschedule{ + enabled=true + logbackupinterval= 30 + snapshotsperday=1 + } + weeklyschedule{ + enabled=true + dayofweek= "WEDNESDAY" + } + monthlyschedule{ + enabled = true + dayofmonth= "27" + } + quartelyschedule{ + enabled=true + startmonth="JANUARY" + dayofmonth= 27 + } + yearlyschedule{ + enabled= false + dayofmonth= 31 + month="DECEMBER" + } + } + } + } + `, name, desc, sshKey) +} diff --git a/nutanix/resource_nutanix_ndb_database.go b/nutanix/resource_nutanix_ndb_database.go index a05eae41f..ad26da89e 100644 --- a/nutanix/resource_nutanix_ndb_database.go +++ b/nutanix/resource_nutanix_ndb_database.go @@ -17,7 +17,7 @@ import ( var ( eraDelay = 1 * time.Minute - EraProvisionTimeout = 35 * time.Minute + EraProvisionTimeout = 75 * time.Minute ) func resourceDatabaseInstance() *schema.Resource { @@ -216,6 +216,70 @@ func resourceDatabaseInstance() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "ha_instance": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + }, + "patroni_cluster_name": { + Type: schema.TypeString, + Required: true, + }, + "proxy_read_port": { + Type: schema.TypeString, + Required: true, + }, + "proxy_write_port": { + Type: schema.TypeString, + Required: true, + }, + "provision_virtual_ip": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "deploy_haproxy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "enable_synchronous_mode": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "failover_mode": { + Type: schema.TypeString, + Optional: true, + }, + "node_type": { + Type: schema.TypeString, + Optional: true, + Default: "database", + }, + "archive_wal_expire_days": { + Type: schema.TypeInt, + Optional: true, + Default: -1, + }, + "backup_policy": { + Type: schema.TypeString, + Optional: true, + Default: "primary_only", + }, + "enable_peer_auth": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, }, }, }, @@ -679,92 +743,163 @@ func deleteDatabaseInstance(ctx context.Context, d *schema.ResourceData, m inter func expandActionArguments(d *schema.ResourceData) []*era.Actionarguments { args := []*era.Actionarguments{} - if post, ok := d.GetOk("postgresql_info"); ok { + if post, ok := d.GetOk("postgresql_info"); ok && (len(post.([]interface{}))) > 0 { brr := post.([]interface{}) for _, arg := range brr { val := arg.(map[string]interface{}) - var values interface{} if plist, pok := val["listener_port"]; pok && len(plist.(string)) > 0 { - 
values = plist - args = append(args, &era.Actionarguments{ Name: "listener_port", - Value: values, + Value: plist, }) } - if plist, pok := val["database_size"]; pok && len(plist.(string)) > 0 { - values = plist - + if dbSize, pok := val["database_size"]; pok && len(dbSize.(string)) > 0 { args = append(args, &era.Actionarguments{ Name: "database_size", - Value: values, + Value: dbSize, }) } - if plist, pok := val["db_password"]; pok && len(plist.(string)) > 0 { - values = plist - + if dbPass, pok := val["db_password"]; pok && len(dbPass.(string)) > 0 { args = append(args, &era.Actionarguments{ Name: "db_password", - Value: values, + Value: dbPass, }) } - if plist, pok := val["database_names"]; pok && len(plist.(string)) > 0 { - values = plist - + if dbName, pok := val["database_names"]; pok && len(dbName.(string)) > 0 { args = append(args, &era.Actionarguments{ Name: "database_names", - Value: values, + Value: dbName, }) } - if plist, pok := val["auto_tune_staging_drive"]; pok && plist.(bool) { - values = plist - + if autoTune, pok := val["auto_tune_staging_drive"]; pok && autoTune.(bool) { args = append(args, &era.Actionarguments{ Name: "auto_tune_staging_drive", - Value: values, + Value: autoTune, }) } - if plist, pok := val["allocate_pg_hugepage"]; pok { - values = plist - + if allocatePG, pok := val["allocate_pg_hugepage"]; pok { args = append(args, &era.Actionarguments{ Name: "allocate_pg_hugepage", - Value: values, + Value: allocatePG, }) } - if plist, pok := val["auth_method"]; pok && len(plist.(string)) > 0 { - values = plist - + if authMethod, pok := val["auth_method"]; pok && len(authMethod.(string)) > 0 { args = append(args, &era.Actionarguments{ Name: "auth_method", - Value: values, + Value: authMethod, }) } - if plist, clok := val["cluster_database"]; clok { - values = plist - + if clsDB, clok := val["cluster_database"]; clok { args = append(args, &era.Actionarguments{ Name: "cluster_database", - Value: values, + Value: clsDB, }) } - if plist, clok := val["pre_create_script"]; clok && len(plist.(string)) > 0 { - values = plist - + if preScript, clok := val["pre_create_script"]; clok && len(preScript.(string)) > 0 { args = append(args, &era.Actionarguments{ Name: "pre_create_script", - Value: values, + Value: preScript, }) } - if plist, clok := val["post_create_script"]; clok && len(plist.(string)) > 0 { - values = plist - + if postScript, clok := val["post_create_script"]; clok && len(postScript.(string)) > 0 { args = append(args, &era.Actionarguments{ Name: "post_create_script", - Value: values, + Value: postScript, }) } + + if ha, ok := val["ha_instance"]; ok && len(ha.([]interface{})) > 0 { + haList := ha.([]interface{}) + + for _, v := range haList { + val := v.(map[string]interface{}) + + if haProxy, pok := val["proxy_read_port"]; pok && len(haProxy.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "proxy_read_port", + Value: haProxy, + }) + } + + if proxyWrite, pok := val["proxy_write_port"]; pok && len(proxyWrite.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "proxy_write_port", + Value: proxyWrite, + }) + } + + if backupPolicy, pok := val["backup_policy"]; pok && len(backupPolicy.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "backup_policy", + Value: backupPolicy, + }) + } + + if clsName, pok := val["cluster_name"]; pok && len(clsName.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "cluster_name", + Value: clsName, + }) + } + + if patroniClsName, pok := val["patroni_cluster_name"]; pok && 
len(patroniClsName.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "patroni_cluster_name", + Value: patroniClsName, + }) + } + + if nodeType, pok := val["node_type"]; pok && len(nodeType.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "node_type", + Value: nodeType, + }) + } + + if proVIP, pok := val["provision_virtual_ip"]; pok && proVIP.(bool) { + args = append(args, &era.Actionarguments{ + Name: "provision_virtual_ip", + Value: proVIP, + }) + } + + if deployHaproxy, pok := val["deploy_haproxy"]; pok && deployHaproxy.(bool) { + args = append(args, &era.Actionarguments{ + Name: "deploy_haproxy", + Value: deployHaproxy, + }) + } + + if enableSyncMode, pok := val["enable_synchronous_mode"]; pok && (enableSyncMode.(bool)) { + args = append(args, &era.Actionarguments{ + Name: "enable_synchronous_mode", + Value: enableSyncMode, + }) + } + + if failoverMode, pok := val["failover_mode"]; pok && len(failoverMode.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "failover_mode", + Value: failoverMode, + }) + } + + if walExp, pok := val["archive_wal_expire_days"]; pok { + args = append(args, &era.Actionarguments{ + Name: "archive_wal_expire_days", + Value: walExp, + }) + } + + if enablePeerAuth, pok := val["enable_peer_auth"]; pok && enablePeerAuth.(bool) { + args = append(args, &era.Actionarguments{ + Name: "enable_peer_auth", + Value: enablePeerAuth, + }) + } + } + } } } resp := buildActionArgumentsFromResourceData(d.Get("actionarguments").(*schema.Set), args)