From ec5d65cbdafb86515ec8cc67dc3cec6c591519fa Mon Sep 17 00:00:00 2001 From: Abhishekism9450 <32683845+Abhishekism9450@users.noreply.github.com> Date: Tue, 3 Jan 2023 15:21:20 +0530 Subject: [PATCH 01/18] Impr/m tags (#528) --- client/era/era_structs.go | 43 ++++++-- nutanix/common_era_schema.go | 10 +- nutanix/data_source_nutanix_ndb_database.go | 4 + nutanix/resource_nutanix_ndb_database.go | 113 +++++++++++++++++++- 4 files changed, 150 insertions(+), 20 deletions(-) diff --git a/client/era/era_structs.go b/client/era/era_structs.go index 27ebf8fc7..e94783697 100644 --- a/client/era/era_structs.go +++ b/client/era/era_structs.go @@ -148,6 +148,25 @@ type DatabaseTypeProperties struct { Defaultlogdriveautotune bool `json:"defaultLogDriveAutoTune"` } +type PrePostCommand struct { + PreCommand *string `json:"preCommand,omitempty"` + PostCommand *string `json:"postCommand,omitempty"` +} + +type Payload struct { + PrePostCommand *PrePostCommand `json:"prePostCommand,omitempty"` +} + +type Tasks struct { + TaskType *string `json:"taskType,omitempty"` + Payload *Payload `json:"payload,omitempty"` +} + +type MaintenanceTasks struct { + MaintenanceWindowID *string `json:"maintenanceWindowId,omitempty"` + Tasks []*Tasks `json:"tasks,omitempty"` +} + // ProvisionDatabaseRequestStructs type ProvisionDatabaseRequest struct { Createdbserver bool `json:"createDbserver,omitempty"` @@ -170,6 +189,8 @@ type ProvisionDatabaseRequest struct { Timemachineinfo *Timemachineinfo `json:"timeMachineInfo,omitempty"` Actionarguments []*Actionarguments `json:"actionArguments,omitempty"` Nodes []*Nodes `json:"nodes,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + MaintenanceTasks *MaintenanceTasks `json:"maintenanceTasks,omitempty"` } type Snapshottimeofday struct { @@ -233,11 +254,11 @@ type Schedule struct { } type Timemachineinfo struct { - Name string `json:"name"` - Description string `json:"description"` - Slaid string `json:"slaId"` - Schedule Schedule `json:"schedule"` - Tags []interface{} `json:"tags"` + Name string `json:"name"` + Description string `json:"description"` + Slaid string `json:"slaId"` + Schedule Schedule `json:"schedule"` + Tags []*Tags `json:"tags,omitempty"` Autotunelogdrive bool `json:"autoTuneLogDrive"` } @@ -511,12 +532,12 @@ type DeleteDatabaseResponse struct { // UpdateDatabase models type UpdateDatabaseRequest struct { - Name string `json:"name"` - Description string `json:"description"` - Tags []interface{} `json:"tags"` - Resetname bool `json:"resetName"` - Resetdescription bool `json:"resetDescription"` - Resettags bool `json:"resetTags"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + Resetname bool `json:"resetName,omitempty"` + Resetdescription bool `json:"resetDescription,omitempty"` + Resettags bool `json:"resetTags,omitempty"` } type UpdateDatabaseResponse struct { diff --git a/nutanix/common_era_schema.go b/nutanix/common_era_schema.go index 949b5f3d5..bbe9ccc1e 100644 --- a/nutanix/common_era_schema.go +++ b/nutanix/common_era_schema.go @@ -171,13 +171,7 @@ func timeMachineInfoSchema() *schema.Schema { }, }, }, - "tags": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Description: "description of schedule of time machine", - Elem: &schema.Schema{Type: schema.TypeString}, - }, + "tags": dataSourceEraDBInstanceTags(), }, }, } @@ -301,7 +295,7 @@ func buildTimeMachineFromResourceData(set *schema.Set) *era.Timemachineinfo { Description: tMap["description"].(string), Slaid: 
tMap["slaid"].(string), Schedule: *buildTimeMachineSchedule(tMap["schedule"].(*schema.Set)), // NULL Pointer check - Tags: tMap["tags"].(*schema.Set).List(), + Tags: expandTags(tMap["tags"].([]interface{})), Autotunelogdrive: tMap["autotunelogdrive"].(bool), } } diff --git a/nutanix/data_source_nutanix_ndb_database.go b/nutanix/data_source_nutanix_ndb_database.go index f63cf6cfd..85ba95076 100644 --- a/nutanix/data_source_nutanix_ndb_database.go +++ b/nutanix/data_source_nutanix_ndb_database.go @@ -2108,11 +2108,13 @@ func dataSourceEraDBInstanceMetadata() *schema.Schema { func dataSourceEraDBInstanceTags() *schema.Schema { return &schema.Schema{ Type: schema.TypeList, + Optional: true, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "tag_id": { Type: schema.TypeString, + Optional: true, Computed: true, }, "entity_id": { @@ -2125,10 +2127,12 @@ func dataSourceEraDBInstanceTags() *schema.Schema { }, "value": { Type: schema.TypeString, + Optional: true, Computed: true, }, "tag_name": { Type: schema.TypeString, + Optional: true, Computed: true, }, }, diff --git a/nutanix/resource_nutanix_ndb_database.go b/nutanix/resource_nutanix_ndb_database.go index 123e396ce..2d6eae4a6 100644 --- a/nutanix/resource_nutanix_ndb_database.go +++ b/nutanix/resource_nutanix_ndb_database.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) var ( @@ -219,6 +220,41 @@ func resourceDatabaseInstance() *schema.Resource { }, }, + "maintenance_tasks": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "maintenance_window_id": { + Type: schema.TypeString, + Optional: true, + }, + "tasks": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "task_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"OS_PATCHING", "DB_PATCHING"}, false), + }, + "pre_command": { + Type: schema.TypeString, + Optional: true, + }, + "post_command": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + // Computed values "owner_id": { @@ -398,6 +434,8 @@ func buildEraRequest(d *schema.ResourceData) (*era.ProvisionDatabaseRequest, err Nodes: buildNodesFromResourceData(d.Get("nodes").(*schema.Set)), Autotunestagingdrive: d.Get("autotunestagingdrive").(bool), VMPassword: utils.StringPtr(d.Get("vm_password").(string)), + Tags: expandTags(d.Get("tags").([]interface{})), + MaintenanceTasks: expandMaintenanceTasks(d.Get("maintenance_tasks").([]interface{})), }, nil } @@ -555,10 +593,15 @@ func updateDatabaseInstance(ctx context.Context, d *schema.ResourceData, m inter name := d.Get("name").(string) description := d.Get("description").(string) + tags := make([]*era.Tags, 0) + if d.HasChange("tags") { + tags = expandTags(d.Get("tags").([]interface{})) + } + updateReq := era.UpdateDatabaseRequest{ Name: name, Description: description, - Tags: []interface{}{}, + Tags: tags, Resetname: true, Resetdescription: true, Resettags: true, @@ -740,3 +783,71 @@ func eraRefresh(ctx context.Context, conn *era.Client, opID era.GetOperationRequ return opRes, "PENDING", nil } } + +func expandTags(pr []interface{}) []*era.Tags { + if len(pr) > 0 { + tags := make([]*era.Tags, 0) + + for _, v := range pr { + tag := 
&era.Tags{} + val := v.(map[string]interface{}) + + if tagName, ok := val["tag_name"]; ok { + tag.TagName = tagName.(string) + } + + if tagID, ok := val["tag_id"]; ok { + tag.TagID = tagID.(string) + } + + if tagVal, ok := val["value"]; ok { + tag.Value = tagVal.(string) + } + tags = append(tags, tag) + } + return tags + } + return nil +} + +func expandMaintenanceTasks(pr []interface{}) *era.MaintenanceTasks { + if len(pr) > 0 { + maintenanceTask := &era.MaintenanceTasks{} + val := pr[0].(map[string]interface{}) + + if windowID, ok := val["maintenance_window_id"]; ok { + maintenanceTask.MaintenanceWindowID = utils.StringPtr(windowID.(string)) + } + + if task, ok := val["tasks"]; ok { + taskList := make([]*era.Tasks, 0) + tasks := task.([]interface{}) + + for _, v := range tasks { + out := &era.Tasks{} + value := v.(map[string]interface{}) + + if taskType, ok := value["task_type"]; ok { + out.TaskType = utils.StringPtr(taskType.(string)) + } + + payload := &era.Payload{} + prepostCommand := &era.PrePostCommand{} + if preCommand, ok := value["pre_command"]; ok { + prepostCommand.PreCommand = utils.StringPtr(preCommand.(string)) + } + if postCommand, ok := value["post_command"]; ok { + prepostCommand.PostCommand = utils.StringPtr(postCommand.(string)) + } + + payload.PrePostCommand = prepostCommand + out.Payload = payload + + taskList = append(taskList, out) + } + maintenanceTask.Tasks = taskList + } + return maintenanceTask + } + return nil +} From 9b9d88f07d178363c89b68846c702196d613d03f Mon Sep 17 00:00:00 2001 From: Abhishekism9450 <32683845+Abhishekism9450@users.noreply.github.com> Date: Tue, 3 Jan 2023 15:50:37 +0530 Subject: [PATCH 02/18] filter based on database type (#513) --- nutanix/data_source_nutanix_ndb_database.go | 54 ++++++++++++++++--- nutanix/data_source_nutanix_ndb_databases.go | 37 +++++++++++-- .../data_source_nutanix_ndb_databases_test.go | 27 ++++++++++ 3 files changed, 106 insertions(+), 12 deletions(-) diff --git a/nutanix/data_source_nutanix_ndb_database.go b/nutanix/data_source_nutanix_ndb_database.go index 85ba95076..be5e84d4f 100644 --- a/nutanix/data_source_nutanix_ndb_database.go +++ b/nutanix/data_source_nutanix_ndb_database.go @@ -336,7 +336,7 @@ func flattenDBInstanceMetadata(pr *Era.DBInstanceMetadata) []map[string]interfac pmeta := make(map[string]interface{}) pmeta["secure_info"] = pr.Secureinfo pmeta["info"] = pr.Info - pmeta["deregister_info"] = pr.Deregisterinfo + pmeta["deregister_info"] = flattenDeRegiserInfo(pr.Deregisterinfo) pmeta["tm_activate_operation_id"] = pr.Tmactivateoperationid pmeta["created_dbservers"] = pr.Createddbservers pmeta["registered_dbservers"] = pr.Registereddbservers @@ -845,7 +845,7 @@ func flattenTimeMachineMetadata(pr *Era.TimeMachineMetadata) []map[string]interf tm["secure_info"] = pr.SecureInfo tm["info"] = pr.Info - tm["deregister_info"] = pr.DeregisterInfo + tm["deregister_info"] = flattenDeRegiserInfo(pr.DeregisterInfo) tm["capability_reset_time"] = pr.CapabilityResetTime tm["auto_heal"] = pr.AutoHeal tm["auto_heal_snapshot_count"] = pr.AutoHealSnapshotCount @@ -880,6 +880,20 @@ func flattenTimeMachineMetadata(pr *Era.TimeMachineMetadata) []map[string]interf return nil } +func flattenDeRegiserInfo(pr *Era.DeregisterInfo) []map[string]interface{} { + if pr != nil { + Deregis := make([]map[string]interface{}, 0) + regis := map[string]interface{}{} + + regis["message"] = pr.Message + regis["operations"] = utils.StringValueSlice(pr.Operations) + + Deregis = append(Deregis, regis) + return Deregis + } + return nil +} + func 
dataSourceEraDatabaseProperties() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeList,
		Computed: true,
@@ -1569,10 +1583,22 @@ func dataSourceEraTimeMachine() *schema.Schema {
 				},
 			},
 			"deregister_info": {
-				Type: schema.TypeMap,
+				Type: schema.TypeList,
 				Computed: true,
-				Elem: &schema.Schema{
-					Type: schema.TypeString,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"message": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"operations": {
+							Type:     schema.TypeList,
+							Computed: true,
+							Elem: &schema.Schema{
+								Type: schema.TypeString,
+							},
+						},
+					},
 				},
 			},
 			"capability_reset_time": {
@@ -2016,10 +2042,22 @@ func dataSourceEraDBInstanceMetadata() *schema.Schema {
 				},
 			},
 			"deregister_info": {
-				Type: schema.TypeMap,
+				Type: schema.TypeList,
 				Computed: true,
-				Elem: &schema.Schema{
-					Type: schema.TypeString,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"message": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"operations": {
+							Type:     schema.TypeList,
+							Computed: true,
+							Elem: &schema.Schema{
+								Type: schema.TypeString,
+							},
+						},
+					},
 				},
 			},
 			"tm_activate_operation_id": {
diff --git a/nutanix/data_source_nutanix_ndb_databases.go b/nutanix/data_source_nutanix_ndb_databases.go
index edba77b9c..108bfa17d 100644
--- a/nutanix/data_source_nutanix_ndb_databases.go
+++ b/nutanix/data_source_nutanix_ndb_databases.go
@@ -6,6 +6,7 @@ import (
 	"github.com/hashicorp/go-uuid"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 	era "github.com/terraform-providers/terraform-provider-nutanix/client/era"
 )
 
func dataSourceNutanixEraDatabases() *schema.Resource {
 	return &schema.Resource{
 		ReadContext: dataSourceNutanixEraDatabaseIntancesRead,
 		Schema: map[string]*schema.Schema{
+			"database_type": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ValidateFunc: validation.StringInSlice([]string{"oracle_database",
+					"postgres_database", "sqlserver_database", "mariadb_database",
+					"mysql_database", "mssql_database", "saphana_database", "mongodb_database"}, false)},
 			"database_instances": {
 				Type:     schema.TypeList,
 				Computed: true,
 
func dataSourceNutanixEraDatabaseIntancesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	conn := meta.(*Client).Era
-
-	resp, err := conn.Service.ListDatabaseInstance(ctx)
-	if err != nil {
-		return diag.FromErr(err)
+	var resp *era.ListDatabaseInstance
+	var err error
+	if dbEng, ok := d.GetOk("database_type"); ok {
+		// todo: once era exposes a query param for the db engine type, call that API here.
+		// until then, list all databases and filter them by the provided engine type.
+		respon, er := conn.Service.ListDatabaseInstance(ctx)
+		if er != nil {
+			return diag.FromErr(er)
+		}
+		resp, err = filterDatabaseBasedOnDatabaseEngine(respon, dbEng.(string))
+		if err != nil {
+			return diag.FromErr(err)
+		}
+	} else {
+		resp, err = conn.Service.ListDatabaseInstance(ctx)
+		if err != nil {
+			return diag.FromErr(err)
+		}
 	}
 
 	if e := d.Set("database_instances", flattenDatabaseIntancesList(resp)); e != nil {
@@ -229,3 +247,14 @@ func flattenDatabaseIntancesList(db *era.ListDatabaseInstance) []map[string]interface{} {
 	}
 	return nil
 }
+
+func filterDatabaseBasedOnDatabaseEngine(resp *era.ListDatabaseInstance, dbengine string) (*era.ListDatabaseInstance, error) {
+	found := make(era.ListDatabaseInstance, 0)
+
+	for _, v := range *resp {
+		if dbengine == v.Type {
+			found = append(found, v)
+		}
+	}
+	return &found, 
nil +} diff --git a/nutanix/data_source_nutanix_ndb_databases_test.go b/nutanix/data_source_nutanix_ndb_databases_test.go index a07b80ca5..911be9377 100644 --- a/nutanix/data_source_nutanix_ndb_databases_test.go +++ b/nutanix/data_source_nutanix_ndb_databases_test.go @@ -25,8 +25,35 @@ func TestAccEraDatabasesDataSource_basic(t *testing.T) { }) } +func TestAccEraDatabasesDataSource_ByFilters(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabasesDataSourceConfigByFilters(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.metadata.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.time_zone"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.id"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.linked_databases.#"), + ), + }, + }, + }) +} + func testAccEraDatabasesDataSourceConfig() string { return ` data "nutanix_ndb_databases" "test" {} ` } + +func testAccEraDatabasesDataSourceConfigByFilters() string { + return ` + data "nutanix_ndb_databases" "test" { + database_type = "postgres_database" + } +` +} From 882972947baf0dc914aed89da7b2f3ae808885d4 Mon Sep 17 00:00:00 2001 From: Abhishekism9450 <32683845+Abhishekism9450@users.noreply.github.com> Date: Tue, 10 Jan 2023 13:35:50 +0530 Subject: [PATCH 03/18] sla resource (#508) --- client/era/era_service.go | 35 +++ client/era/era_structs.go | 14 ++ examples/ndb/sla/main.tf | 57 +++++ examples/ndb/sla/terraform.tfvars | 3 + examples/ndb/sla/variables.tf | 9 + nutanix/provider.go | 1 + nutanix/resource_nutanix_ndb_sla.go | 276 +++++++++++++++++++++++ nutanix/resource_nutanix_ndb_sla_test.go | 98 ++++++++ 8 files changed, 493 insertions(+) create mode 100644 examples/ndb/sla/main.tf create mode 100644 examples/ndb/sla/terraform.tfvars create mode 100644 examples/ndb/sla/variables.tf create mode 100644 nutanix/resource_nutanix_ndb_sla.go create mode 100644 nutanix/resource_nutanix_ndb_sla_test.go diff --git a/client/era/era_service.go b/client/era/era_service.go index c21b51cfb..019dbd53d 100644 --- a/client/era/era_service.go +++ b/client/era/era_service.go @@ -24,6 +24,9 @@ type Service interface { ListClusters(ctx context.Context) (*ClusterListResponse, error) GetSLA(ctx context.Context, id string, name string) (*ListSLAResponse, error) ListSLA(ctx context.Context) (*SLAResponse, error) + CreateSLA(ctx context.Context, req *SLAIntentInput) (*ListSLAResponse, error) + UpdateSLA(ctx context.Context, req *SLAIntentInput, id string) (*ListSLAResponse, error) + DeleteSLA(ctx context.Context, uuid string) (*SLADeleteResponse, error) } type ServiceClient struct { @@ -267,3 +270,35 @@ func (sc ServiceClient) ListDatabaseInstance(ctx context.Context) (*ListDatabase return res, sc.c.Do(ctx, httpReq, res) } + +func (sc ServiceClient) CreateSLA(ctx context.Context, req *SLAIntentInput) (*ListSLAResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/slas", req) + res := new(ListSLAResponse) + + if err != nil { + return nil, err + } + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteSLA(ctx context.Context, uuid string) (*SLADeleteResponse, error) { + httpReq, 
err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/slas/%s", uuid), nil)
+	if err != nil {
+		return nil, err
+	}
+	res := new(SLADeleteResponse)
+
+	return res, sc.c.Do(ctx, httpReq, res)
+}
+
+func (sc ServiceClient) UpdateSLA(ctx context.Context, req *SLAIntentInput, id string) (*ListSLAResponse, error) {
+	path := fmt.Sprintf("/slas/%s", id)
+	httpReq, err := sc.c.NewRequest(ctx, http.MethodPut, path, req)
+	if err != nil {
+		return nil, err
+	}
+	res := new(ListSLAResponse)
+
+	return res, sc.c.Do(ctx, httpReq, res)
+}
diff --git a/client/era/era_structs.go b/client/era/era_structs.go
index e94783697..af9024815 100644
--- a/client/era/era_structs.go
+++ b/client/era/era_structs.go
@@ -958,3 +958,17 @@ type TimeMachineMetadata struct {
 	LastSuccessfulLogCatchupPostHealWithResetCapability interface{} `json:"lastSuccessfulLogCatchupPostHealWithResetCapability,omitempty"`
 	AutoSnapshotRetryInfo interface{} `json:"autoSnapshotRetryInfo,omitempty"`
 }
+
+type SLAIntentInput struct {
+	Name                *string `json:"name,omitempty"`
+	Description         *string `json:"description,omitempty"`
+	ContinuousRetention *int    `json:"continuousRetention,omitempty"`
+	DailyRetention      *int    `json:"dailyRetention,omitempty"`
+	WeeklyRetention     *int    `json:"weeklyRetention,omitempty"`
+	MonthlyRetention    *int    `json:"monthlyRetention,omitempty"`
+	QuarterlyRetention  *int    `json:"quarterlyRetention,omitempty"`
+}
+
+type SLADeleteResponse struct {
+	Status *string `json:"status,omitempty"`
+}
diff --git a/examples/ndb/sla/main.tf b/examples/ndb/sla/main.tf
new file mode 100644
index 000000000..9222903d9
--- /dev/null
+++ b/examples/ndb/sla/main.tf
@@ -0,0 +1,57 @@
+terraform{
+    required_providers {
+        nutanix = {
+            source = "nutanix/nutanix"
+            version = "1.8.0"
+        }
+    }
+}
+
+#defining nutanix configuration
+provider "nutanix"{
+    ndb_username = var.ndb_username
+    ndb_password = var.ndb_password
+    ndb_endpoint = var.ndb_endpoint
+    insecure = true
+}
+
+## resource to create sla
+
+resource "nutanix_ndb_sla" "sla" {
+    // name is required
+    name= "test-sla"
+    // desc is optional
+    description = "here goes description"
+    // Retention args are optional with default values
+    continuous_retention = 30
+    daily_retention = 3
+    weekly_retention = 2
+    monthly_retention= 1
+    quarterly_retention=1
+}
+
+## data source sla with sla_name
+data "nutanix_ndb_sla" "sla_by_name"{
+    sla_name = "{{ SLA_NAME }}"
+}
+
+output "sla_by_name" {
+    value = data.nutanix_ndb_sla.sla_by_name
+}
+
+## data source sla with sla_id
+data "nutanix_ndb_sla" "sla_by_id"{
+    sla_id = "{{ SLA_ID }}"
+}
+
+output "sla_by_id" {
+    value = data.nutanix_ndb_sla.sla_by_id
+}
+
+## List SLAs
+
+data "nutanix_ndb_slas" "slas"{}
+
+output "sla_list" {
+    value = data.nutanix_ndb_slas.slas
+}
\ No newline at end of file
diff --git a/examples/ndb/sla/terraform.tfvars b/examples/ndb/sla/terraform.tfvars
new file mode 100644
index 000000000..3bf972a77
--- /dev/null
+++ b/examples/ndb/sla/terraform.tfvars
@@ -0,0 +1,3 @@
+ndb_password = "password"
+ndb_endpoint = "10.xx.xx.xx"
+ndb_username = "username"
diff --git a/examples/ndb/sla/variables.tf b/examples/ndb/sla/variables.tf
new file mode 100644
index 000000000..32cdf2d45
--- /dev/null
+++ b/examples/ndb/sla/variables.tf
@@ -0,0 +1,9 @@
+variable "ndb_username" {
+    type = string
+}
+variable "ndb_password" {
+    type = string
+}
+variable "ndb_endpoint" {
+    type = string
+}
\ No newline at end of file
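For reference, the SLA created above can back a database's time machine. A minimal pairing sketch, assuming a database provisioned as in examples/ndb/database_instance (all other required provisioning arguments are elided here and must be supplied as in that example):

resource "nutanix_ndb_database" "dbp" {
    # databasetype, profile IDs, nodes and the other required
    # provisioning arguments go here, as in the full example
    timemachineinfo {
        name  = "test-instance-tm"
        slaid = nutanix_ndb_sla.sla.id
    }
}

diff --git a/nutanix/provider.go b/nutanix/provider.go
index a0aa1e0d5..b49dc71c6 100644
--- a/nutanix/provider.go
+++ b/nutanix/provider.go
@@ -225,6 +225,7 @@ func 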
Provider() *schema.Provider { "nutanix_static_routes": resourceNutanixStaticRoute(), "nutanix_user_groups": resourceNutanixUserGroups(), "nutanix_ndb_database": resourceDatabaseInstance(), + "nutanix_ndb_sla": resourceNutanixNDBSla(), }, ConfigureContextFunc: providerConfigure, } diff --git a/nutanix/resource_nutanix_ndb_sla.go b/nutanix/resource_nutanix_ndb_sla.go new file mode 100644 index 000000000..826ac5736 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_sla.go @@ -0,0 +1,276 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBSla() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBSlaCreate, + ReadContext: resourceNutanixNDBSlaRead, + UpdateContext: resourceNutanixNDBSlaUpdate, + DeleteContext: resourceNutanixNDBSlaDelete, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "continuous_retention": { + Type: schema.TypeInt, + Optional: true, + Default: "30", + }, + "daily_retention": { + Type: schema.TypeInt, + Optional: true, + Default: "7", + }, + "weekly_retention": { + Type: schema.TypeInt, + Optional: true, + Default: "2", + }, + "monthly_retention": { + Type: schema.TypeInt, + Optional: true, + Default: "2", + }, + "quarterly_retention": { + Type: schema.TypeInt, + Optional: true, + Default: "1", + }, + "yearly_retention": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + // computed + "unique_name": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "system_sla": { + Type: schema.TypeBool, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "reference_count": { + Type: schema.TypeInt, + Computed: true, + }, + "pitr_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "current_active_frequency": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceNutanixNDBSlaCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.SLAIntentInput{} + + if name, ok1 := d.GetOk("name"); ok1 { + req.Name = utils.StringPtr(name.(string)) + } + + if desc, ok1 := d.GetOk("description"); ok1 { + req.Description = utils.StringPtr(desc.(string)) + } + + if conRen, ok1 := d.GetOk("continuous_retention"); ok1 { + req.ContinuousRetention = utils.IntPtr(conRen.(int)) + } + + if dailyRen, ok1 := d.GetOk("daily_retention"); ok1 { + req.DailyRetention = utils.IntPtr(dailyRen.(int)) + } + if weeklyRen, ok1 := d.GetOk("weekly_retention"); ok1 { + req.WeeklyRetention = utils.IntPtr(weeklyRen.(int)) + } + + if monthRen, ok1 := d.GetOk("monthly_retention"); ok1 { + req.MonthlyRetention = utils.IntPtr(monthRen.(int)) + } + if quartRen, ok1 := d.GetOk("quarterly_retention"); ok1 { + req.QuarterlyRetention = utils.IntPtr(quartRen.(int)) + } + + resp, err := conn.Service.CreateSLA(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(*resp.ID) + return resourceNutanixNDBSlaRead(ctx, d, meta) +} + +func resourceNutanixNDBSlaRead(ctx context.Context, d 
*schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + // get the sla + + resp, err := conn.Service.GetSLA(ctx, d.Id(), "") + if err != nil { + return diag.FromErr(err) + } + + if err = d.Set("name", resp.Name); err != nil { + return diag.Errorf("error setting name for sla %s: %s", d.Id(), err) + } + + if err = d.Set("description", resp.Description); err != nil { + return diag.Errorf("error setting description for sla %s: %s", d.Id(), err) + } + + if err = d.Set("continuous_retention", resp.Continuousretention); err != nil { + return diag.Errorf("error setting continuous_retention for sla %s: %s", d.Id(), err) + } + + if err = d.Set("daily_retention", resp.Dailyretention); err != nil { + return diag.Errorf("error setting daily_retention for sla %s: %s", d.Id(), err) + } + + if err = d.Set("weekly_retention", resp.Weeklyretention); err != nil { + return diag.Errorf("error setting weekly_retention for sla %s: %s", d.Id(), err) + } + + if err = d.Set("monthly_retention", resp.Monthlyretention); err != nil { + return diag.Errorf("error setting monthly_retention for sla %s: %s", d.Id(), err) + } + + if err = d.Set("quarterly_retention", resp.Quarterlyretention); err != nil { + return diag.Errorf("error setting quarterly_retention for sla %s: %s", d.Id(), err) + } + + if err = d.Set("unique_name", resp.Uniquename); err != nil { + return diag.Errorf("error setting unique_name for sla %s: %s", d.Id(), err) + } + + if err = d.Set("owner_id", resp.Ownerid); err != nil { + return diag.Errorf("error setting owner_id for sla %s: %s", d.Id(), err) + } + + if err = d.Set("system_sla", resp.Systemsla); err != nil { + return diag.Errorf("error setting system_sla for sla %s: %s", d.Id(), err) + } + + if err = d.Set("date_created", resp.Datecreated); err != nil { + return diag.Errorf("error setting date_created for sla %s: %s", d.Id(), err) + } + + if err = d.Set("date_modified", resp.Datemodified); err != nil { + return diag.Errorf("error setting date_modified for sla %s: %s", d.Id(), err) + } + + if err = d.Set("yearly_retention", resp.Yearlyretention); err != nil { + return diag.Errorf("error setting yearly_retention for sla %s: %s", d.Id(), err) + } + + if err = d.Set("reference_count", resp.Referencecount); err != nil { + return diag.Errorf("error setting reference_count for sla %s: %s", d.Id(), err) + } + + if err = d.Set("pitr_enabled", resp.PitrEnabled); err != nil { + return diag.Errorf("error setting pitr_enabled for sla %s: %s", d.Id(), err) + } + + if err = d.Set("current_active_frequency", resp.CurrentActiveFrequency); err != nil { + return diag.Errorf("error setting current_active_frequency for sla %s: %s", d.Id(), err) + } + return nil +} + +func resourceNutanixNDBSlaUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + req := &era.SLAIntentInput{} + // get the current sla + + resp, er := conn.Service.GetSLA(ctx, d.Id(), "") + if er != nil { + return diag.FromErr(er) + } + + if resp != nil { + req.ContinuousRetention = &resp.Continuousretention + req.DailyRetention = &resp.Dailyretention + req.Description = resp.Description + req.MonthlyRetention = &resp.Monthlyretention + req.Name = resp.Name + req.QuarterlyRetention = &resp.Quarterlyretention + req.WeeklyRetention = &resp.Weeklyretention + } + + if d.HasChange("name") { + req.Name = utils.StringPtr(d.Get("name").(string)) + } + + if d.HasChange("description") { + req.Description = utils.StringPtr(d.Get("description").(string)) + } + + if 
d.HasChange("continuous_retention") {
+		req.ContinuousRetention = utils.IntPtr(d.Get("continuous_retention").(int))
+	}
+
+	if d.HasChange("daily_retention") {
+		req.DailyRetention = utils.IntPtr(d.Get("daily_retention").(int))
+	}
+	if d.HasChange("weekly_retention") {
+		req.WeeklyRetention = utils.IntPtr(d.Get("weekly_retention").(int))
+	}
+
+	if d.HasChange("monthly_retention") {
+		req.MonthlyRetention = utils.IntPtr(d.Get("monthly_retention").(int))
+	}
+	if d.HasChange("quarterly_retention") {
+		req.QuarterlyRetention = utils.IntPtr(d.Get("quarterly_retention").(int))
+	}
+
+	_, err := conn.Service.UpdateSLA(ctx, req, d.Id())
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	return resourceNutanixNDBSlaRead(ctx, d, meta)
+}
+
+func resourceNutanixNDBSlaDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*Client).Era
+
+	resp, err := conn.Service.DeleteSLA(ctx, d.Id())
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	if utils.StringValue(resp.Status) == "success" {
+		d.SetId("")
+	}
+	return nil
+}
diff --git a/nutanix/resource_nutanix_ndb_sla_test.go b/nutanix/resource_nutanix_ndb_sla_test.go
new file mode 100644
index 000000000..ebca5a867
--- /dev/null
+++ b/nutanix/resource_nutanix_ndb_sla_test.go
@@ -0,0 +1,98 @@
+package nutanix
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+)
+
+const resourceNameSLA = "nutanix_ndb_sla.acctest-managed"
+
+func TestAccEra_Slabasic(t *testing.T) {
+	name := "test-sla-tf"
+	desc := "this is sla desc"
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccEraPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccEraSLAConfig(name, desc),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(resourceNameSLA, "name", name),
+					resource.TestCheckResourceAttr(resourceNameSLA, "description", desc),
+					resource.TestCheckResourceAttr(resourceNameSLA, "continuous_retention", "30"),
+					resource.TestCheckResourceAttr(resourceNameSLA, "daily_retention", "3"),
+					resource.TestCheckResourceAttr(resourceNameSLA, "weekly_retention", "2"),
+					resource.TestCheckResourceAttr(resourceNameSLA, "monthly_retention", "1"),
+					resource.TestCheckResourceAttr(resourceNameSLA, "quarterly_retention", "1"),
+				),
+			},
+		},
+	})
+}
+
+func TestAccEra_SlaUpdate(t *testing.T) {
+	name := "test-sla-tf"
+	desc := "this is sla desc"
+	updatedName := "test-sla-updated"
+	updatedDesc := "desc is updated"
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccEraPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccEraSLAConfig(name, desc),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(resourceNameSLA, "name", name),
+					resource.TestCheckResourceAttr(resourceNameSLA, "description", desc),
+					resource.TestCheckResourceAttr(resourceNameSLA, "continuous_retention", "30"),
+					resource.TestCheckResourceAttr(resourceNameSLA, "daily_retention", "3"),
+					resource.TestCheckResourceAttr(resourceNameSLA, "weekly_retention", "2"),
+					resource.TestCheckResourceAttr(resourceNameSLA, "monthly_retention", "1"),
+					resource.TestCheckResourceAttr(resourceNameSLA, "quarterly_retention", "1"),
+				),
+			},
+			{
+				Config: testAccEraSLAConfigUpdated(updatedName, updatedDesc),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(resourceNameSLA, "name", updatedName),
+					resource.TestCheckResourceAttr(resourceNameSLA, "description", updatedDesc),
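+					// the remaining checks assert the new retention values set by
+					// testAccEraSLAConfigUpdated below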
resource.TestCheckResourceAttr(resourceNameSLA, "continuous_retention", "25"), + resource.TestCheckResourceAttr(resourceNameSLA, "daily_retention", "1"), + resource.TestCheckResourceAttr(resourceNameSLA, "weekly_retention", "3"), + resource.TestCheckResourceAttr(resourceNameSLA, "monthly_retention", "1"), + resource.TestCheckResourceAttr(resourceNameSLA, "quarterly_retention", "3"), + ), + }, + }, + }) +} + +func testAccEraSLAConfig(name, desc string) string { + return fmt.Sprintf(` + resource "nutanix_ndb_sla" "acctest-managed" { + name= "%[1]s" + description = "%[2]s" + continuous_retention = 30 + daily_retention = 3 + weekly_retention = 2 + monthly_retention= 1 + quarterly_retention=1 + } + `, name, desc) +} + +func testAccEraSLAConfigUpdated(name, desc string) string { + return fmt.Sprintf(` + resource "nutanix_ndb_sla" "acctest-managed" { + name= "%[1]s" + description = "%[2]s" + continuous_retention = 25 + daily_retention = 1 + weekly_retention = 3 + monthly_retention= 1 + quarterly_retention=3 + } + `, name, desc) +} From 1e3c112ce8b6b3022d89b649c60005bff5e18a4b Mon Sep 17 00:00:00 2001 From: Abhishekism9450 <32683845+Abhishekism9450@users.noreply.github.com> Date: Tue, 10 Jan 2023 17:50:42 +0530 Subject: [PATCH 04/18] Feat/m ndb database restore (#515) --- client/era/era_service.go | 21 ++ client/era/era_structs.go | 13 + examples/ndb/database_restore/main.tf | 31 ++ .../ndb/database_restore/terraform.tfvars | 4 + examples/ndb/database_restore/variables.tf | 10 + examples/ndb/log_catchups/main.tf | 23 ++ examples/ndb/log_catchups/terraform.tfvars | 4 + examples/ndb/log_catchups/variables.tf | 10 + nutanix/common_era_schema.go | 18 ++ nutanix/provider.go | 2 + nutanix/resource_nutanix_ndb_database.go | 7 +- .../resource_nutanix_ndb_database_restore.go | 270 ++++++++++++++++++ nutanix/resource_nutanix_ndb_log_catchups.go | 137 +++++++++ 13 files changed, 549 insertions(+), 1 deletion(-) create mode 100644 examples/ndb/database_restore/main.tf create mode 100644 examples/ndb/database_restore/terraform.tfvars create mode 100644 examples/ndb/database_restore/variables.tf create mode 100644 examples/ndb/log_catchups/main.tf create mode 100644 examples/ndb/log_catchups/terraform.tfvars create mode 100644 examples/ndb/log_catchups/variables.tf create mode 100644 nutanix/resource_nutanix_ndb_database_restore.go create mode 100644 nutanix/resource_nutanix_ndb_log_catchups.go diff --git a/client/era/era_service.go b/client/era/era_service.go index 019dbd53d..2d79cbd40 100644 --- a/client/era/era_service.go +++ b/client/era/era_service.go @@ -27,6 +27,8 @@ type Service interface { CreateSLA(ctx context.Context, req *SLAIntentInput) (*ListSLAResponse, error) UpdateSLA(ctx context.Context, req *SLAIntentInput, id string) (*ListSLAResponse, error) DeleteSLA(ctx context.Context, uuid string) (*SLADeleteResponse, error) + DatabaseRestore(ctx context.Context, databaseID string, req *DatabaseRestoreRequest) (*ProvisionDatabaseResponse, error) + LogCatchUp(ctx context.Context, id string, req *LogCatchUpRequest) (*ProvisionDatabaseResponse, error) } type ServiceClient struct { @@ -299,6 +301,25 @@ func (sc ServiceClient) UpdateSLA(ctx context.Context, req *SLAIntentInput, id s return nil, err } res := new(ListSLAResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DatabaseRestore(ctx context.Context, databaseID string, req *DatabaseRestoreRequest) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, 
fmt.Sprintf("/databases/%s/restore", databaseID), req) + if err != nil { + return nil, err + } + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) LogCatchUp(ctx context.Context, tmsID string, req *LogCatchUpRequest) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/tms/%s/log-catchups", tmsID), req) + if err != nil { + return nil, err + } + + res := new(ProvisionDatabaseResponse) return res, sc.c.Do(ctx, httpReq, res) } diff --git a/client/era/era_structs.go b/client/era/era_structs.go index af9024815..5fe5ccc3b 100644 --- a/client/era/era_structs.go +++ b/client/era/era_structs.go @@ -972,3 +972,16 @@ type SLAIntentInput struct { type SLADeleteResponse struct { Status *string `json:"status,omitempty"` } + +type DatabaseRestoreRequest struct { + SnapshotID *string `json:"snapshotId,omitempty"` + LatestSnapshot *string `json:"latestSnapshot,omitempty"` + UserPitrTimestamp *string `json:"userPitrTimestamp,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` + ActionArguments []*Actionarguments `json:"actionArguments,omitempty"` +} + +type LogCatchUpRequest struct { + ForRestore bool `json:"for_restore,omitempty"` + Actionarguments []*Actionarguments `json:"actionArguments,omitempty"` +} diff --git a/examples/ndb/database_restore/main.tf b/examples/ndb/database_restore/main.tf new file mode 100644 index 000000000..31cffdc52 --- /dev/null +++ b/examples/ndb/database_restore/main.tf @@ -0,0 +1,31 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + +## database_restore with Point in Time + +resource "nutanix_ndb_database_restore" "name" { + database_id= "{{ database_id }}" + user_pitr_timestamp = "2022-12-28 00:54:30" + time_zone_pitr = "Asia/Calcutta" +} + +## database_restore with snapshot uuid + +resource "nutanix_ndb_database_restore" "name" { + database_id= "{{ database_id }}" + snapshot_id= "{{ snapshot id }}" +} \ No newline at end of file diff --git a/examples/ndb/database_restore/terraform.tfvars b/examples/ndb/database_restore/terraform.tfvars new file mode 100644 index 000000000..4f5de990b --- /dev/null +++ b/examples/ndb/database_restore/terraform.tfvars @@ -0,0 +1,4 @@ +#define values to the variables to be used in terraform file_username = "admin" +ndb_password = "password" +ndb_endpoint = "10.xx.xx.xx" +ndb_username = "username" diff --git a/examples/ndb/database_restore/variables.tf b/examples/ndb/database_restore/variables.tf new file mode 100644 index 000000000..1a0cb89bf --- /dev/null +++ b/examples/ndb/database_restore/variables.tf @@ -0,0 +1,10 @@ +#define the type of variables to be used in terraform file +variable "ndb_username" { + type = string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} diff --git a/examples/ndb/log_catchups/main.tf b/examples/ndb/log_catchups/main.tf new file mode 100644 index 000000000..0a4765731 --- /dev/null +++ b/examples/ndb/log_catchups/main.tf @@ -0,0 +1,23 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true 
diff --git a/examples/ndb/log_catchups/main.tf b/examples/ndb/log_catchups/main.tf
new file mode 100644
index 000000000..0a4765731
--- /dev/null
+++ b/examples/ndb/log_catchups/main.tf
@@ -0,0 +1,23 @@
+terraform{
+    required_providers {
+        nutanix = {
+            source = "nutanix/nutanix"
+            version = "1.8.0"
+        }
+    }
+}
+
+#defining nutanix configuration
+provider "nutanix"{
+    ndb_username = var.ndb_username
+    ndb_password = var.ndb_password
+    ndb_endpoint = var.ndb_endpoint
+    insecure = true
+}
+
+
+## resource to perform log catchup
+
+resource "nutanix_ndb_log_catchups" "name" {
+    time_machine_id = "{{ timeMachineID }}"
+}
\ No newline at end of file
diff --git a/examples/ndb/log_catchups/terraform.tfvars b/examples/ndb/log_catchups/terraform.tfvars
new file mode 100644
index 000000000..4f5de990b
--- /dev/null
+++ b/examples/ndb/log_catchups/terraform.tfvars
@@ -0,0 +1,4 @@
+#define values to the variables to be used in terraform file
+ndb_password = "password"
+ndb_endpoint = "10.xx.xx.xx"
+ndb_username = "username"
diff --git a/examples/ndb/log_catchups/variables.tf b/examples/ndb/log_catchups/variables.tf
new file mode 100644
index 000000000..1a0cb89bf
--- /dev/null
+++ b/examples/ndb/log_catchups/variables.tf
@@ -0,0 +1,10 @@
+#define the type of variables to be used in terraform file
+variable "ndb_username" {
+    type = string
+}
+variable "ndb_password" {
+    type = string
+}
+variable "ndb_endpoint" {
+    type = string
+}
diff --git a/nutanix/common_era_schema.go b/nutanix/common_era_schema.go
index bbe9ccc1e..538fe0786 100644
--- a/nutanix/common_era_schema.go
+++ b/nutanix/common_era_schema.go
@@ -1,12 +1,30 @@
 package nutanix
 
 import (
+	"context"
 	"strconv"
 
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	era "github.com/terraform-providers/terraform-provider-nutanix/client/era"
 )
 
+type dbID string
+
+const dbIDKey dbID = ""
+
+// these helpers pass the database ID between modules via the context, to avoid duplicate code.
+
+// NewContext returns a new Context that carries a provided key value
+func NewContext(ctx context.Context, dbID dbID) context.Context {
+	return context.WithValue(ctx, dbIDKey, dbID)
+}
+
+// FromContext extracts a value from a Context
+func FromContext(ctx context.Context) (string, bool) {
+	databaseID, ok := ctx.Value(dbIDKey).(dbID)
+	return string(databaseID), ok
+}
+
 func timeMachineInfoSchema() *schema.Schema {
 	return &schema.Schema{
 		Type: schema.TypeSet,
diff --git a/nutanix/provider.go b/nutanix/provider.go
index b49dc71c6..632159abd 100644
--- a/nutanix/provider.go
+++ b/nutanix/provider.go
@@ -226,6 +226,8 @@ func Provider() *schema.Provider {
 			"nutanix_user_groups": resourceNutanixUserGroups(),
 			"nutanix_ndb_database": resourceDatabaseInstance(),
 			"nutanix_ndb_sla": resourceNutanixNDBSla(),
+			"nutanix_ndb_database_restore": resourceNutanixNDBDatabaseRestore(),
+			"nutanix_ndb_log_catchups": resourceNutanixNDBLogCatchUps(),
 		},
 		ConfigureContextFunc: providerConfigure,
 	}
diff --git a/nutanix/resource_nutanix_ndb_database.go b/nutanix/resource_nutanix_ndb_database.go
index 2d6eae4a6..a05eae41f 100644
--- a/nutanix/resource_nutanix_ndb_database.go
+++ b/nutanix/resource_nutanix_ndb_database.go
@@ -445,7 +445,12 @@ func readDatabaseInstance(ctx context.Context, d *schema.ResourceData, m interfa
 		return diag.Errorf("era is nil")
 	}
 
-	databaseInstanceID := d.Id()
+	databaseInstanceID := ""
+	if databaseInsID, ok := FromContext(ctx); ok {
+		databaseInstanceID = databaseInsID
+	} else {
+		databaseInstanceID = d.Id()
+	}
 
 	resp, err := c.Service.GetDatabaseInstance(ctx, databaseInstanceID)
 	if err != nil {
diff --git a/nutanix/resource_nutanix_ndb_database_restore.go b/nutanix/resource_nutanix_ndb_database_restore.go
new file mode 100644
index 000000000..29d9156a2
--- /dev/null
+++ b/nutanix/resource_nutanix_ndb_database_restore.go
@@ -0,0 +1,270 @@
+package nutanix
+
+import (
+	"context"
+	"log"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBDatabaseRestore() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBDatabaseRestoreCreate, + ReadContext: resourceNutanixNDBDatabaseRestoreRead, + UpdateContext: resourceNutanixNDBDatabaseRestoreUpdate, + DeleteContext: resourceNutanixNDBDatabaseRestoreDelete, + Schema: map[string]*schema.Schema{ + "database_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"user_pitr_timestamp"}, + }, + "latest_snapshot": { + Type: schema.TypeString, + Optional: true, + }, + "user_pitr_timestamp": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"snapshot_id"}, + RequiredWith: []string{"time_zone_pitr"}, + }, + "time_zone_pitr": { + Type: schema.TypeString, + Optional: true, + }, + "restore_version": { + Type: schema.TypeInt, + Optional: true, + }, + // computed Values + + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + + "value": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + }, + }, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "tags": dataSourceEraDBInstanceTags(), + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "era_created": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "placeholder": { + Type: schema.TypeBool, + Computed: true, + }, + "database_name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "database_cluster_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "time_zone": { + Type: schema.TypeString, + Computed: true, + }, + "database_status": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_logical_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "info": dataSourceEraDatabaseInfo(), + "group_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "metadata": dataSourceEraDBInstanceMetadata(), + "metric": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "category": { + Type: schema.TypeString, + Computed: true, + }, + "parent_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_source_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "time_machine": dataSourceEraTimeMachine(), + "dbserver_logical_cluster": { + Type: 
schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_nodes": dataSourceEraDatabaseNodes(), + "linked_databases": dataSourceEraLinkedDatabases(), + }, + } +} + +func resourceNutanixNDBDatabaseRestoreCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + req := &era.DatabaseRestoreRequest{} + + databaseID := "" + if dbID, ok := d.GetOk("database_id"); ok { + databaseID = dbID.(string) + } + + if snapID, ok := d.GetOk("snapshot_id"); ok { + req.SnapshotID = utils.StringPtr(snapID.(string)) + } + + if latestsnap, ok := d.GetOk("latest_snapshot"); ok { + req.LatestSnapshot = utils.StringPtr(latestsnap.(string)) + } + + if uptime, ok := d.GetOk("user_pitr_timestamp"); ok { + req.UserPitrTimestamp = utils.StringPtr(uptime.(string)) + } + + if timezone, ok := d.GetOk("time_zone_pitr"); ok { + req.TimeZone = utils.StringPtr(timezone.(string)) + } + + // getting action arguments + + actargs := []*era.Actionarguments{} + + actargs = append(actargs, &era.Actionarguments{ + Name: "sameLocation", + Value: "true", + }) + + req.ActionArguments = actargs + + // call the database restore API + + resp, er := conn.Service.DatabaseRestore(ctx, databaseID, req) + if er != nil { + return diag.FromErr(er) + } + + // Get Operation ID from response of database restore and poll for the operation to get completed. + opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting to perform db restore (%s) to create: %s", resp.Entityid, errWaitTask) + } + + d.SetId(resp.Operationid) + return resourceNutanixNDBDatabaseRestoreRead(ctx, d, meta) +} + +func resourceNutanixNDBDatabaseRestoreRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + databaseID := d.Get("database_id").(string) + ctx = NewContext(ctx, dbID(databaseID)) + return readDatabaseInstance(ctx, d, meta) +} + +func resourceNutanixNDBDatabaseRestoreUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return resourceNutanixNDBDatabaseRestoreCreate(ctx, d, meta) +} + +func resourceNutanixNDBDatabaseRestoreDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} diff --git a/nutanix/resource_nutanix_ndb_log_catchups.go b/nutanix/resource_nutanix_ndb_log_catchups.go new file mode 100644 index 000000000..3008ea1ba --- /dev/null +++ b/nutanix/resource_nutanix_ndb_log_catchups.go @@ -0,0 +1,137 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func resourceNutanixNDBLogCatchUps() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBLogCatchUpsCreate, + ReadContext: 
resourceNutanixNDBLogCatchUpsRead, + UpdateContext: resourceNutanixNDBLogCatchUpsUpdate, + DeleteContext: resourceNutanixNDBLogCatchUpsDelete, + Schema: map[string]*schema.Schema{ + "time_machine_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"database_id"}, + ForceNew: true, + }, + "database_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"time_machine_id"}, + ForceNew: true, + }, + "for_restore": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "log_catchup_version": { + Type: schema.TypeInt, + Optional: true, + }, + }, + } +} + +func resourceNutanixNDBLogCatchUpsCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + req := &era.LogCatchUpRequest{} + + tmsID := "" + + tm, tmOk := d.GetOk("time_machine_id") + + dbID, dbOk := d.GetOk("database_id") + + if !tmOk && !dbOk { + return diag.Errorf("please provide the required `time_machine_id` or `database_id` attribute") + } + + if tmOk { + tmsID = tm.(string) + } + + if dbOk { + // get the time machine id by getting database details + + dbResp, er := conn.Service.GetDatabaseInstance(ctx, dbID.(string)) + if er != nil { + return diag.FromErr(er) + } + + tmsID = dbResp.Timemachineid + } + + // call log-catchup API + + actargs := []*era.Actionarguments{} + + if restore, rok := d.GetOkExists("for_restore"); rok && restore.(bool) { + forRestore := restore.(bool) + + req.ForRestore = forRestore + + actargs = append(actargs, &era.Actionarguments{ + Name: "preRestoreLogCatchup", + Value: forRestore, + }) + } + + actargs = append(actargs, &era.Actionarguments{ + Name: "switch_log", + Value: "true", + }) + + req.Actionarguments = actargs + resp, err := conn.Service.LogCatchUp(ctx, tmsID, req) + if err != nil { + return diag.FromErr(err) + } + + // Get Operation ID from response of log-catchups and poll for the operation to get completed. 
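	// the POST above only submits the request; the log catch-up itself runs
	// asynchronously on the NDB server, so the operation is polled until it settles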
+ opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting to perform log-catchups (%s) to create: %s", resp.Entityid, errWaitTask) + } + d.SetId(resp.Operationid) + return nil +} + +func resourceNutanixNDBLogCatchUpsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} + +func resourceNutanixNDBLogCatchUpsUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return resourceNutanixNDBLogCatchUpsCreate(ctx, d, meta) +} + +func resourceNutanixNDBLogCatchUpsDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} From 7081b2fc6091ebf65e912aca47a6d21680170eb6 Mon Sep 17 00:00:00 2001 From: Abhishekism9450 <32683845+Abhishekism9450@users.noreply.github.com> Date: Wed, 11 Jan 2023 12:44:29 +0530 Subject: [PATCH 05/18] Feat/m era ha (#518) --- client/era/era_service.go | 2 +- client/era/era_structs.go | 120 +++++---- examples/ndb/database_instance/main.tf | 170 ++++++++++++ nutanix/common_era_schema.go | 241 +++++++++++++++--- nutanix/data_source_nutanix_ndb_database.go | 33 ++- nutanix/resource_nutanix_nbd_database_test.go | 203 ++++++++++++++- nutanix/resource_nutanix_ndb_database.go | 219 +++++++++++++--- 7 files changed, 860 insertions(+), 128 deletions(-) diff --git a/client/era/era_service.go b/client/era/era_service.go index 2d79cbd40..f71356ff9 100644 --- a/client/era/era_service.go +++ b/client/era/era_service.go @@ -254,7 +254,7 @@ func (sc ServiceClient) GetOperation(req GetOperationRequest) (*GetOperationResp } func (sc ServiceClient) GetDatabaseInstance(ctx context.Context, dbInstanceID string) (*GetDatabaseResponse, error) { - httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/databases/%s?detailed=true&load-dbserver-cluster=true", dbInstanceID), nil) + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/databases/%s?detailed=false&load-dbserver-cluster=false", dbInstanceID), nil) if err != nil { return nil, err } diff --git a/client/era/era_structs.go b/client/era/era_structs.go index 5fe5ccc3b..3c3a845f6 100644 --- a/client/era/era_structs.go +++ b/client/era/era_structs.go @@ -232,35 +232,44 @@ type Dailyschedule struct { } type Schedule struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - UniqueName string `json:"uniqueName"` - OwnerID string `json:"ownerId"` - SystemPolicy bool `json:"systemPolicy"` - GlobalPolicy bool `json:"globalPolicy"` - Datecreated string `json:"dateCreated"` - Datemodified string `json:"dateModified"` - Snapshottimeofday *Snapshottimeofday `json:"snapshotTimeOfDay"` - Continuousschedule *Continuousschedule `json:"continuousSchedule"` - Weeklyschedule *Weeklyschedule `json:"weeklySchedule"` - Dailyschedule *Dailyschedule `json:"dailySchedule"` - Monthlyschedule *Monthlyschedule `json:"monthlySchedule"` - Quartelyschedule *Quartelyschedule `json:"quartelySchedule"` - Yearlyschedule 
*Yearlyschedule `json:"yearlySchedule"` - ReferenceCount int `json:"referenceCount"` - StartTime string `json:"startTime"` - TimeZone string `json:"timeZone"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + UniqueName *string `json:"uniqueName,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + StartTime *string `json:"startTime,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` + Datecreated *string `json:"dateCreated,omitempty"` + Datemodified *string `json:"dateModified,omitempty"` + ReferenceCount *int `json:"referenceCount,omitempty"` + SystemPolicy bool `json:"systemPolicy,omitempty"` + GlobalPolicy bool `json:"globalPolicy,omitempty"` + Snapshottimeofday *Snapshottimeofday `json:"snapshotTimeOfDay,omitempty"` + Continuousschedule *Continuousschedule `json:"continuousSchedule,omitempty"` + Weeklyschedule *Weeklyschedule `json:"weeklySchedule,omitempty"` + Dailyschedule *Dailyschedule `json:"dailySchedule,omitempty"` + Monthlyschedule *Monthlyschedule `json:"monthlySchedule,omitempty"` + Quartelyschedule *Quartelyschedule `json:"quartelySchedule,omitempty"` + Yearlyschedule *Yearlyschedule `json:"yearlySchedule,omitempty"` +} + +type PrimarySLA struct { + SLAID *string `json:"slaId,omitempty"` + NxClusterIds []*string `json:"nxClusterIds,omitempty"` +} + +type SLADetails struct { + PrimarySLA *PrimarySLA `json:"primarySla,omitempty"` } type Timemachineinfo struct { - Name string `json:"name"` - Description string `json:"description"` - Slaid string `json:"slaId"` - Schedule Schedule `json:"schedule"` - Tags []*Tags `json:"tags,omitempty"` - - Autotunelogdrive bool `json:"autoTuneLogDrive"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Slaid string `json:"slaId,omitempty"` + Schedule Schedule `json:"schedule,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + Autotunelogdrive bool `json:"autoTuneLogDrive,omitempty"` + SLADetails *SLADetails `json:"slaDetails,omitempty"` } type Actionarguments struct { @@ -268,11 +277,24 @@ type Actionarguments struct { Value interface{} `json:"value"` } +type NodesProperties struct { + Name string `json:"name"` + Value interface{} `json:"value"` +} + +type IPInfos struct { + IPType *string `json:"ipType,omitempty"` + IPAddresses []*string `json:"ipAddresses,omitempty"` +} + type Nodes struct { - Properties []interface{} `json:"properties"` - Vmname string `json:"vmName,omitempty"` - Networkprofileid string `json:"networkProfileId,omitempty"` - DatabaseServerID string `json:"dbserverId,omitempty"` + Properties []*NodesProperties `json:"properties"` + Vmname *string `json:"vmName,omitempty"` + Networkprofileid *string `json:"networkProfileId,omitempty"` + DatabaseServerID *string `json:"dbserverId,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + ComputeProfileID *string `json:"computeProfileId,omitempty"` + IPInfos []*IPInfos `json:"ipInfos,omitempty"` } // ProvisionDatabaseResponse structs @@ -831,25 +853,25 @@ type Protectiondomain struct { AssocEntities []string `json:"assocEntities,omitempty"` } type Databasenodes struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - Ownerid string `json:"ownerId"` - Datecreated string `json:"dateCreated"` - Datemodified string `json:"dateModified"` - AccessLevel interface{} `json:"accessLevel,omitempty"` - Properties []interface{} `json:"properties"` - Tags []*Tags `json:"tags"` - Databaseid string 
`json:"databaseId"`
-	Status                 string            `json:"status"`
-	Databasestatus         string            `json:"databaseStatus"`
-	Primary                bool              `json:"primary"`
-	Dbserverid             string            `json:"dbserverId"`
-	Softwareinstallationid string            `json:"softwareInstallationId"`
-	Protectiondomainid     string            `json:"protectionDomainId"`
-	Info                   Info              `json:"info"`
-	Metadata               interface{}       `json:"metadata"`
-	Protectiondomain       *Protectiondomain `json:"protectionDomain"`
+	ID                     string                  `json:"id"`
+	Name                   string                  `json:"name"`
+	Description            string                  `json:"description"`
+	Ownerid                string                  `json:"ownerId"`
+	Datecreated            string                  `json:"dateCreated"`
+	Datemodified           string                  `json:"dateModified"`
+	AccessLevel            interface{}             `json:"accessLevel,omitempty"`
+	Properties             []*DBInstanceProperties `json:"properties"`
+	Tags                   []*Tags                 `json:"tags"`
+	Databaseid             string                  `json:"databaseId"`
+	Status                 string                  `json:"status"`
+	Databasestatus         string                  `json:"databaseStatus"`
+	Primary                bool                    `json:"primary"`
+	Dbserverid             string                  `json:"dbserverId"`
+	Softwareinstallationid string                  `json:"softwareInstallationId"`
+	Protectiondomainid     string                  `json:"protectionDomainId"`
+	Info                   Info                    `json:"info"`
+	Metadata               interface{}             `json:"metadata"`
+	Protectiondomain       *Protectiondomain       `json:"protectionDomain"`
 	// Valideastate bool `json:"validEaState"`
 }

diff --git a/examples/ndb/database_instance/main.tf b/examples/ndb/database_instance/main.tf
index ec09f1da0..826a99923 100644
--- a/examples/ndb/database_instance/main.tf
+++ b/examples/ndb/database_instance/main.tf
@@ -98,3 +98,173 @@ resource "nutanix_ndb_database" "dbp" {
     }
   }
 }
+
+
+## provision HA instance
+
+resource "nutanix_ndb_database" "dbp" {
+  // database type
+  databasetype = "postgres_database"
+
+  // database name & description
+  name        = "test-pg-inst-HA-tf"
+  description = "adding description"
+
+  // profile details
+  softwareprofileid        = "{{ software_profile_id }}"
+  softwareprofileversionid = "{{ software_profile_version_id }}"
+  computeprofileid         = "{{ compute_profile_id }}"
+  networkprofileid         = "{{ network_profile_id }}"
+  dbparameterprofileid     = "{{ db_parameter_profile_id }}"
+
+  // required for HA instance
+  createdbserver = true
+  clustered      = true
+
+  // node count (including the haproxy server node)
+  nodecount = 4
+
+  // minimum required details for provisioning an HA instance
+  postgresql_info {
+    listener_port  = "5432"
+    database_size  = "200"
+    db_password    = "{{ database_password }}"
+    database_names = "testdb1"
+
+    ha_instance {
+      proxy_read_port      = "5001"
+      proxy_write_port     = "5000"
+      cluster_name         = "{{ cluster_name }}"
+      patroni_cluster_name = "{{ patroni_cluster_name }}"
+    }
+  }
+
+  nxclusterid  = "1c42ca25-32f4-42d9-a2bd-6a21f925b725"
+  sshpublickey = "{{ ssh_public_key }}"
+
+  // nodes are required; one block per VM, annotated below.
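+  // A sketch of the layout assumed by nodecount = 4: one HAProxy VM that
+  // routes reads and writes, one Primary database node, and two Secondary
+  // read replicas. Each properties block below is passed through to NDB as
+  // a node-level name/value argument.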
+ + // HA proxy node + nodes{ + properties{ + name = "node_type" + value = "haproxy" + } + vmname = "{{ vm name }}" + nx_cluster_id = "{{ nx_cluster_id }}" + } + + // Primary node for read/write ops + nodes{ + properties{ + name= "role" + value= "Primary" + } + properties{ + name= "failover_mode" + value= "Automatic" + } + properties{ + name= "node_type" + value= "database" + } + + vmname = "{{ name of vm }}" + networkprofileid="{{ network_profile_id }}" + computeprofileid= "{{ compute_profile_id }}" + nx_cluster_id= "{{ nx_cluster_id }}" + } + + // secondary nodes for read ops + nodes{ + properties{ + name= "role" + value= "Secondary" + } + properties{ + name= "failover_mode" + value= "Automatic" + } + properties{ + name= "node_type" + value= "database" + } + vmname = "{{ name of vm }}" + networkprofileid="{{ network_profile_id }}" + computeprofileid= "{{ compute_profile_id }}" + nx_cluster_id= "{{ nx_cluster_id }}" + } + nodes{ + properties{ + name= "role" + value= "Secondary" + } + properties{ + name= "failover_mode" + value= "Automatic" + } + properties{ + name= "node_type" + value= "database" + } + + vmname = "{{ name of vm }}" + networkprofileid="{{ network_profile_id }}" + computeprofileid= "{{ compute_profile_id }}" + nx_cluster_id= "{{ nx_cluster_id }}" + } + + // time machine required + timemachineinfo { + name= "test-pg-inst-HA" + description="" + sla_details{ + primary_sla{ + sla_id= "{{ required SLA}}0" + nx_cluster_ids= [ + "{{ nx_cluster_id}}" + ] + } + } + // schedule fields are optional + schedule { + snapshottimeofday{ + hours= 16 + minutes= 0 + seconds= 0 + } + continuousschedule{ + enabled=true + logbackupinterval= 30 + snapshotsperday=1 + } + weeklyschedule{ + enabled=true + dayofweek= "WEDNESDAY" + } + monthlyschedule{ + enabled = true + dayofmonth= "27" + } + quartelyschedule{ + enabled=true + startmonth="JANUARY" + dayofmonth= 27 + } + yearlyschedule{ + enabled= false + dayofmonth= 31 + month="DECEMBER" + } + } + } + + vm_password= "{{ vm_password}}" + autotunestagingdrive= true +} \ No newline at end of file diff --git a/nutanix/common_era_schema.go b/nutanix/common_era_schema.go index 538fe0786..5e7b1ca60 100644 --- a/nutanix/common_era_schema.go +++ b/nutanix/common_era_schema.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" era "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" ) type dbID string @@ -46,9 +47,37 @@ func timeMachineInfoSchema() *schema.Schema { }, "slaid": { Type: schema.TypeString, - Required: true, + Optional: true, Description: "description of SLA ID.", }, + "sla_details": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "primary_sla": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sla_id": { + Type: schema.TypeString, + Required: true, + Description: "description of SLA ID.", + }, + "nx_cluster_ids": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, "autotunelogdrive": { Type: schema.TypeBool, Optional: true, @@ -308,14 +337,40 @@ func buildTimeMachineSchedule(set *schema.Set) *era.Schedule { func buildTimeMachineFromResourceData(set *schema.Set) *era.Timemachineinfo { d := set.List() tMap := d[0].(map[string]interface{}) - return &era.Timemachineinfo{ - Name: tMap["name"].(string), - Description: 
tMap["description"].(string), - Slaid: tMap["slaid"].(string), - Schedule: *buildTimeMachineSchedule(tMap["schedule"].(*schema.Set)), // NULL Pointer check - Tags: expandTags(tMap["tags"].([]interface{})), - Autotunelogdrive: tMap["autotunelogdrive"].(bool), + + out := &era.Timemachineinfo{} + + if tMap != nil { + if name, ok := tMap["name"]; ok && len(name.(string)) > 0 { + out.Name = name.(string) + } + + if des, ok := tMap["description"]; ok && len(des.(string)) > 0 { + out.Description = des.(string) + } + + if slaid, ok := tMap["slaid"]; ok && len(slaid.(string)) > 0 { + out.Slaid = slaid.(string) + } + + if schedule, ok := tMap["schedule"]; ok && len(schedule.(*schema.Set).List()) > 0 { + out.Schedule = *buildTimeMachineSchedule(schedule.(*schema.Set)) + } + + if tags, ok := tMap["tags"]; ok && len(tags.([]interface{})) > 0 { + out.Tags = expandTags(tags.([]interface{})) + } + + if autotunelogdrive, ok := tMap["autotunelogdrive"]; ok && autotunelogdrive.(bool) { + out.Autotunelogdrive = autotunelogdrive.(bool) + } + + if slaDetails, ok := tMap["sla_details"]; ok && len(slaDetails.([]interface{})) > 0 { + out.SLADetails = buildSLADetails(slaDetails.([]interface{})) + } + return out } + return nil } func nodesSchema() *schema.Schema { @@ -328,9 +383,8 @@ func nodesSchema() *schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "properties": { - Type: schema.TypeSet, - Optional: true, - ConfigMode: schema.SchemaConfigModeAttr, + Type: schema.TypeSet, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { @@ -345,21 +399,44 @@ func nodesSchema() *schema.Schema { }, }, "vmname": { - Type: schema.TypeString, - Required: true, - ConfigMode: schema.SchemaConfigModeAttr, + Type: schema.TypeString, + Required: true, }, "networkprofileid": { - Type: schema.TypeString, - Required: true, - ConfigMode: schema.SchemaConfigModeAttr, + Type: schema.TypeString, + Optional: true, + }, + "ip_infos": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_type": { + Type: schema.TypeString, + Optional: true, + }, + "ip_addresses": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "computeprofileid": { + Type: schema.TypeString, + Optional: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Optional: true, }, "dbserverid": { // When createDbServer is false, we can use this field to set the target db server. 
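+				// A hedged illustration (placeholder ID): when createdbserver
+				// is false, a config can pin each node to an existing server:
+				//   nodes {
+				//     dbserverid = "<existing-dbserver-uuid>"
+				//   }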
- Type: schema.TypeString, - Description: "", - Optional: true, - ConfigMode: schema.SchemaConfigModeAttr, - Default: "", + Type: schema.TypeString, + Optional: true, + Default: "", }, }, }, @@ -368,17 +445,36 @@ func nodesSchema() *schema.Schema { func buildNodesFromResourceData(d *schema.Set) []*era.Nodes { argSet := d.List() - args := []*era.Nodes{} + nodes := []*era.Nodes{} for _, arg := range argSet { - args = append(args, &era.Nodes{ - Properties: arg.(map[string]interface{})["properties"].(*schema.Set).List(), - Vmname: arg.(map[string]interface{})["vmname"].(string), - Networkprofileid: arg.(map[string]interface{})["networkprofileid"].(string), - DatabaseServerID: arg.(map[string]interface{})["dbserverid"].(string), - }) + val := arg.(map[string]interface{}) + node := &era.Nodes{} + + if prop, ok := val["properties"]; ok { + node.Properties = expandNodesProperties(prop.(*schema.Set)) + } + if vmName, ok := val["vmname"]; ok && len(vmName.(string)) > 0 { + node.Vmname = utils.StringPtr(vmName.(string)) + } + if networkProfile, ok := val["networkprofileid"]; ok && len(networkProfile.(string)) > 0 { + node.Networkprofileid = utils.StringPtr(networkProfile.(string)) + } + if dbServer, ok := val["dbserverid"]; ok && len(dbServer.(string)) > 0 { + node.DatabaseServerID = utils.StringPtr(dbServer.(string)) + } + if nxCls, ok := val["nx_cluster_id"]; ok && len(nxCls.(string)) > 0 { + node.NxClusterID = utils.StringPtr(nxCls.(string)) + } + if computeProfile, ok := val["computeprofileid"]; ok && len(computeProfile.(string)) > 0 { + node.ComputeProfileID = utils.StringPtr(computeProfile.(string)) + } + if infos, ok := val["ip_infos"]; ok && len(infos.([]interface{})) > 0 { + node.IPInfos = expandIPInfos(infos.([]interface{})) + } + nodes = append(nodes, node) } - return args + return nodes } func actionArgumentsSchema() *schema.Schema { @@ -429,3 +525,88 @@ func buildActionArgumentsFromResourceData(d *schema.Set, args []*era.Actionargum } return args } + +func buildSLADetails(pr []interface{}) *era.SLADetails { + if len(pr) > 0 { + res := &era.SLADetails{} + + for _, v := range pr { + val := v.(map[string]interface{}) + + if priSLA, pok := val["primary_sla"]; pok { + res.PrimarySLA = expandPrimarySLA(priSLA.([]interface{})) + } + } + return res + } + return nil +} + +func expandPrimarySLA(pr []interface{}) *era.PrimarySLA { + if len(pr) > 0 { + out := &era.PrimarySLA{} + + for _, v := range pr { + val := v.(map[string]interface{}) + + if slaid, ok := val["sla_id"]; ok { + out.SLAID = utils.StringPtr(slaid.(string)) + } + + if nxcls, ok := val["nx_cluster_ids"]; ok { + res := make([]*string, 0) + nxclster := nxcls.([]interface{}) + + for _, v := range nxclster { + res = append(res, utils.StringPtr(v.(string))) + } + out.NxClusterIds = res + } + } + return out + } + return nil +} + +func expandNodesProperties(pr *schema.Set) []*era.NodesProperties { + argSet := pr.List() + + out := make([]*era.NodesProperties, 0) + for _, arg := range argSet { + var val interface{} + val = arg.(map[string]interface{})["value"] + b, ok := tryToConvertBool(arg.(map[string]interface{})["value"]) + if ok { + val = b + } + + out = append(out, &era.NodesProperties{ + Name: arg.(map[string]interface{})["name"].(string), + Value: val, + }) + } + return out +} + +func expandIPInfos(pr []interface{}) []*era.IPInfos { + if len(pr) > 0 { + IPInfos := make([]*era.IPInfos, 0) + + for _, v := range pr { + val := v.(map[string]interface{}) + IPInfo := &era.IPInfos{} + + if ipType, ok := val["ip_type"]; ok { + IPInfo.IPType = 
utils.StringPtr(ipType.(string))
+			}
+
+			if addr, ok := val["ip_addresses"]; ok {
+				// Terraform list values arrive as []interface{}, so convert
+				// element-wise; asserting []string directly would panic.
+				ips := make([]*string, 0)
+				for _, ip := range addr.([]interface{}) {
+					ips = append(ips, utils.StringPtr(ip.(string)))
+				}
+				IPInfo.IPAddresses = ips
+			}
+
+			IPInfos = append(IPInfos, IPInfo)
+		}
+		return IPInfos
+	}
+	return nil
+}
diff --git a/nutanix/data_source_nutanix_ndb_database.go b/nutanix/data_source_nutanix_ndb_database.go
index be5e84d4f..398eb2c4d 100644
--- a/nutanix/data_source_nutanix_ndb_database.go
+++ b/nutanix/data_source_nutanix_ndb_database.go
@@ -287,8 +287,10 @@ func dataSourceNutanixEraDatabaseRead(ctx context.Context, d *schema.ResourceData
 		return diag.FromErr(err)
 	}

-	if err := d.Set("dbserver_logical_cluster", resp.Dbserverlogicalcluster); err != nil {
-		return diag.FromErr(err)
+	if resp.Dbserverlogicalcluster != nil {
+		if err := d.Set("dbserver_logical_cluster", resp.Dbserverlogicalcluster); err != nil {
+			return diag.FromErr(err)
+		}
 	}

 	if err := d.Set("database_nodes", flattenDBNodes(resp.Databasenodes)); err != nil {
@@ -381,7 +383,7 @@ func flattenDBNodes(pr []Era.Databasenodes) []map[string]interface{} {
 			db["name"] = v.Name
 			db["owner_id"] = v.Ownerid
 			db["primary"] = v.Primary
-			db["properties"] = v.Properties
+			db["properties"] = flattenDBInstanceProperties(v.Properties)
 			db["protection_domain"] = flattenDBProtectionDomain(v.Protectiondomain)
 			db["protection_domain_id"] = v.Protectiondomainid
 			db["software_installation_id"] = v.Softwareinstallationid
@@ -1760,8 +1762,29 @@ func dataSourceEraDatabaseNodes() *schema.Schema {
 				"properties": {
 					Type:     schema.TypeList,
 					Computed: true,
-					Elem: &schema.Schema{
-						Type: schema.TypeString,
+					Elem: &schema.Resource{
+						Schema: map[string]*schema.Schema{
+							"name": {
+								Type:     schema.TypeString,
+								Computed: true,
+							},
+							"value": {
+								Type:     schema.TypeString,
+								Computed: true,
+							},
+							"ref_id": {
+								Type:     schema.TypeString,
+								Computed: true,
+							},
+							"secure": {
+								Type:     schema.TypeBool,
+								Computed: true,
+							},
+							"description": {
+								Type:     schema.TypeString,
+								Computed: true,
+							},
+						},
 					},
 				},
 				"tags": dataSourceEraDBInstanceTags(),
diff --git a/nutanix/resource_nutanix_nbd_database_test.go b/nutanix/resource_nutanix_nbd_database_test.go
index 93fb86cd7..36d74a5ed 100644
--- a/nutanix/resource_nutanix_nbd_database_test.go
+++ b/nutanix/resource_nutanix_nbd_database_test.go
@@ -23,6 +23,32 @@ func TestAccEra_basic(t *testing.T) {
 				Check: resource.ComposeTestCheckFunc(
 					resource.TestCheckResourceAttr(resourceNameDB, "name", name),
 					resource.TestCheckResourceAttr(resourceNameDB, "description", desc),
+					resource.TestCheckResourceAttr(resourceNameDB, "databasetype", "postgres_database"),
+					resource.TestCheckResourceAttr(resourceNameDB, "database_nodes.#", "1"),
+					resource.TestCheckResourceAttrSet(resourceNameDB, "time_machine_id"),
 				),
 			},
 		},
 	})
 }
+
+func TestAccEraDatabaseProvisionHA(t *testing.T) {
+	name := "test-pg-inst-HA-tf"
+	desc := "this is desc"
+	sshKey := testVars.SSHKey
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccEraPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccEraDatabaseHAConfig(name, desc, sshKey),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(resourceNameDB, "name", name),
+					resource.TestCheckResourceAttr(resourceNameDB, "description", desc),
+					resource.TestCheckResourceAttr(resourceNameDB, "databasetype", "postgres_database"),
+					resource.TestCheckResourceAttr(resourceNameDB, "database_nodes.#", "3"),
+					resource.TestCheckResourceAttr(resourceNameDB, "linked_databases.#", "4"),
+					resource.TestCheckResourceAttrSet(resourceNameDB,
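+					// The HA checks expect three database nodes (primary plus
+					// two secondaries; the HAProxy VM does not count) and four
+					// linked databases, presumably testdb1 plus the system
+					// databases NDB registers alongside it.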
"time_machine_id"), ), }, }, @@ -86,7 +112,7 @@ func testAccEraDatabaseConfig(name, desc, vmName, sshKey string) string { networkprofileid= local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id } timemachineinfo { - name= "test-pg-inst" + name= "test-pg-inst-12" description="" slaid=local.slas["DEFAULT_OOB_BRONZE_SLA"].id schedule { @@ -123,3 +149,178 @@ func testAccEraDatabaseConfig(name, desc, vmName, sshKey string) string { } `, name, desc, vmName, sshKey) } + +func testAccEraDatabaseHAConfig(name, desc, sshKey string) string { + return fmt.Sprintf(` + data "nutanix_ndb_profiles" "p"{ + } + data "nutanix_ndb_slas" "slas"{} + data "nutanix_ndb_clusters" "clusters"{} + + locals { + profiles_by_type = { + for p in data.nutanix_ndb_profiles.p.profiles : p.type => p... + } + storage_profiles = { + for p in local.profiles_by_type.Storage: p.name => p + } + compute_profiles = { + for p in local.profiles_by_type.Compute: p.name => p + } + network_profiles = { + for p in local.profiles_by_type.Network: p.name => p + } + database_parameter_profiles = { + for p in local.profiles_by_type.Database_Parameter: p.name => p + } + software_profiles = { + for p in local.profiles_by_type.Software: p.name => p + } + slas = { + for p in data.nutanix_ndb_slas.slas.slas: p.name => p + } + clusters = { + for p in data.nutanix_ndb_clusters.clusters.clusters: p.name => p + } + } + + resource "nutanix_ndb_database" "acctest-managed" { + databasetype = "postgres_database" + name = "%[1]s" + description = "%[2]s" + softwareprofileid = local.software_profiles["POSTGRES_10.4_OOB"].id + softwareprofileversionid = local.software_profiles["POSTGRES_10.4_OOB"].latest_version_id + computeprofileid = local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id + networkprofileid = local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + dbparameterprofileid = local.database_parameter_profiles.DEFAULT_POSTGRES_PARAMS.id + + createdbserver = true + nodecount= 4 + clustered = true + + postgresql_info{ + listener_port = "5432" + database_size= "200" + db_password = "password" + database_names= "testdb1" + ha_instance{ + proxy_read_port= "5001" + + proxy_write_port = "5000" + + cluster_name= "ha-cls" + + patroni_cluster_name = "ha-patroni-cluster" + } + } + nxclusterid= local.clusters.EraCluster.id + sshpublickey= "%[3]s" + nodes{ + properties{ + name = "node_type" + value = "haproxy" + } + vmname = "ha-cls_haproxy1" + nx_cluster_id = local.clusters.EraCluster.id + } + nodes{ + properties{ + name= "role" + value= "Primary" + } + properties{ + name= "failover_mode" + value= "Automatic" + } + properties{ + name= "node_type" + value= "database" + } + vmname = "ha-cls-1" + networkprofileid=local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + computeprofileid= local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id + nx_cluster_id= local.clusters.EraCluster.id + } + nodes{ + properties{ + name= "role" + value= "Secondary" + } + properties{ + name= "failover_mode" + value= "Automatic" + } + properties{ + name= "node_type" + value= "database" + } + vmname = "ha-cls-2" + networkprofileid=local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + computeprofileid= local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id + nx_cluster_id= local.clusters.EraCluster.id + } + + nodes{ + properties{ + name= "role" + value= "Secondary" + } + properties{ + name= "failover_mode" + value= "Automatic" + } + properties{ + name= "node_type" + value= "database" + } + vmname = "ha-cls-3" + 
networkprofileid=local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + computeprofileid= local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id + nx_cluster_id= local.clusters.EraCluster.id + } + timemachineinfo { + name= "test-pg-inst" + description="" + + sla_details{ + primary_sla{ + sla_id= local.slas["DEFAULT_OOB_BRONZE_SLA"].id + nx_cluster_ids= [ + local.clusters.EraCluster.id + ] + } + } + schedule { + snapshottimeofday{ + hours= 16 + minutes= 0 + seconds= 0 + } + continuousschedule{ + enabled=true + logbackupinterval= 30 + snapshotsperday=1 + } + weeklyschedule{ + enabled=true + dayofweek= "WEDNESDAY" + } + monthlyschedule{ + enabled = true + dayofmonth= "27" + } + quartelyschedule{ + enabled=true + startmonth="JANUARY" + dayofmonth= 27 + } + yearlyschedule{ + enabled= false + dayofmonth= 31 + month="DECEMBER" + } + } + } + } + `, name, desc, sshKey) +} diff --git a/nutanix/resource_nutanix_ndb_database.go b/nutanix/resource_nutanix_ndb_database.go index a05eae41f..ad26da89e 100644 --- a/nutanix/resource_nutanix_ndb_database.go +++ b/nutanix/resource_nutanix_ndb_database.go @@ -17,7 +17,7 @@ import ( var ( eraDelay = 1 * time.Minute - EraProvisionTimeout = 35 * time.Minute + EraProvisionTimeout = 75 * time.Minute ) func resourceDatabaseInstance() *schema.Resource { @@ -216,6 +216,70 @@ func resourceDatabaseInstance() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "ha_instance": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + }, + "patroni_cluster_name": { + Type: schema.TypeString, + Required: true, + }, + "proxy_read_port": { + Type: schema.TypeString, + Required: true, + }, + "proxy_write_port": { + Type: schema.TypeString, + Required: true, + }, + "provision_virtual_ip": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "deploy_haproxy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "enable_synchronous_mode": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "failover_mode": { + Type: schema.TypeString, + Optional: true, + }, + "node_type": { + Type: schema.TypeString, + Optional: true, + Default: "database", + }, + "archive_wal_expire_days": { + Type: schema.TypeInt, + Optional: true, + Default: -1, + }, + "backup_policy": { + Type: schema.TypeString, + Optional: true, + Default: "primary_only", + }, + "enable_peer_auth": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, }, }, }, @@ -679,92 +743,163 @@ func deleteDatabaseInstance(ctx context.Context, d *schema.ResourceData, m inter func expandActionArguments(d *schema.ResourceData) []*era.Actionarguments { args := []*era.Actionarguments{} - if post, ok := d.GetOk("postgresql_info"); ok { + if post, ok := d.GetOk("postgresql_info"); ok && (len(post.([]interface{}))) > 0 { brr := post.([]interface{}) for _, arg := range brr { val := arg.(map[string]interface{}) - var values interface{} if plist, pok := val["listener_port"]; pok && len(plist.(string)) > 0 { - values = plist - args = append(args, &era.Actionarguments{ Name: "listener_port", - Value: values, + Value: plist, }) } - if plist, pok := val["database_size"]; pok && len(plist.(string)) > 0 { - values = plist - + if dbSize, pok := val["database_size"]; pok && len(dbSize.(string)) > 0 { args = append(args, &era.Actionarguments{ Name: "database_size", - Value: values, + Value: dbSize, }) } - if plist, pok := 
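+			// db_password, like the other postgresql_info fields handled in
+			// this builder, is sent to NDB as an actionArguments name/value
+			// pair and is only appended when non-empty.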
val["db_password"]; pok && len(plist.(string)) > 0 { - values = plist - + if dbPass, pok := val["db_password"]; pok && len(dbPass.(string)) > 0 { args = append(args, &era.Actionarguments{ Name: "db_password", - Value: values, + Value: dbPass, }) } - if plist, pok := val["database_names"]; pok && len(plist.(string)) > 0 { - values = plist - + if dbName, pok := val["database_names"]; pok && len(dbName.(string)) > 0 { args = append(args, &era.Actionarguments{ Name: "database_names", - Value: values, + Value: dbName, }) } - if plist, pok := val["auto_tune_staging_drive"]; pok && plist.(bool) { - values = plist - + if autoTune, pok := val["auto_tune_staging_drive"]; pok && autoTune.(bool) { args = append(args, &era.Actionarguments{ Name: "auto_tune_staging_drive", - Value: values, + Value: autoTune, }) } - if plist, pok := val["allocate_pg_hugepage"]; pok { - values = plist - + if allocatePG, pok := val["allocate_pg_hugepage"]; pok { args = append(args, &era.Actionarguments{ Name: "allocate_pg_hugepage", - Value: values, + Value: allocatePG, }) } - if plist, pok := val["auth_method"]; pok && len(plist.(string)) > 0 { - values = plist - + if authMethod, pok := val["auth_method"]; pok && len(authMethod.(string)) > 0 { args = append(args, &era.Actionarguments{ Name: "auth_method", - Value: values, + Value: authMethod, }) } - if plist, clok := val["cluster_database"]; clok { - values = plist - + if clsDB, clok := val["cluster_database"]; clok { args = append(args, &era.Actionarguments{ Name: "cluster_database", - Value: values, + Value: clsDB, }) } - if plist, clok := val["pre_create_script"]; clok && len(plist.(string)) > 0 { - values = plist - + if preScript, clok := val["pre_create_script"]; clok && len(preScript.(string)) > 0 { args = append(args, &era.Actionarguments{ Name: "pre_create_script", - Value: values, + Value: preScript, }) } - if plist, clok := val["post_create_script"]; clok && len(plist.(string)) > 0 { - values = plist - + if postScript, clok := val["post_create_script"]; clok && len(postScript.(string)) > 0 { args = append(args, &era.Actionarguments{ Name: "post_create_script", - Value: values, + Value: postScript, }) } + + if ha, ok := val["ha_instance"]; ok && len(ha.([]interface{})) > 0 { + haList := ha.([]interface{}) + + for _, v := range haList { + val := v.(map[string]interface{}) + + if haProxy, pok := val["proxy_read_port"]; pok && len(haProxy.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "proxy_read_port", + Value: haProxy, + }) + } + + if proxyWrite, pok := val["proxy_write_port"]; pok && len(proxyWrite.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "proxy_write_port", + Value: proxyWrite, + }) + } + + if backupPolicy, pok := val["backup_policy"]; pok && len(backupPolicy.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "backup_policy", + Value: backupPolicy, + }) + } + + if clsName, pok := val["cluster_name"]; pok && len(clsName.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "cluster_name", + Value: clsName, + }) + } + + if patroniClsName, pok := val["patroni_cluster_name"]; pok && len(patroniClsName.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "patroni_cluster_name", + Value: patroniClsName, + }) + } + + if nodeType, pok := val["node_type"]; pok && len(nodeType.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "node_type", + Value: nodeType, + }) + } + + if proVIP, pok := val["provision_virtual_ip"]; pok && proVIP.(bool) { + args = append(args, 
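+					// Boolean HA flags such as provision_virtual_ip are only
+					// appended when true; a false value therefore defers to
+					// the NDB server-side default rather than sending false.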
&era.Actionarguments{ + Name: "provision_virtual_ip", + Value: proVIP, + }) + } + + if deployHaproxy, pok := val["deploy_haproxy"]; pok && deployHaproxy.(bool) { + args = append(args, &era.Actionarguments{ + Name: "deploy_haproxy", + Value: deployHaproxy, + }) + } + + if enableSyncMode, pok := val["enable_synchronous_mode"]; pok && (enableSyncMode.(bool)) { + args = append(args, &era.Actionarguments{ + Name: "enable_synchronous_mode", + Value: enableSyncMode, + }) + } + + if failoverMode, pok := val["failover_mode"]; pok && len(failoverMode.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "failover_mode", + Value: failoverMode, + }) + } + + if walExp, pok := val["archive_wal_expire_days"]; pok { + args = append(args, &era.Actionarguments{ + Name: "archive_wal_expire_days", + Value: walExp, + }) + } + + if enablePeerAuth, pok := val["enable_peer_auth"]; pok && enablePeerAuth.(bool) { + args = append(args, &era.Actionarguments{ + Name: "enable_peer_auth", + Value: enablePeerAuth, + }) + } + } + } } } resp := buildActionArgumentsFromResourceData(d.Get("actionarguments").(*schema.Set), args) From f03e18b746d720824e615a44ab553d2810c9e2bb Mon Sep 17 00:00:00 2001 From: Abhishekism9450 <32683845+Abhishekism9450@users.noreply.github.com> Date: Fri, 13 Jan 2023 12:28:13 +0530 Subject: [PATCH 06/18] added bad Request (#530) --- client/client.go | 10 +++++----- client/client_test.go | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/client/client.go b/client/client.go index 088377798..2decd025a 100644 --- a/client/client.go +++ b/client/client.go @@ -550,7 +550,11 @@ func CheckResponse(r *http.Response) error { // Nutanix returns non-json response with code 401 when // invalid credentials are used if c == http.StatusUnauthorized { - return fmt.Errorf("invalid Nutanix Credentials") + return fmt.Errorf("invalid auth Credentials") + } + + if c == http.StatusBadRequest { + return fmt.Errorf("bad Request") } buf, err := ioutil.ReadAll(r.Body) @@ -574,7 +578,6 @@ func CheckResponse(r *http.Response) error { if err != nil { return fmt.Errorf("unmarshalling error response %s for response body %s", err, string(buf)) } - log.Print("[DEBUG] after json.Unmarshal") errRes := &ErrorResponse{} if status, ok := res["status"]; ok { @@ -590,11 +593,9 @@ func CheckResponse(r *http.Response) error { return nil } - log.Print("[DEBUG] after bunch of switch cases") if err != nil { return err } - log.Print("[DEBUG] first nil check") // karbon error check if messageInfo, ok := res["message_info"]; ok { @@ -610,7 +611,6 @@ func CheckResponse(r *http.Response) error { return nil } - log.Print("[DEBUG] after errRes.State") pretty, _ := json.MarshalIndent(errRes, "", " ") return fmt.Errorf("error: %s", string(pretty)) } diff --git a/client/client_test.go b/client/client_test.go index dc9ff73c7..6c4d45fd8 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -283,7 +283,7 @@ func TestGetResponse(t *testing.T) { StatusCode: http.StatusBadRequest, Body: ioutil.NopCloser(strings.NewReader( `{"api_version": "3.1", "code": 400, "kind": "error", "message_list": - [{"message": "This field may not be blank."}], "state": "ERROR"}`)), + [{"message": "bad Request"}], "state": "ERROR"}`)), } err := CheckResponse(res) @@ -292,8 +292,8 @@ func TestGetResponse(t *testing.T) { t.Fatal("Expected error response.") } - if !strings.Contains(fmt.Sprint(err), "This field may not be blank.") { - t.Errorf("error = %#v, expected %#v", err, "This field may not be blank.") + if 
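+	// CheckResponse now short-circuits HTTP 400 with a generic "bad Request"
+	// error before the body's message_list is parsed, so these assertions
+	// match the generic text instead of the field-level message.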
!strings.Contains(fmt.Sprint(err), "bad Request") { + t.Errorf("error = %#v, expected %#v", err, "bad Request") } } @@ -303,7 +303,7 @@ func TestCheckResponse(t *testing.T) { StatusCode: http.StatusBadRequest, Body: ioutil.NopCloser(strings.NewReader( `{"api_version": "3.1", "code": 400, "kind": "error", "message_list": - [{"message": "This field may not be blank."}], "state": "ERROR"}`)), + [{"message": "bad Request"}], "state": "ERROR"}`)), } err := CheckResponse(res) @@ -311,8 +311,8 @@ func TestCheckResponse(t *testing.T) { t.Fatalf("Expected error response.") } - if !strings.Contains(fmt.Sprint(err), "This field may not be blank.") { - t.Errorf("error = %#v, expected %#v", err, "This field may not be blank.") + if !strings.Contains(fmt.Sprint(err), "bad Request") { + t.Errorf("error = %#v, expected %#v", err, "bad Request") } } From b6790d11d1fc4604500eb56387cc359b8357287e Mon Sep 17 00:00:00 2001 From: Abhishekism9450 <32683845+Abhishekism9450@users.noreply.github.com> Date: Fri, 13 Jan 2023 12:50:20 +0530 Subject: [PATCH 07/18] Feat/m era profiles (#529) --- client/era/era_service.go | 99 +- client/era/era_structs.go | 63 +- examples/ndb/profiles/main.tf | 99 ++ examples/ndb/profiles/terraform.tfvars | 4 + examples/ndb/profiles/variables.tf | 10 + nutanix/data_source_nutanix_ndb_profile.go | 8 +- nutanix/data_source_nutanix_ndb_profiles.go | 2 +- nutanix/provider.go | 2 + nutanix/resource_nutanix_ndb_profiles.go | 1135 +++++++++++++++++ nutanix/resource_nutanix_ndb_profiles_test.go | 243 ++++ ...ce_nutanix_ndb_software_version_profile.go | 411 ++++++ 11 files changed, 2060 insertions(+), 16 deletions(-) create mode 100644 examples/ndb/profiles/main.tf create mode 100644 examples/ndb/profiles/terraform.tfvars create mode 100644 examples/ndb/profiles/variables.tf create mode 100644 nutanix/resource_nutanix_ndb_profiles.go create mode 100644 nutanix/resource_nutanix_ndb_profiles_test.go create mode 100644 nutanix/resource_nutanix_ndb_software_version_profile.go diff --git a/client/era/era_service.go b/client/era/era_service.go index f71356ff9..f4f6e6d3b 100644 --- a/client/era/era_service.go +++ b/client/era/era_service.go @@ -19,7 +19,9 @@ type Service interface { UpdateDatabase(ctx context.Context, req *UpdateDatabaseRequest, uuid string) (*UpdateDatabaseResponse, error) DeleteDatabase(ctx context.Context, req *DeleteDatabaseRequest, uuid string) (*DeleteDatabaseResponse, error) ListProfiles(ctx context.Context, engine string, profileType string) (*ProfileListResponse, error) - GetProfiles(ctx context.Context, engine string, profileType string, id string, name string) (*ListProfileResponse, error) + GetProfile(ctx context.Context, filters *ProfileFilter) (*ListProfileResponse, error) + CreateProfiles(ctx context.Context, req *ProfileRequest) (*ListProfileResponse, error) + DeleteProfile(ctx context.Context, uuid string) (*string, error) GetCluster(ctx context.Context, id string, name string) (*ListClusterResponse, error) ListClusters(ctx context.Context) (*ClusterListResponse, error) GetSLA(ctx context.Context, id string, name string) (*ListSLAResponse, error) @@ -29,6 +31,12 @@ type Service interface { DeleteSLA(ctx context.Context, uuid string) (*SLADeleteResponse, error) DatabaseRestore(ctx context.Context, databaseID string, req *DatabaseRestoreRequest) (*ProvisionDatabaseResponse, error) LogCatchUp(ctx context.Context, id string, req *LogCatchUpRequest) (*ProvisionDatabaseResponse, error) + CreateSoftwareProfiles(ctx context.Context, req *ProfileRequest) 
(*SoftwareProfileResponse, error) + UpdateProfile(ctx context.Context, req *UpdateProfileRequest, id string) (*ListProfileResponse, error) + GetSoftwareProfileVersion(ctx context.Context, profileID string, profileVersionID string) (*Versions, error) + CreateSoftwareProfileVersion(ctx context.Context, id string, req *ProfileRequest) (*SoftwareProfileResponse, error) + UpdateProfileVersion(ctx context.Context, req *ProfileRequest, id string, vid string) (*ListProfileResponse, error) + DeleteProfileVersion(ctx context.Context, profileID string, profileVersionID string) (*string, error) } type ServiceClient struct { @@ -50,10 +58,11 @@ func (sc ServiceClient) ListProfiles(ctx context.Context, engine string, profile return res, sc.c.Do(ctx, httpReq, res) } -func (sc ServiceClient) GetProfiles(ctx context.Context, engine string, profileType string, id string, name string) (*ListProfileResponse, error) { +func (sc ServiceClient) GetProfile(ctx context.Context, filter *ProfileFilter) (*ListProfileResponse, error) { var httpReq *http.Request var err error - path := makePathProfiles(engine, profileType, id, name) + + path := makePathProfiles(filter.Engine, filter.ProfileType, filter.ProfileID, filter.ProfileName) httpReq, err = sc.c.NewRequest(ctx, http.MethodGet, path, nil) @@ -264,7 +273,7 @@ func (sc ServiceClient) GetDatabaseInstance(ctx context.Context, dbInstanceID st } func (sc ServiceClient) ListDatabaseInstance(ctx context.Context) (*ListDatabaseInstance, error) { - httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, ("/databases?detailed=false&load-dbserver-cluster=false&order-by-dbserver-cluster=false"), nil) + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, ("/databases?detailed=true&load-dbserver-cluster=true&order-by-dbserver-cluster=false"), nil) if err != nil { return nil, err } @@ -276,6 +285,15 @@ func (sc ServiceClient) ListDatabaseInstance(ctx context.Context) (*ListDatabase func (sc ServiceClient) CreateSLA(ctx context.Context, req *SLAIntentInput) (*ListSLAResponse, error) { httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/slas", req) res := new(ListSLAResponse) + if err != nil { + return nil, err + } + + return res, sc.c.Do(ctx, httpReq, res) +} +func (sc ServiceClient) CreateProfiles(ctx context.Context, req *ProfileRequest) (*ListProfileResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/profiles", req) + res := new(ListProfileResponse) if err != nil { return nil, err @@ -290,6 +308,14 @@ func (sc ServiceClient) DeleteSLA(ctx context.Context, uuid string) (*SLADeleteR return nil, err } res := new(SLADeleteResponse) + return res, sc.c.Do(ctx, httpReq, res) +} +func (sc ServiceClient) DeleteProfile(ctx context.Context, uuid string) (*string, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/profiles/%s", uuid), nil) + if err != nil { + return nil, err + } + res := new(string) return res, sc.c.Do(ctx, httpReq, res) } @@ -303,6 +329,16 @@ func (sc ServiceClient) UpdateSLA(ctx context.Context, req *SLAIntentInput, id s res := new(ListSLAResponse) return res, sc.c.Do(ctx, httpReq, res) } +func (sc ServiceClient) UpdateProfile(ctx context.Context, req *UpdateProfileRequest, id string) (*ListProfileResponse, error) { + path := fmt.Sprintf("/profiles/%s", id) + httpReq, err := sc.c.NewRequest(ctx, http.MethodPut, path, req) + if err != nil { + return nil, err + } + res := new(ListProfileResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} func (sc ServiceClient) DatabaseRestore(ctx context.Context, databaseID 
string, req *DatabaseRestoreRequest) (*ProvisionDatabaseResponse, error) { httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/databases/%s/restore", databaseID), req) @@ -319,7 +355,60 @@ func (sc ServiceClient) LogCatchUp(ctx context.Context, tmsID string, req *LogCa if err != nil { return nil, err } - res := new(ProvisionDatabaseResponse) return res, sc.c.Do(ctx, httpReq, res) } + +func (sc ServiceClient) UpdateProfileVersion(ctx context.Context, req *ProfileRequest, id string, vid string) (*ListProfileResponse, error) { + path := fmt.Sprintf("/profiles/%s/versions/%s", id, vid) + httpReq, err := sc.c.NewRequest(ctx, http.MethodPut, path, req) + if err != nil { + return nil, err + } + res := new(ListProfileResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) CreateSoftwareProfiles(ctx context.Context, req *ProfileRequest) (*SoftwareProfileResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/profiles", req) + res := new(SoftwareProfileResponse) + + if err != nil { + return nil, err + } + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetSoftwareProfileVersion(ctx context.Context, profileID string, profileVersionID string) (*Versions, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/profiles/%s/versions/%s", profileID, profileVersionID), nil) + res := new(Versions) + + if err != nil { + return nil, err + } + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) CreateSoftwareProfileVersion(ctx context.Context, id string, req *ProfileRequest) (*SoftwareProfileResponse, error) { + path := fmt.Sprintf("/profiles/%s/versions", id) + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, path, req) + if err != nil { + return nil, err + } + res := new(SoftwareProfileResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteProfileVersion(ctx context.Context, profileID string, profileVersionID string) (*string, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/profiles/%s/versions/%s", profileID, profileVersionID), nil) + if err != nil { + return nil, err + } + res := new(string) + + return res, sc.c.Do(ctx, httpReq, res) +} diff --git a/client/era/era_structs.go b/client/era/era_structs.go index 3c3a845f6..e7c65be51 100644 --- a/client/era/era_structs.go +++ b/client/era/era_structs.go @@ -48,14 +48,14 @@ type Properties struct { } type VersionClusterAssociation struct { - NxClusterID *string `json:"nxClusterId,omitempty"` - DateCreated *string `json:"dateCreated,omitempty"` - DateModified *string `json:"dateModified,omitempty"` - OwnerID *string `json:"ownerId,omitempty"` - Status *string `json:"status,omitempty"` - ProfileVersionID *string `json:"profileVersionId,omitempty"` - Properties []*Properties `json:"properties,omitempty"` - OptimizedForProvisioning bool `json:"optimizedForProvisioning,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + DateCreated *string `json:"dateCreated,omitempty"` + DateModified *string `json:"dateModified,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + Status *string `json:"status,omitempty"` + ProfileVersionID *string `json:"profileVersionId,omitempty"` + Properties []*ProfileProperties `json:"properties,omitempty"` + OptimizedForProvisioning bool `json:"optimizedForProvisioning,omitempty"` } type Versions struct { @@ -76,7 +76,7 @@ type Versions struct { Deprecated bool `json:"deprecated,omitempty"` Systemprofile bool 
`json:"systemProfile,omitempty"` Propertiesmap map[string]interface{} `json:"propertiesMap,omitempty"` - Properties []*Properties `json:"properties,omitempty"` + Properties []*ProfileProperties `json:"properties,omitempty"` VersionClusterAssociation []*VersionClusterAssociation `json:"versionClusterAssociation,omitempty"` } @@ -1007,3 +1007,48 @@ type LogCatchUpRequest struct { ForRestore bool `json:"for_restore,omitempty"` Actionarguments []*Actionarguments `json:"actionArguments,omitempty"` } + +type ProfileProperties struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` + Secure bool `json:"secure"` + Description *string `json:"description,omitempty"` +} + +type ProfileRequest struct { + EngineType *string `json:"engineType,omitempty"` + Type *string `json:"type,omitempty"` + Topology *string `json:"topology,omitempty"` + DBVersion *string `json:"dbVersion,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + AvailableClusterIds []*string `json:"availableClusterIds,omitempty"` + SystemProfile bool `json:"systemProfile,omitempty"` + Published bool `json:"published"` + Deprecated bool `json:"deprecated"` + Properties []*ProfileProperties `json:"properties,omitempty"` + VersionClusterAssociation []*VersionClusterAssociation `json:"versionClusterAssociation,omitempty"` +} + +type SoftwareProfileResponse struct { + Name *string `json:"name,omitempty"` + WorkID *string `json:"workId,omitempty"` + OperationID *string `json:"operationId,omitempty"` + DbserverID *string `json:"dbserverId,omitempty"` + EntityID *string `json:"entityId,omitempty"` + EntityName *string `json:"entityName,omitempty"` + EntityType *string `json:"entityType,omitempty"` + Status *string `json:"status,omitempty"` +} + +type UpdateProfileRequest struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` +} + +type ProfileFilter struct { + Engine string `json:"engine,omitempty"` + ProfileType string `json:"profile_type,omitempty"` + ProfileID string `json:"profile_id,omitempty"` + ProfileName string `json:"profile_name,omitempty"` +} diff --git a/examples/ndb/profiles/main.tf b/examples/ndb/profiles/main.tf new file mode 100644 index 000000000..bd957f96a --- /dev/null +++ b/examples/ndb/profiles/main.tf @@ -0,0 +1,99 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + +## resource to create Compute Profile + +resource "nutanix_ndb_profile" "computeProfile" { + name = "compute-tf" + description = "compute description" + compute_profile{ + cpus = 1 + core_per_cpu = 2 + memory_size = 2 + } + // optional + published= true +} + +## resource to create Database parameters Profile + +resource "nutanix_ndb_database_parameter_profile" "dbProfile" { + name= "dbParams-tf" + description = "database description" + // required engine type + engine_type = "postgres_database" + + // optional args for engine type else will set to default values + postgres_database { + max_connections = "100" + max_replication_slots = "10" + } +} + +## resource to create Network Profile + +### Postgres Database Single Instance profile +resource "nutanix_ndb_profile" "networkProfile" { + name = "tf-net" + description = "terraform created" + engine_type = "postgres_database" + network_profile{ + 
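+    // topology "single" pairs with the single_instance block (one VLAN on
+    // one cluster); the HA example below instead sets topology = "cluster"
+    // and supplies per-cluster vlan_name/cluster_name lists via ha_instance.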
+    topology = "single"
+    postgres_database {
+      single_instance {
+        vlan_name = "vlan.154"
+      }
+    }
+  }
+  published = true
+}
+
+### Postgres Database HA Instance profile
+resource "nutanix_ndb_profile" "networkProfileHA" {
+  name        = "tf-net-ha"
+  description = "terraform created"
+  engine_type = "postgres_database"
+  network_profile {
+    topology = "cluster"
+    postgres_database {
+      ha_instance {
+        num_of_clusters = "1"
+        vlan_name       = ["{{ vlanName }}"]
+        cluster_name    = ["{{ ClusterName }}"]
+      }
+    }
+  }
+  published = true
+}
+
+## resource to create Software Profile
+
+resource "nutanix_ndb_profile" "softwareProfile" {
+  name        = "test-software"
+  description = "description"
+  engine_type = "postgres_database"
+  software_profile {
+    topology = "single"
+    postgres_database {
+      source_dbserver_id               = "{{ source_dbserver_id }}"
+      base_profile_version_name        = "test1"
+      base_profile_version_description = "test1 desc"
+    }
+    available_cluster_ids = ["{{ cluster_ids }}"]
+  }
+  published = true
+}
\ No newline at end of file
diff --git a/examples/ndb/profiles/terraform.tfvars b/examples/ndb/profiles/terraform.tfvars
new file mode 100644
index 000000000..4f5de990b
--- /dev/null
+++ b/examples/ndb/profiles/terraform.tfvars
@@ -0,0 +1,4 @@
+#define values to the variables to be used in terraform file
+ndb_password = "password"
+ndb_endpoint = "10.xx.xx.xx"
+ndb_username = "username"
diff --git a/examples/ndb/profiles/variables.tf b/examples/ndb/profiles/variables.tf
new file mode 100644
index 000000000..1a0cb89bf
--- /dev/null
+++ b/examples/ndb/profiles/variables.tf
@@ -0,0 +1,10 @@
+#define the type of variables to be used in terraform file
+variable "ndb_username" {
+  type = string
+}
+variable "ndb_password" {
+  type = string
+}
+variable "ndb_endpoint" {
+  type = string
+}
diff --git a/nutanix/data_source_nutanix_ndb_profile.go b/nutanix/data_source_nutanix_ndb_profile.go
index 8c8bacc7e..1e30629a5 100644
--- a/nutanix/data_source_nutanix_ndb_profile.go
+++ b/nutanix/data_source_nutanix_ndb_profile.go
@@ -291,12 +291,16 @@ func dataSourceNutanixEraProfileRead(ctx context.Context, d *schema.ResourceData
 	profileType := ""
 	pID := ""
 	pName := ""
+	profileFilters := &Era.ProfileFilter{}
+
 	if engineType, ok := d.GetOk("engine"); ok {
 		engine = engineType.(string)
+		profileFilters.Engine = engine
 	}

 	if ptype, ok := d.GetOk("profile_type"); ok {
 		profileType = ptype.(string)
+		profileFilters.ProfileType = profileType
 	}

 	profileID, pIDOk := d.GetOk("profile_id")
@@ -308,12 +312,14 @@ func dataSourceNutanixEraProfileRead(ctx context.Context, d *schema.ResourceData
 	}
 	if pIDOk {
 		pID = profileID.(string)
+		profileFilters.ProfileID = pID
 	}
 	if pNameOk {
 		pName = profileName.(string)
+		profileFilters.ProfileName = pName
 	}

-	resp, err := conn.Service.GetProfiles(ctx, engine, profileType, pID, pName)
+	resp, err := conn.Service.GetProfile(ctx, profileFilters)
 	if err != nil {
 		return diag.FromErr(err)
 	}
diff --git a/nutanix/data_source_nutanix_ndb_profiles.go b/nutanix/data_source_nutanix_ndb_profiles.go
index 0a5b446be..37c83d252 100644
--- a/nutanix/data_source_nutanix_ndb_profiles.go
+++ b/nutanix/data_source_nutanix_ndb_profiles.go
@@ -359,7 +359,7 @@ func flattenVersions(erv []*Era.Versions) []map[string]interface{} {
 	return nil
 }

-func flattenProperties(erp []*Era.Properties) []map[string]interface{} {
+func flattenProperties(erp []*Era.ProfileProperties) []map[string]interface{} {
 	if len(erp) > 0 {
 		res := make([]map[string]interface{}, len(erp))

diff --git a/nutanix/provider.go b/nutanix/provider.go
index 
632159abd..7019f2812 100644 --- a/nutanix/provider.go +++ b/nutanix/provider.go @@ -228,6 +228,8 @@ func Provider() *schema.Provider { "nutanix_ndb_sla": resourceNutanixNDBSla(), "nutanix_ndb_database_restore": resourceNutanixNDBDatabaseRestore(), "nutanix_ndb_log_catchups": resourceNutanixNDBLogCatchUps(), + "nutanix_ndb_profile": resourceNutanixNDBProfile(), + "nutanix_ndb_software_version_profile": resourceNutanixNDBSoftwareVersionProfile(), }, ConfigureContextFunc: providerConfigure, } diff --git a/nutanix/resource_nutanix_ndb_profiles.go b/nutanix/resource_nutanix_ndb_profiles.go new file mode 100644 index 000000000..d36324bab --- /dev/null +++ b/nutanix/resource_nutanix_ndb_profiles.go @@ -0,0 +1,1135 @@ +package nutanix + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBProfile() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBProfileCreate, + ReadContext: resourceNutanixNDBProfileRead, + UpdateContext: resourceNutanixNDBProfileUpdate, + DeleteContext: resourceNutanixNDBProfileDelete, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "engine_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "published": { + Type: schema.TypeBool, + Optional: true, + }, + "compute_profile": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"software_profile", "network_profile", "database_parameter_profile"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpus": { + Type: schema.TypeString, + Optional: true, + Default: "1", + }, + "core_per_cpu": { + Type: schema.TypeString, + Optional: true, + Default: "1", + }, + "memory_size": { + Type: schema.TypeString, + Optional: true, + Default: "2", + }, + }, + }, + }, + "software_profile": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"compute_profile", "network_profile", "database_parameter_profile"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topology": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"cluster", "single"}, false), + }, + "postgres_database": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_dbserver_id": { + Type: schema.TypeString, + Optional: true, + }, + "base_profile_version_name": { + Type: schema.TypeString, + Optional: true, + }, + "base_profile_version_description": { + Type: schema.TypeString, + Optional: true, + }, + "os_notes": { + Type: schema.TypeString, + Optional: true, + }, + "db_software_notes": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "available_cluster_ids": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "network_profile": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"compute_profile", "software_profile", "database_parameter_profile"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ 
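+					// topology decides which postgres_database sub-block is
+					// honored below: "single" reads single_instance, while
+					// "cluster" reads ha_instance.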
+ "topology": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"cluster", "single"}, false), + }, + "postgres_database": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "single_instance": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vlan_name": { + Type: schema.TypeString, + Optional: true, + }, + "enable_ip_address_selection": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "ha_instance": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vlan_name": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "cluster_name": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "cluster_id": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "num_of_clusters": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "version_cluster_association": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nx_cluster_id": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "database_parameter_profile": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"compute_profile", "software_profile", "network_profile"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "postgres_database": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_connections": { + Type: schema.TypeString, + Optional: true, + Default: "100", + }, + "max_replication_slots": { + Type: schema.TypeString, + Optional: true, + Default: "10", + }, + "effective_io_concurrency": { + Type: schema.TypeString, + Optional: true, + Default: "1", + }, + "timezone": { + Type: schema.TypeString, + Optional: true, + Default: "UTC", + }, + "max_prepared_transactions": { + Type: schema.TypeString, + Optional: true, + Default: "0", + }, + "max_locks_per_transaction": { + Type: schema.TypeString, + Optional: true, + Default: "64", + }, + "max_wal_senders": { + Type: schema.TypeString, + Optional: true, + Default: "10", + }, + "max_worker_processes": { + Type: schema.TypeString, + Optional: true, + Default: "8", + }, + "min_wal_size": { + Type: schema.TypeString, + Optional: true, + Default: "80MB", + }, + "max_wal_size": { + Type: schema.TypeString, + Optional: true, + Default: "1GB", + }, + "checkpoint_timeout": { + Type: schema.TypeString, + Optional: true, + Default: "5min", + }, + "autovacuum": { + Type: schema.TypeString, + Optional: true, + Default: "on", + }, + "checkpoint_completion_target": { + Type: schema.TypeString, + Optional: true, + Default: "0.5", + }, + "autovacuum_freeze_max_age": { + Type: schema.TypeString, + Optional: true, + Default: "200000000", + }, + "autovacuum_vacuum_threshold": { + Type: schema.TypeString, + Optional: true, + Default: "50", + }, + "autovacuum_vacuum_scale_factor": { + Type: schema.TypeString, + Optional: true, + Default: "0.2", + }, + "autovacuum_work_mem": { + Type: schema.TypeString, + Optional: true, + Default: "-1", + }, + "autovacuum_max_workers": { + Type: schema.TypeString, + Optional: true, + Default: "3", + }, + "autovacuum_vacuum_cost_delay": { + Type: schema.TypeString, + 
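+						// The defaults in this block appear to mirror NDB's
+						// stock PostgreSQL parameter profile; unset fields
+						// simply fall back to the values declared here.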
Optional: true, + Default: "2ms", + }, + "wal_buffers": { + Type: schema.TypeString, + Optional: true, + Default: "-1", + }, + "synchronous_commit": { + Type: schema.TypeString, + Optional: true, + Default: "on", + }, + "random_page_cost": { + Type: schema.TypeString, + Optional: true, + Default: "4", + }, + "wal_keep_segments": { + Type: schema.TypeString, + Optional: true, + Default: "700", + }, + }, + }, + }, + }, + }, + }, + + // computed arguments + "status": { + Type: schema.TypeString, + Computed: true, + }, + "owner": { + Type: schema.TypeString, + Computed: true, + }, + "latest_version": { + Type: schema.TypeString, + Computed: true, + }, + "latest_version_id": { + Type: schema.TypeString, + Computed: true, + }, + "versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "owner": { + Type: schema.TypeString, + Computed: true, + }, + "engine_type": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "topology": { + Type: schema.TypeString, + Computed: true, + }, + "db_version": { + Type: schema.TypeString, + Computed: true, + }, + "system_profile": { + Type: schema.TypeBool, + Computed: true, + }, + "version": { + Type: schema.TypeString, + Computed: true, + }, + "profile_id": { + Type: schema.TypeString, + Computed: true, + }, + "published": { + Type: schema.TypeBool, + Computed: true, + }, + "deprecated": { + Type: schema.TypeBool, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "properties_map": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "version_cluster_association": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "profile_version_id": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "optimized_for_provisioning": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "assoc_databases": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "assoc_db_servers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + 
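+			// Read-only block reporting, per Nutanix cluster, whether this
+			// profile has been made available on that cluster.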
"cluster_availability": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "profile_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func resourceNutanixNDBProfileCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.ProfileRequest{} + softwareProfile := false + if name, ok := d.GetOk("name"); ok { + req.Name = utils.StringPtr(name.(string)) + } + + if des, ok := d.GetOk("description"); ok { + req.Description = utils.StringPtr(des.(string)) + } + + if engType, ok := d.GetOk("engine_type"); ok { + req.EngineType = utils.StringPtr(engType.(string)) + } + + if cp, ok := d.GetOk("compute_profile"); ok { + req.Properties = buildComputeProfileRequest(cp) + // setting some defaults values which are generated at runtime + req.Topology = utils.StringPtr("ALL") + req.Type = utils.StringPtr("Compute") + req.SystemProfile = false + req.DBVersion = utils.StringPtr("ALL") + } + + if np, ok := d.GetOk("network_profile"); ok { + nps := np.([]interface{}) + + for _, v := range nps { + val := v.(map[string]interface{}) + + if tp, ok := val["topology"]; ok { + req.Topology = utils.StringPtr(tp.(string)) + + // other details + req.Type = utils.StringPtr("Network") + req.SystemProfile = false + req.DBVersion = utils.StringPtr("ALL") + } + + if ps, ok := val["postgres_database"]; ok { + req.Properties = expandNetworkProfileProperties(ctx, meta, ps.([]interface{})) + } + + if cls, ok := val["version_cluster_association"]; ok { + clster := cls.([]interface{}) + out := make([]*era.VersionClusterAssociation, len(clster)) + for _, v := range clster { + val := v.(map[string]interface{}) + + if p1, ok1 := val["nx_cluster_id"]; ok1 { + out = append(out, &era.VersionClusterAssociation{ + NxClusterID: utils.StringPtr(p1.(string)), + }) + } + } + req.VersionClusterAssociation = out + } + } + } + + if db, ok := d.GetOk("database_parameter_profile"); ok { + req.Properties = buildDatabaseProfileProperties(db.([]interface{})) + + // setting some defaults values which are generated at runtime + req.Topology = utils.StringPtr("ALL") + req.Type = utils.StringPtr("Database_Parameter") + req.SystemProfile = false + req.DBVersion = utils.StringPtr("ALL") + } + + if sp, ok := d.GetOk("software_profile"); ok { + softwareProfile = true + splist := sp.([]interface{}) + + for _, v := range splist { + val := v.(map[string]interface{}) + + if tp, ok := val["topology"]; ok { + req.Topology = utils.StringPtr(tp.(string)) + + // other details + req.Type = utils.StringPtr("Software") + req.SystemProfile = false + req.DBVersion = utils.StringPtr("ALL") + } + + if ps, ok := val["postgres_database"]; ok { + req.Properties = expandSoftwareProfileProp(ps.([]interface{})) + } + + if ac, ok1 := d.GetOk("available_cluster_ids"); ok1 { + st := ac.([]interface{}) + sublist := make([]*string, len(st)) + + for a := range st { + sublist[a] = utils.StringPtr(st[a].(string)) + } + req.AvailableClusterIds = sublist + } + } + } + + if softwareProfile { + resp, er := conn.Service.CreateSoftwareProfiles(ctx, req) + if er != nil { + return 
+		}
+
+		// Get Operation ID from response of SoftwareProfile and poll for the operation to get completed.
+		opID := resp.OperationID
+		if opID == nil || *opID == "" {
+			return diag.Errorf("error: operation ID is empty")
+		}
+		opReq := era.GetOperationRequest{
+			OperationID: utils.StringValue(opID),
+		}
+
+		log.Printf("polling for operation with id: %s\n", *opID)
+
+		// Poll for operation here - Operation GET Call
+		stateConf := &resource.StateChangeConf{
+			Pending: []string{"PENDING"},
+			Target:  []string{"COMPLETED", "FAILED"},
+			Refresh: eraRefresh(ctx, conn, opReq),
+			Timeout: d.Timeout(schema.TimeoutCreate),
+			Delay:   eraDelay,
+		}
+
+		if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil {
+			return diag.Errorf("error waiting for software profile (%s) to create: %s", *resp.EntityID, errWaitTask)
+		}
+		d.SetId(*resp.EntityID)
+	} else {
+		resp, err := conn.Service.CreateProfiles(ctx, req)
+		if err != nil {
+			return diag.FromErr(err)
+		}
+		d.SetId(*resp.ID)
+	}
+
+	// Now publish the profile if the published argument is set.
+
+	if publish, ok := d.GetOk("published"); ok {
+		req := &era.ProfileRequest{}
+		netReq := &era.UpdateProfileRequest{}
+
+		req.Published = publish.(bool)
+
+		// profile filter spec
+		profileFilter := &era.ProfileFilter{}
+		profileFilter.ProfileID = d.Id()
+		res, err := conn.Service.GetProfile(ctx, profileFilter)
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
+		if res == nil || len(res.Versions) == 0 {
+			return diag.Errorf("error: profile (%s) has no versions to publish", d.Id())
+		}
+		netReq.Name = res.Name
+		netReq.Description = res.Description
+		req.Properties = res.Versions[0].Properties
+		versionID := res.Versions[0].ID
+
+		_, eror := conn.Service.UpdateProfile(ctx, netReq, d.Id())
+		if eror != nil {
+			return diag.FromErr(eror)
+		}
+
+		_, er := conn.Service.UpdateProfileVersion(ctx, req, d.Id(), *versionID)
+		if er != nil {
+			return diag.FromErr(er)
+		}
+	}
+	return resourceNutanixNDBProfileRead(ctx, d, meta)
+}
+
+func resourceNutanixNDBProfileRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*Client).Era
+
+	// profile filter spec
+	profileFilter := &era.ProfileFilter{}
+	profileFilter.ProfileID = d.Id()
+
+	resp, err := conn.Service.GetProfile(ctx, profileFilter)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	if err := d.Set("name", resp.Name); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("description", resp.Description); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("engine_type", resp.Enginetype); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("status", resp.Status); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("owner", resp.Owner); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("latest_version", resp.Latestversion); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("versions", flattenVersions(resp.Versions)); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("latest_version_id", resp.Latestversionid); err != nil {
+		return diag.FromErr(err)
+	}
+
+	if resp.Assocdbservers != nil {
+		d.Set("assoc_db_servers", resp.Assocdbservers)
+	} else {
+		d.Set("assoc_db_servers", nil)
+	}
+
+	if resp.Assocdatabases != nil {
+		d.Set("assoc_databases", resp.Assocdatabases)
+	} else {
+		d.Set("assoc_databases", nil)
+	}
+
+	if resp.Nxclusterid != nil {
+		d.Set("nx_cluster_id", resp.Nxclusterid)
+	} else {
+		d.Set("nx_cluster_id", nil)
+	}
+
+	if resp.Clusteravailability != nil {
+		d.Set("cluster_availability", flattenClusterAvailability(resp.Clusteravailability))
+	} else {
d.Set("cluster_availability", nil) + } + + return nil +} + +func resourceNutanixNDBProfileUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.ProfileRequest{} + + netReq := &era.UpdateProfileRequest{} + + // profile filter spec + profileFilter := &era.ProfileFilter{} + profileFilter.ProfileID = d.Id() + + res, err := conn.Service.GetProfile(ctx, profileFilter) + if err != nil { + diag.FromErr(err) + } + + if res != nil { + netReq.Name = res.Name + netReq.Description = res.Description + req.Properties = res.Versions[0].Properties + } + + if pub, ok := d.GetOk("published"); ok { + req.Published = pub.(bool) + } + + if d.HasChange("name") { + netReq.Name = utils.StringPtr(d.Get("name").(string)) + // update version name as well + versionName := d.Get("name").(string) + updateVersionName := versionName + " " + " (1.0)" + req.Name = utils.StringPtr(updateVersionName) + } + + if d.HasChange("description") { + netReq.Description = utils.StringPtr(d.Get("description").(string)) + req.Description = utils.StringPtr(d.Get("description").(string)) + } + + if d.HasChange("compute_profile") { + req.Properties = buildComputeProfileRequest(d.Get("compute_profile")) + } + + if d.HasChange("network_profile") { + nps := d.Get("network_profile").([]interface{}) + + for _, v := range nps { + val := v.(map[string]interface{}) + + if ps, ok := val["postgres_database"]; ok { + req.Properties = expandNetworkProfileProperties(ctx, meta, ps.([]interface{})) + } + + if cls, ok := val["version_cluster_association"]; ok { + clster := cls.([]interface{}) + out := make([]*era.VersionClusterAssociation, len(clster)) + for _, v := range clster { + val := v.(map[string]interface{}) + + if p1, ok1 := val["nx_cluster_id"]; ok1 { + out = append(out, &era.VersionClusterAssociation{ + NxClusterID: utils.StringPtr(p1.(string)), + }) + } + } + req.VersionClusterAssociation = out + } + } + } + + if d.HasChange("database_parameter_profile") { + req.Properties = buildDatabaseProfileProperties(d.Get("database_parameter_profile").([]interface{})) + } + + if d.HasChange("software_profile") { + splist := d.Get("software_profile").([]interface{}) + + for _, v := range splist { + val := v.(map[string]interface{}) + + if ps, ok := val["postgres_database"]; ok { + req.Properties = expandSoftwareProfileProp(ps.([]interface{})) + } + + if ac, ok1 := d.GetOk("available_cluster_ids"); ok1 { + st := ac.([]interface{}) + sublist := make([]*string, len(st)) + + for a := range st { + sublist[a] = utils.StringPtr(st[a].(string)) + } + req.AvailableClusterIds = sublist + } + } + } + + versionID := res.Versions[0].ID + + _, eror := conn.Service.UpdateProfile(ctx, netReq, d.Id()) + if eror != nil { + return diag.FromErr(eror) + } + + _, er := conn.Service.UpdateProfileVersion(ctx, req, d.Id(), *versionID) + if er != nil { + return diag.FromErr(er) + } + + return resourceNutanixNDBProfileRead(ctx, d, meta) +} + +func resourceNutanixNDBProfileDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.DeleteProfile(ctx, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if resp == utils.StringPtr("Profile Successfully Deleted.") { + d.SetId("") + } + return nil +} + +func buildComputeProfileRequest(p interface{}) []*era.ProfileProperties { + if p != nil { + computeProp := []*era.ProfileProperties{} + pc := p.([]interface{}) + for _, v := range pc { + val := 
v.(map[string]interface{}) + if cpu, ok := val["cpus"]; ok { + computeProp = append(computeProp, &era.ProfileProperties{ + Name: utils.StringPtr("CPUS"), + Value: utils.StringPtr(cpu.(string)), + Secure: false, + }) + } + + if coreCPU, ok := val["core_per_cpu"]; ok { + computeProp = append(computeProp, &era.ProfileProperties{ + Name: utils.StringPtr("CORE_PER_CPU"), + Value: utils.StringPtr(coreCPU.(string)), + Secure: false, + }) + } + + if mem, ok := val["memory_size"]; ok { + computeProp = append(computeProp, &era.ProfileProperties{ + Name: utils.StringPtr("MEMORY_SIZE"), + Value: utils.StringPtr(mem.(string)), + Secure: false, + }) + } + } + return computeProp + } + return nil +} + +func expandNetworkProfileProperties(ctx context.Context, meta interface{}, ps []interface{}) []*era.ProfileProperties { + prop := []*era.ProfileProperties{} + if len(ps) > 0 { + for _, v := range ps { + inst := v.(map[string]interface{}) + + if sIns, ok := inst["single_instance"]; ok && len(sIns.([]interface{})) > 0 { + prop = expandNetworkSingleInstance(sIns.([]interface{})) + } + + if hIns, ok := inst["ha_instance"]; ok && len(hIns.([]interface{})) > 0 { + prop = expandNetworkHAInstance(ctx, meta, hIns.([]interface{})) + } + } + } + return prop +} + +func buildDatabaseProfileProperties(ps []interface{}) []*era.ProfileProperties { + prop := []*era.ProfileProperties{} + if len(ps) > 0 { + for _, v := range ps { + val := v.(map[string]interface{}) + if psdb, ok := val["postgres_database"]; ok { + brr := psdb.([]interface{}) + + postgresProp := brr[0].(map[string]interface{}) + for key, value := range postgresProp { + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr(key), + Value: utils.StringPtr(value.(string)), + Secure: false, + }) + } + } + } + } + return prop +} + +func expandSoftwareProfileProp(ps []interface{}) []*era.ProfileProperties { + prop := []*era.ProfileProperties{} + if len(ps) > 0 { + for _, v := range ps { + val := v.(map[string]interface{}) + + if p1, ok1 := val["source_dbserver_id"]; ok1 && len(p1.(string)) > 0 { + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr("SOURCE_DBSERVER_ID"), + Value: utils.StringPtr(p1.(string)), + Secure: false, + Description: utils.StringPtr("ID of the database server that should be used as a reference to create the software profile"), + }) + } + if p1, ok1 := val["base_profile_version_name"]; ok1 && len(p1.(string)) > 0 { + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr("BASE_PROFILE_VERSION_NAME"), + Value: utils.StringPtr(p1.(string)), + Secure: false, + Description: utils.StringPtr("Name of the base profile version."), + }) + } + if p1, ok1 := val["base_profile_version_description"]; ok1 && len(p1.(string)) > 0 { + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr("BASE_PROFILE_VERSION_DESCRIPTION"), + Value: utils.StringPtr(p1.(string)), + Secure: false, + Description: utils.StringPtr("Description of the base profile version."), + }) + } + if p1, ok1 := val["os_notes"]; ok1 && len(p1.(string)) > 0 { + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr("OS_NOTES"), + Value: utils.StringPtr(p1.(string)), + Secure: false, + Description: utils.StringPtr("Notes or description for the Operating System."), + }) + } + if p1, ok1 := val["db_software_notes"]; ok1 && len(p1.(string)) > 0 { + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr("DB_SOFTWARE_NOTES"), + Value: utils.StringPtr(p1.(string)), + Secure: false, + Description: 
utils.StringPtr("Description of the Postgres database software."), + }) + } + } + return prop + } + return nil +} + +func expandNetworkSingleInstance(ps []interface{}) []*era.ProfileProperties { + if len(ps) > 0 { + prop := []*era.ProfileProperties{} + for _, v := range ps { + val := v.(map[string]interface{}) + + if p1, ok1 := val["vlan_name"]; ok1 { + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr("VLAN_NAME"), + Value: utils.StringPtr(p1.(string)), + Secure: false, + Description: utils.StringPtr("Name of the vLAN"), + }) + } + + if p1, ok1 := val["enable_ip_address_selection"]; ok1 { + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr("ENABLE_IP_ADDRESS_SELECTION"), + Value: utils.StringPtr(p1.(string)), + }) + } + } + return prop + } + return nil +} + +func expandNetworkHAInstance(ctx context.Context, meta interface{}, ps []interface{}) []*era.ProfileProperties { + prop := []*era.ProfileProperties{} + for _, v := range ps { + val := v.(map[string]interface{}) + if numCls, ok := val["num_of_clusters"]; ok { + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr("NUM_CLUSTERS"), + Value: utils.StringPtr(numCls.(string)), + }) + } + + if p1, ok1 := val["enable_ip_address_selection"]; ok1 { + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr("ENABLE_IP_ADDRESS_SELECTION"), + Value: utils.StringPtr(p1.(string)), + }) + } + + if numVlan, ok := val["vlan_name"]; ok { + vlans := numVlan.([]interface{}) + for k, vl := range vlans { + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr(fmt.Sprintf("VLAN_NAME_%d", k)), + Value: utils.StringPtr(vl.(string)), + }) + } + } + + if clsName, ok := val["cluster_name"]; ok && len(clsName.([]interface{})) > 0 { + vlans := clsName.([]interface{}) + for k, vl := range vlans { + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr(fmt.Sprintf("CLUSTER_NAME_%d", k)), + Value: utils.StringPtr(vl.(string)), + }) + + // call the cluster API to fetch cluster id + conn := meta.(*Client).Era + resp, _ := conn.Service.GetCluster(ctx, "", vl.(string)) + + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr(fmt.Sprintf("CLUSTER_ID_%d", k)), + Value: utils.StringPtr(*resp.ID), + }) + } + } + + if clsID, ok := val["cluster_id"]; ok && len(clsID.([]interface{})) > 0 { + vlans := clsID.([]interface{}) + for k, vl := range vlans { + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr(fmt.Sprintf("CLUSTER_ID_%d", k)), + Value: utils.StringPtr(vl.(string)), + }) + + conn := meta.(*Client).Era + resp, _ := conn.Service.GetCluster(ctx, vl.(string), "") + + prop = append(prop, &era.ProfileProperties{ + Name: utils.StringPtr(fmt.Sprintf("CLUSTER_NAME_%d", k)), + Value: utils.StringPtr(*resp.Uniquename), + }) + } + } + } + return prop +} diff --git a/nutanix/resource_nutanix_ndb_profiles_test.go b/nutanix/resource_nutanix_ndb_profiles_test.go new file mode 100644 index 000000000..70630064d --- /dev/null +++ b/nutanix/resource_nutanix_ndb_profiles_test.go @@ -0,0 +1,243 @@ +package nutanix + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceNameProfile = "nutanix_ndb_profile.acctest-managed-profile" + +func TestAccEraProfile_ByCompute(t *testing.T) { + name := "test-compute-tf" + desc := "this is compute desc" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: 
testAccEraProfileConfigByCompute(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameProfile, "name", name), + resource.TestCheckResourceAttr(resourceNameProfile, "description", desc), + resource.TestCheckResourceAttr(resourceNameProfile, "versions.#", "1"), + resource.TestCheckResourceAttr(resourceNameProfile, "compute_profile.0.cpus", "1"), + resource.TestCheckResourceAttr(resourceNameProfile, "compute_profile.0.core_per_cpu", "2"), + resource.TestCheckResourceAttr(resourceNameProfile, "compute_profile.0.memory_size", "2"), + ), + }, + }, + }) +} +func TestAccEraProfile_BySoftware(t *testing.T) { + t.Skip() + name := "test-software-tf" + desc := "this is software desc" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraProfileConfigBySoftware(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameProfile, "name", name), + resource.TestCheckResourceAttr(resourceNameProfile, "description", desc), + resource.TestCheckResourceAttr(resourceNameProfile, "versions.#", "1"), + ), + }, + }, + }) +} + +func TestAccEraProfile_ByDatabaseParams(t *testing.T) { + name := "test-software-tf" + desc := "this is software desc" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraProfileConfigByDatabaseParams(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameProfile, "name", name), + resource.TestCheckResourceAttr(resourceNameProfile, "description", desc), + resource.TestCheckResourceAttr(resourceNameProfile, "versions.#", "1"), + resource.TestCheckResourceAttr(resourceNameProfile, "database_parameter_profile.0.postgres_database.0.max_connections", "100"), + resource.TestCheckResourceAttr(resourceNameProfile, "database_parameter_profile.0.postgres_database.0.max_replication_slots", "10"), + resource.TestCheckResourceAttr(resourceNameProfile, "database_parameter_profile.0.postgres_database.0.max_wal_senders", "10"), + resource.TestCheckResourceAttr(resourceNameProfile, "database_parameter_profile.0.postgres_database.0.max_wal_size", "1GB"), + resource.TestCheckResourceAttr(resourceNameProfile, "database_parameter_profile.0.postgres_database.0.wal_buffers", "-1"), + resource.TestCheckResourceAttr(resourceNameProfile, "database_parameter_profile.0.postgres_database.0.random_page_cost", "4"), + resource.TestCheckResourceAttr(resourceNameProfile, "database_parameter_profile.0.postgres_database.0.autovacuum_freeze_max_age", "200000000"), + resource.TestCheckResourceAttr(resourceNameProfile, "database_parameter_profile.0.postgres_database.0.checkpoint_completion_target", "0.5"), + resource.TestCheckResourceAttr(resourceNameProfile, "database_parameter_profile.0.postgres_database.0.checkpoint_timeout", "5min"), + resource.TestCheckResourceAttr(resourceNameProfile, "database_parameter_profile.0.postgres_database.0.max_worker_processes", "8"), + ), + }, + }, + }) +} + +func TestAccEraProfile_ByNetwork(t *testing.T) { + name := "test-network-tf" + desc := "this is network desc" + subnet := testVars.SubnetName + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraProfileConfigByNetwork(name, desc, subnet), + Check: 
resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameProfile, "name", name), + resource.TestCheckResourceAttr(resourceNameProfile, "description", desc), + resource.TestCheckResourceAttr(resourceNameProfile, "versions.#", "1"), + ), + }, + }, + }) +} + +func TestAccEraProfile_ByNetworkHAPostgres(t *testing.T) { + name := "test-network-tf" + desc := "this is network desc for HA postgres" + subnet := testVars.SubnetName + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraProfileConfigByNetworkHA(name, desc, subnet), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameProfile, "name", name), + resource.TestCheckResourceAttr(resourceNameProfile, "description", desc), + resource.TestCheckResourceAttr(resourceNameProfile, "versions.#", "1"), + ), + }, + }, + }) +} + +func testAccEraProfileConfigByCompute(name, desc string) string { + return fmt.Sprintf(` + resource "nutanix_ndb_profile" "acctest-managed-profile" { + name = "%[1]s" + description = "%[2]s" + compute_profile{ + cpus = 1 + core_per_cpu = 2 + memory_size = 2 + } + published= true + } + `, name, desc) +} + +func testAccEraProfileConfigBySoftware(name, desc string) string { + return fmt.Sprintf(` + data "nutanix_ndb_clusters" "clusters"{} + + locals{ + clusters = { + for p in data.nutanix_ndb_clusters.clusters.clusters: p.name => p + } + } + resource "nutanix_ndb_profile" "name12" { + name= "%[1]s" + description = "%[2]s" + engine_type = "postgres_database" + software_profile { + topology = "single" + postgres_database{ + source_dbserver_id = "" + base_profile_version_name = "test1" + base_profile_version_description= "test1 desc" + } + available_cluster_ids= [local.clusters.EraCluster.id] + } + published = true + } + `, name, desc) +} + +func testAccEraProfileConfigByNetwork(name, desc, subnet string) string { + return fmt.Sprintf(` + data "nutanix_ndb_clusters" "clusters"{} + + locals{ + clusters = { + for p in data.nutanix_ndb_clusters.clusters.clusters: p.name => p + } + } + resource "nutanix_ndb_profile" "acctest-managed-profile" { + name = "%[1]s" + description = "%[2]s" + engine_type = "postgres_database" + network_profile{ + topology = "single" + postgres_database{ + single_instance{ + vlan_name = "%[3]s" + } + } + version_cluster_association{ + nx_cluster_id = local.clusters.EraCluster.id + } + } + published = true + } + `, name, desc, subnet) +} + +func testAccEraProfileConfigByDatabaseParams(name, desc string) string { + return fmt.Sprintf(` + data "nutanix_ndb_clusters" "clusters"{} + + locals{ + clusters = { + for p in data.nutanix_ndb_clusters.clusters.clusters: p.name => p + } + } + resource "nutanix_ndb_profile" "acctest-managed-profile" { + name = "%[1]s" + description = "%[2]s" + engine_type = "postgres_database" + database_parameter_profile { + postgres_database { + max_connections = "100" + max_replication_slots = "10" + } + } + published = true + } + `, name, desc) +} + +func testAccEraProfileConfigByNetworkHA(name, desc, subnet string) string { + return fmt.Sprintf(` + data "nutanix_ndb_clusters" "clusters"{} + + locals{ + clusters = { + for p in data.nutanix_ndb_clusters.clusters.clusters: p.name => p + } + } + resource "nutanix_ndb_profile" "acctest-managed-profile" { + name = "%[1]s" + description = "%[2]s" + engine_type = "postgres_database" + network_profile{ + topology = "cluster" + postgres_database{ + ha_instance{ + num_of_clusters= "1" 
+ vlan_name = ["%[3]s"] + cluster_name = [local.clusters.EraCluster.name] + } + } + } + published = true + } + `, name, desc, subnet) +} diff --git a/nutanix/resource_nutanix_ndb_software_version_profile.go b/nutanix/resource_nutanix_ndb_software_version_profile.go new file mode 100644 index 000000000..78ccc8005 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_software_version_profile.go @@ -0,0 +1,411 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBSoftwareVersionProfile() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBSoftwareVersionProfileCreate, + ReadContext: resourceNutanixNDBSoftwareVersionProfileRead, + UpdateContext: resourceNutanixNDBSoftwareVersionProfileUpdate, + DeleteContext: resourceNutanixNDBSoftwareVersionProfileDelete, + Schema: map[string]*schema.Schema{ + "profile_id": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "engine_type": { + Type: schema.TypeString, + Required: true, + }, + "status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"deprecated", "published", "unpublished"}, false), + }, + "postgres_database": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_dbserver_id": { + Type: schema.TypeString, + Optional: true, + }, + "os_notes": { + Type: schema.TypeString, + Optional: true, + }, + "db_software_notes": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "available_cluster_ids": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + // computed arguments + "owner": { + Type: schema.TypeString, + Computed: true, + }, + "db_version": { + Type: schema.TypeString, + Computed: true, + }, + "topology": { + Type: schema.TypeString, + Computed: true, + }, + "system_profile": { + Type: schema.TypeBool, + Computed: true, + }, + "version": { + Type: schema.TypeString, + Computed: true, + }, + "published": { + Type: schema.TypeBool, + Computed: true, + }, + "deprecated": { + Type: schema.TypeBool, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "properties_map": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "version_cluster_association": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: 
schema.TypeString, + Computed: true, + }, + "profile_version_id": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "optimized_for_provisioning": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func resourceNutanixNDBSoftwareVersionProfileCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.ProfileRequest{} + profileID := "" + // pre-filled requests + + req.DBVersion = utils.StringPtr("ALL") + req.SystemProfile = false + req.Type = utils.StringPtr("Software") + + if pID, ok := d.GetOk("profile_id"); ok { + profileID = pID.(string) + } + + if name, ok := d.GetOk("name"); ok { + req.Name = utils.StringPtr(name.(string)) + } + + if desc, ok := d.GetOk("description"); ok { + req.Description = utils.StringPtr(desc.(string)) + } + + if engType, ok := d.GetOk("engine_type"); ok { + req.EngineType = utils.StringPtr(engType.(string)) + } + + if ps, ok := d.GetOk("postgres_database"); ok { + req.Properties = expandSoftwareProfileProp(ps.([]interface{})) + } + + if ac, ok1 := d.GetOk("available_cluster_ids"); ok1 { + st := ac.([]interface{}) + sublist := make([]*string, len(st)) + + for a := range st { + sublist[a] = utils.StringPtr(st[a].(string)) + } + req.AvailableClusterIds = sublist + } + + // API to create software versions + + resp, err := conn.Service.CreateSoftwareProfileVersion(ctx, profileID, req) + if err != nil { + return diag.FromErr(err) + } + + // Get Operation ID from response of SoftwareProfileVersion and poll for the operation to get completed. 
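+	// NOTE: a sketch of the flow, based on the calls below: version creation
+	// is asynchronous, so NDB hands back an operation ID up front and the
+	// provider blocks on a StateChangeConf poller until that operation reaches
+	// a terminal state (COMPLETED or FAILED) before persisting the entity ID.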
+	opID := resp.OperationID
+	if opID == nil || *opID == "" {
+		return diag.Errorf("error: operation ID is empty")
+	}
+	opReq := era.GetOperationRequest{
+		OperationID: utils.StringValue(opID),
+	}
+
+	log.Printf("polling for operation with id: %s\n", *opID)
+
+	// Poll for operation here - Operation GET Call
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"PENDING"},
+		Target:  []string{"COMPLETED", "FAILED"},
+		Refresh: eraRefresh(ctx, conn, opReq),
+		Timeout: d.Timeout(schema.TimeoutCreate),
+		Delay:   eraDelay,
+	}
+
+	if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil {
+		return diag.Errorf("error waiting for software profile version (%s) to create: %s", *resp.EntityID, errWaitTask)
+	}
+	d.SetId(*resp.EntityID)
+
+	// spec for update profile request
+
+	updateSpec := &era.ProfileRequest{}
+
+	// getting name & description
+	updateSpec.Name = req.Name
+	updateSpec.Description = req.Description
+
+	// now call the Update Profile API if publish params given
+	if status, ok := d.GetOk("status"); ok {
+		statusValue := status.(string)
+
+		switch statusValue {
+		case "published":
+			updateSpec.Published = true
+			updateSpec.Deprecated = false
+		case "unpublished":
+			updateSpec.Published = false
+			updateSpec.Deprecated = false
+		default:
+			updateSpec.Published = false
+			updateSpec.Deprecated = true
+		}
+	}
+
+	// update the software profile version
+	_, er := conn.Service.UpdateProfileVersion(ctx, updateSpec, profileID, d.Id())
+	if er != nil {
+		return diag.FromErr(er)
+	}
+
+	return resourceNutanixNDBSoftwareVersionProfileRead(ctx, d, meta)
+}
+
+func resourceNutanixNDBSoftwareVersionProfileRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*Client).Era
+
+	// Get Profile Version API
+	profileID := d.Get("profile_id")
+	resp, err := conn.Service.GetSoftwareProfileVersion(ctx, profileID.(string), d.Id())
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("name", resp.Name); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("description", resp.Description); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("engine_type", resp.Enginetype); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("owner", resp.Owner); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("db_version", resp.Dbversion); err != nil {
+		return diag.FromErr(err)
+	}
+
+	if err := d.Set("topology", resp.Topology); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("system_profile", resp.Systemprofile); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("version", resp.Version); err != nil {
+		return diag.FromErr(err)
+	}
+
+	if err := d.Set("published", resp.Published); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("deprecated", resp.Deprecated); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("properties", flattenProperties(resp.Properties)); err != nil {
+		return diag.FromErr(err)
+	}
+
+	if err := d.Set("properties_map", utils.ConvertMapString(resp.Propertiesmap)); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("version_cluster_association", flattenClusterAssociation(resp.VersionClusterAssociation)); err != nil {
+		return diag.FromErr(err)
+	}
+
+	return nil
+}
+
+func resourceNutanixNDBSoftwareVersionProfileUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*Client).Era
+
+	req := &era.ProfileRequest{}
+
+	profileID := d.Get("profile_id")
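+	// NOTE: a software profile version is addressed by the pair of IDs
+	// (parent profile ID, version ID); the Get/Update/Delete calls here all
+	// take both, with the version ID stored as the Terraform resource ID.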
:= d.Get("profile_id") + // get the software profile version + + oldResp, err := conn.Service.GetSoftwareProfileVersion(ctx, profileID.(string), d.Id()) + if err != nil { + diag.FromErr(err) + } + + if oldResp != nil { + req.Name = oldResp.Name + req.Description = oldResp.Description + } + + if d.HasChange("name") { + req.Name = utils.StringPtr(d.Get("name").(string)) + } + + if d.HasChange("description") { + req.Description = utils.StringPtr(d.Get("description").(string)) + } + + if status, ok := d.GetOk("status"); ok { + statusValue := status.(string) + switch { + case statusValue == "published": + req.Published = true + req.Deprecated = false + case statusValue == "unpublished": + req.Published = false + req.Deprecated = false + default: + req.Published = false + req.Deprecated = true + } + } + + //update for software profile version + _, er := conn.Service.UpdateProfileVersion(ctx, req, profileID.(string), d.Id()) + if er != nil { + return diag.FromErr(er) + } + + return resourceNutanixNDBSoftwareVersionProfileRead(ctx, d, meta) +} + +func resourceNutanixNDBSoftwareVersionProfileDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + profileID := d.Get("profile_id") + resp, err := conn.Service.DeleteProfileVersion(ctx, profileID.(string), d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if resp == utils.StringPtr("Profile Successfully Deleted.") { + d.SetId("") + } + return nil +} From ada0ad1f0760c9ce59292505c2a1dd683dd2d269 Mon Sep 17 00:00:00 2001 From: Abhishekism9450 <32683845+Abhishekism9450@users.noreply.github.com> Date: Fri, 13 Jan 2023 13:02:34 +0530 Subject: [PATCH 08/18] Feat/m ndb database scale (#517) --- client/era/era_service.go | 12 + client/era/era_structs.go | 5 + examples/ndb/database_scale/main.tf | 24 ++ examples/ndb/database_scale/terraform.tfvars | 4 + examples/ndb/database_scale/variables.tf | 10 + nutanix/data_source_nutanix_ndb_database.go | 31 +- nutanix/provider.go | 2 + .../resource_nutanix_ndb_database_scale.go | 281 ++++++++++++++++++ 8 files changed, 340 insertions(+), 29 deletions(-) create mode 100644 examples/ndb/database_scale/main.tf create mode 100644 examples/ndb/database_scale/terraform.tfvars create mode 100644 examples/ndb/database_scale/variables.tf create mode 100644 nutanix/resource_nutanix_ndb_database_scale.go diff --git a/client/era/era_service.go b/client/era/era_service.go index f4f6e6d3b..34c347e50 100644 --- a/client/era/era_service.go +++ b/client/era/era_service.go @@ -37,6 +37,7 @@ type Service interface { CreateSoftwareProfileVersion(ctx context.Context, id string, req *ProfileRequest) (*SoftwareProfileResponse, error) UpdateProfileVersion(ctx context.Context, req *ProfileRequest, id string, vid string) (*ListProfileResponse, error) DeleteProfileVersion(ctx context.Context, profileID string, profileVersionID string) (*string, error) + DatabaseScale(ctx context.Context, id string, req *DatabaseScale) (*ProvisionDatabaseResponse, error) } type ServiceClient struct { @@ -352,10 +353,21 @@ func (sc ServiceClient) DatabaseRestore(ctx context.Context, databaseID string, func (sc ServiceClient) LogCatchUp(ctx context.Context, tmsID string, req *LogCatchUpRequest) (*ProvisionDatabaseResponse, error) { httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/tms/%s/log-catchups", tmsID), req) + res := new(ProvisionDatabaseResponse) + if err != nil { return nil, err } + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) 
+func (sc ServiceClient) DatabaseScale(ctx context.Context, databaseID string, req *DatabaseScale) (*ProvisionDatabaseResponse, error) {
+	httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/databases/%s/update/extend-storage", databaseID), req)
 	res := new(ProvisionDatabaseResponse)
+
+	if err != nil {
+		return nil, err
+	}
 	return res, sc.c.Do(ctx, httpReq, res)
 }
diff --git a/client/era/era_structs.go b/client/era/era_structs.go
index e7c65be51..03fc6124c 100644
--- a/client/era/era_structs.go
+++ b/client/era/era_structs.go
@@ -1008,6 +1008,11 @@ type LogCatchUpRequest struct {
 	Actionarguments []*Actionarguments `json:"actionArguments,omitempty"`
 }
 
+type DatabaseScale struct {
+	ApplicationType *string            `json:"applicationType,omitempty"`
+	Actionarguments []*Actionarguments `json:"actionArguments,omitempty"`
+}
+
 type ProfileProperties struct {
 	Name  *string `json:"name,omitempty"`
 	Value *string `json:"value,omitempty"`
diff --git a/examples/ndb/database_scale/main.tf b/examples/ndb/database_scale/main.tf
new file mode 100644
index 000000000..9a243477a
--- /dev/null
+++ b/examples/ndb/database_scale/main.tf
@@ -0,0 +1,24 @@
+terraform{
+    required_providers {
+        nutanix = {
+            source = "nutanix/nutanix"
+            version = "1.8.0"
+        }
+    }
+}
+
+#defining nutanix configuration
+provider "nutanix"{
+    ndb_username = var.ndb_username
+    ndb_password = var.ndb_password
+    ndb_endpoint = var.ndb_endpoint
+    insecure = true
+}
+
+## resource to scale database
+
+resource "nutanix_ndb_database_scale" "scale" {
+    application_type = "{{ Application Type }}"
+    database_uuid = "{{ database_id }}"
+    data_storage_size = 1
+}
\ No newline at end of file
diff --git a/examples/ndb/database_scale/terraform.tfvars b/examples/ndb/database_scale/terraform.tfvars
new file mode 100644
index 000000000..4f5de990b
--- /dev/null
+++ b/examples/ndb/database_scale/terraform.tfvars
@@ -0,0 +1,4 @@
+#define values to the variables to be used in terraform file
+ndb_password = "password"
+ndb_endpoint = "10.xx.xx.xx"
+ndb_username = "username"
diff --git a/examples/ndb/database_scale/variables.tf b/examples/ndb/database_scale/variables.tf
new file mode 100644
index 000000000..1a0cb89bf
--- /dev/null
+++ b/examples/ndb/database_scale/variables.tf
@@ -0,0 +1,10 @@
+#define the type of variables to be used in terraform file
+variable "ndb_username" {
+    type = string
+}
+variable "ndb_password" {
+    type = string
+}
+variable "ndb_endpoint" {
+    type = string
+}
diff --git a/nutanix/data_source_nutanix_ndb_database.go b/nutanix/data_source_nutanix_ndb_database.go
index 398eb2c4d..3b0a54626 100644
--- a/nutanix/data_source_nutanix_ndb_database.go
+++ b/nutanix/data_source_nutanix_ndb_database.go
@@ -1759,35 +1759,8 @@ func dataSourceEraDatabaseNodes() *schema.Schema {
 					Type: schema.TypeString,
 				},
 			},
-			"properties": {
-				Type:     schema.TypeList,
-				Computed: true,
-				Elem: &schema.Resource{
-					Schema: map[string]*schema.Schema{
-						"name": {
-							Type:     schema.TypeString,
-							Computed: true,
-						},
-						"value": {
-							Type:     schema.TypeString,
-							Computed: true,
-						},
-						"ref_id": {
-							Type:     schema.TypeString,
-							Computed: true,
-						},
-						"secure": {
-							Type:     schema.TypeBool,
-							Computed: true,
-						},
-						"description": {
-							Type:     schema.TypeString,
-							Computed: true,
-						},
-					},
-				},
-			},
-			"tags": dataSourceEraDBInstanceTags(),
+			"properties": dataSourceEraDatabaseProperties(),
+			"tags":       dataSourceEraDBInstanceTags(),
 			"database_id": {
 				Type:     schema.TypeString,
 				Computed: true,
diff --git a/nutanix/provider.go b/nutanix/provider.go
index 7019f2812..982dfb846 100644
--- 
a/nutanix/provider.go +++ b/nutanix/provider.go @@ -230,6 +230,8 @@ func Provider() *schema.Provider { "nutanix_ndb_log_catchups": resourceNutanixNDBLogCatchUps(), "nutanix_ndb_profile": resourceNutanixNDBProfile(), "nutanix_ndb_software_version_profile": resourceNutanixNDBSoftwareVersionProfile(), + "nutanix_ndb_scale_database": resourceNutanixNDBScaleDatabase(), + "nutanix_ndb_database_scale": resourceNutanixNDBScaleDatabase(), }, ConfigureContextFunc: providerConfigure, } diff --git a/nutanix/resource_nutanix_ndb_database_scale.go b/nutanix/resource_nutanix_ndb_database_scale.go new file mode 100644 index 000000000..484e1c170 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_database_scale.go @@ -0,0 +1,281 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBScaleDatabase() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBScaleDatabaseCreate, + ReadContext: resourceNutanixNDBScaleDatabaseRead, + UpdateContext: resourceNutanixNDBScaleDatabaseUpdate, + DeleteContext: resourceNutanixNDBScaleDatabaseDelete, + Schema: map[string]*schema.Schema{ + "database_uuid": { + Type: schema.TypeString, + Required: true, + }, + "application_type": { + Type: schema.TypeString, + Required: true, + }, + "data_storage_size": { + Type: schema.TypeInt, + Required: true, + }, + "pre_script_cmd": { + Type: schema.TypeString, + Optional: true, + }, + "post_script_cmd": { + Type: schema.TypeString, + Optional: true, + }, + "scale_count": { + Type: schema.TypeInt, + Optional: true, + }, + + // Computed values + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "databasetype": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + + "value": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + }, + }, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "tags": dataSourceEraDBInstanceTags(), + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "era_created": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "placeholder": { + Type: schema.TypeBool, + Computed: true, + }, + "database_name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "database_cluster_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "database_status": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_logical_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_zone": 
{ + Type: schema.TypeString, + Computed: true, + }, + "info": dataSourceEraDatabaseInfo(), + "group_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "metadata": dataSourceEraDBInstanceMetadata(), + "metric": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "category": { + Type: schema.TypeString, + Computed: true, + }, + "parent_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_source_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "time_machine": dataSourceEraTimeMachine(), + "dbserver_logical_cluster": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_nodes": dataSourceEraDatabaseNodes(), + "linked_databases": dataSourceEraLinkedDatabases(), + }, + } +} + +func resourceNutanixNDBScaleDatabaseCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.DatabaseScale{} + dbUUID := "" + if db, ok := d.GetOk("database_uuid"); ok { + dbUUID = db.(string) + } + + if app, ok := d.GetOk("application_type"); ok { + req.ApplicationType = utils.StringPtr(app.(string)) + } + + // action arguments + + args := []*era.Actionarguments{} + + if dataSize, ok := d.GetOk("data_storage_size"); ok { + args = append(args, &era.Actionarguments{ + Name: "data_storage_size", + Value: utils.IntPtr(dataSize.(int)), + }) + } + + if pre, ok := d.GetOk("pre_script_cmd"); ok { + args = append(args, &era.Actionarguments{ + Name: "pre_script_cmd", + Value: utils.StringPtr(pre.(string)), + }) + } + + if post, ok := d.GetOk("post_script_cmd"); ok { + args = append(args, &era.Actionarguments{ + Name: "post_script_cmd", + Value: utils.StringPtr(post.(string)), + }) + } + + // adding working dir + + args = append(args, &era.Actionarguments{ + Name: "working_dir", + Value: "/tmp", + }) + + req.Actionarguments = args + + // call API + + resp, err := conn.Service.DatabaseScale(ctx, dbUUID, req) + if err != nil { + return diag.FromErr(err) + } + + // Get Operation ID from response of ProvisionDatabaseResponse and poll for the operation to get completed. 
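+	// NOTE: scaling is asynchronous as well; the extend-storage call returns
+	// an operation ID that is polled below until it reaches a terminal state
+	// before the resource ID is set.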
+ opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for db Instance (%s) to scale: %s", resp.Entityid, errWaitTask) + } + + d.SetId(resp.Operationid) + return resourceNutanixNDBScaleDatabaseRead(ctx, d, meta) +} + +func resourceNutanixNDBScaleDatabaseRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + databaseID := d.Get("database_uuid").(string) + ctx = NewContext(ctx, dbID(databaseID)) + return readDatabaseInstance(ctx, d, meta) +} + +func resourceNutanixNDBScaleDatabaseUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return resourceNutanixNDBScaleDatabaseCreate(ctx, d, meta) +} + +func resourceNutanixNDBScaleDatabaseDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} From 13a48b44f9ab1404b9c5a35b8bf9f0500780f766 Mon Sep 17 00:00:00 2001 From: Abhishekism9450 <32683845+Abhishekism9450@users.noreply.github.com> Date: Fri, 13 Jan 2023 17:05:34 +0530 Subject: [PATCH 09/18] Feat/m register db instance (#527) --- client/era/era_service.go | 12 + client/era/era_structs.go | 29 + examples/ndb/database_register/main.tf | 152 +++++ .../ndb/database_register/terraform.tfvars | 4 + examples/ndb/database_register/variables.tf | 10 + nutanix/provider.go | 1 + .../resource_nutanix_ndb_register_database.go | 540 ++++++++++++++++++ ...urce_nutanix_ndb_register_database_test.go | 99 ++++ 8 files changed, 847 insertions(+) create mode 100644 examples/ndb/database_register/main.tf create mode 100644 examples/ndb/database_register/terraform.tfvars create mode 100644 examples/ndb/database_register/variables.tf create mode 100644 nutanix/resource_nutanix_ndb_register_database.go create mode 100644 nutanix/resource_nutanix_ndb_register_database_test.go diff --git a/client/era/era_service.go b/client/era/era_service.go index 34c347e50..a2afe1c7c 100644 --- a/client/era/era_service.go +++ b/client/era/era_service.go @@ -38,6 +38,7 @@ type Service interface { UpdateProfileVersion(ctx context.Context, req *ProfileRequest, id string, vid string) (*ListProfileResponse, error) DeleteProfileVersion(ctx context.Context, profileID string, profileVersionID string) (*string, error) DatabaseScale(ctx context.Context, id string, req *DatabaseScale) (*ProvisionDatabaseResponse, error) + RegisterDatabase(ctx context.Context, request *RegisterDBInputRequest) (*ProvisionDatabaseResponse, error) } type ServiceClient struct { @@ -303,6 +304,17 @@ func (sc ServiceClient) CreateProfiles(ctx context.Context, req *ProfileRequest) return res, sc.c.Do(ctx, httpReq, res) } +func (sc ServiceClient) RegisterDatabase(ctx context.Context, req *RegisterDBInputRequest) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/databases/register", req) + res := new(ProvisionDatabaseResponse) + + if err != nil { + return nil, err + } + + return res, sc.c.Do(ctx, httpReq, res) +} + func (sc ServiceClient) 
DeleteSLA(ctx context.Context, uuid string) (*SLADeleteResponse, error) { httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/slas/%s", uuid), nil) if err != nil { diff --git a/client/era/era_structs.go b/client/era/era_structs.go index 03fc6124c..fa22b127c 100644 --- a/client/era/era_structs.go +++ b/client/era/era_structs.go @@ -1057,3 +1057,32 @@ type ProfileFilter struct { ProfileID string `json:"profile_id,omitempty"` ProfileName string `json:"profile_name,omitempty"` } + +type RegisterDBInputRequest struct { + NxClusterID *string `json:"nxClusterId,omitempty"` + DatabaseType *string `json:"databaseType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + Description *string `json:"description,omitempty"` + Category *string `json:"category,omitempty"` + VMIP *string `json:"vmIp,omitempty"` + WorkingDirectory *string `json:"workingDirectory,omitempty"` + VMUsername *string `json:"vmUsername,omitempty"` + VMPassword *string `json:"vmPassword,omitempty"` + VMSshkey *string `json:"vmSshkey,omitempty"` + VMDescription *string `json:"vmDescription,omitempty"` + ResetDescriptionInNxCluster bool `json:"resetDescriptionInNxCluster,omitempty"` + AutoTuneStagingDrive bool `json:"autoTuneStagingDrive,omitempty"` + Clustered bool `json:"clustered,omitempty"` + ForcedInstall bool `json:"forcedInstall,omitempty"` + TimeMachineInfo *Timemachineinfo `json:"timeMachineInfo,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + Actionarguments []*Actionarguments `json:"actionArguments,omitempty"` + MaintenanceTasks *MaintenanceTasks `json:"maintenanceTasks,omitempty"` +} + +type UnRegisterDatabaseRequest struct { + SoftRemove bool `json:"softRemove,omitempty"` + Remove bool `json:"remove,omitempty"` + Delete bool `json:"delete,omitempty"` + DeleteTimeMachine bool `json:"deleteTimeMachine,omitempty"` +} diff --git a/examples/ndb/database_register/main.tf b/examples/ndb/database_register/main.tf new file mode 100644 index 000000000..ff2374817 --- /dev/null +++ b/examples/ndb/database_register/main.tf @@ -0,0 +1,152 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + +## register PostgreSQL database with registered DBServer VM +resource "nutanix_ndb_register_database" "name" { + database_type = "postgres_database" + database_name= "test-inst" + description = "added by terraform" + category = "DEFAULT" + + // registered vm IP + vm_ip = "{{ vm_ip }}" + + // optional + working_directory= "/tmp" + + reset_description_in_nx_cluster= false + + // time Machine Info + time_machine_info { + name= "test-pg-inst-regis" + description= "description of tms" + slaid=" {{ SLA ID}}" + schedule { + snapshottimeofday{ + hours= 16 + minutes= 0 + seconds= 0 + } + continuousschedule{ + enabled=true + logbackupinterval= 30 + snapshotsperday=1 + } + weeklyschedule{ + enabled=true + dayofweek= "WEDNESDAY" + } + monthlyschedule{ + enabled = true + dayofmonth= "27" + } + quartelyschedule{ + enabled=true + startmonth="JANUARY" + dayofmonth= 27 + } + yearlyschedule{ + enabled= false + dayofmonth= 31 + month="DECEMBER" + } + } + } + postgress_info{ + + // required args + listener_port= "5432" + db_password ="pass" + db_name= "testdb1" + + // Optional with default values + db_user= "postgres" + backup_policy= "prefer_secondary" + postgres_software_home= "{{ directory 
where the PostgreSQL database software is installed.}}" + software_home= "{{ directory where the PostgreSQL database software is installed. }}" + + } +} + + +## register PostgreSQL database with instance not registered on VM +resource "nutanix_ndb_register_database" "name" { + database_type = "postgres_database" + database_name= "test-inst" + description = "added by terraform" + category = "DEFAULT" + nx_cluster_id = "{{ cluster_ID }}" + + // registered vm info + vm_ip = "{{ vm_ip }}" + vm_username = "{{ vm_username }}" + vm_password = "{{ vm_password }}" + + // optional + working_directory= "/tmp" + + reset_description_in_nx_cluster= false + + // time Machine Info + time_machine_info { + name= "test-pg-inst-regis" + description= "description of tms" + slaid=" {{ SLA ID}}" + schedule { + snapshottimeofday{ + hours= 16 + minutes= 0 + seconds= 0 + } + continuousschedule{ + enabled=true + logbackupinterval= 30 + snapshotsperday=1 + } + weeklyschedule{ + enabled=true + dayofweek= "WEDNESDAY" + } + monthlyschedule{ + enabled = true + dayofmonth= "27" + } + quartelyschedule{ + enabled=true + startmonth="JANUARY" + dayofmonth= 27 + } + yearlyschedule{ + enabled= false + dayofmonth= 31 + month="DECEMBER" + } + } + } + postgress_info{ + + // required args + listener_port= "5432" + db_password ="pass" + db_name= "testdb1" + + // Optional with default values + db_user= "postgres" + backup_policy= "prefer_secondary" + postgres_software_home= "{{ directory where the PostgreSQL database software is installed }}" + } +} diff --git a/examples/ndb/database_register/terraform.tfvars b/examples/ndb/database_register/terraform.tfvars new file mode 100644 index 000000000..4f5de990b --- /dev/null +++ b/examples/ndb/database_register/terraform.tfvars @@ -0,0 +1,4 @@ +#define values to the variables to be used in terraform file_username = "admin" +ndb_password = "password" +ndb_endpoint = "10.xx.xx.xx" +ndb_username = "username" diff --git a/examples/ndb/database_register/variables.tf b/examples/ndb/database_register/variables.tf new file mode 100644 index 000000000..1a0cb89bf --- /dev/null +++ b/examples/ndb/database_register/variables.tf @@ -0,0 +1,10 @@ +#define the type of variables to be used in terraform file +variable "ndb_username" { + type = string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} diff --git a/nutanix/provider.go b/nutanix/provider.go index 982dfb846..7214657da 100644 --- a/nutanix/provider.go +++ b/nutanix/provider.go @@ -232,6 +232,7 @@ func Provider() *schema.Provider { "nutanix_ndb_software_version_profile": resourceNutanixNDBSoftwareVersionProfile(), "nutanix_ndb_scale_database": resourceNutanixNDBScaleDatabase(), "nutanix_ndb_database_scale": resourceNutanixNDBScaleDatabase(), + "nutanix_ndb_register_database": resourceNutanixNDBRegisterDatabase(), }, ConfigureContextFunc: providerConfigure, } diff --git a/nutanix/resource_nutanix_ndb_register_database.go b/nutanix/resource_nutanix_ndb_register_database.go new file mode 100644 index 000000000..9b5557b4d --- /dev/null +++ b/nutanix/resource_nutanix_ndb_register_database.go @@ -0,0 +1,540 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func 
resourceNutanixNDBRegisterDatabase() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBRegisterDatabaseCreate, + ReadContext: resourceNutanixNDBRegisterDatabaseRead, + UpdateContext: resourceNutanixNDBRegisterDatabaseUpdate, + DeleteContext: resourceNutanixNDBRegisterDatabaseDelete, + Schema: map[string]*schema.Schema{ + "database_type": { + Type: schema.TypeString, + Required: true, + }, + "database_name": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "clustered": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "forced_install": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "category": { + Type: schema.TypeString, + Optional: true, + Default: "DEFAULT", + }, + "vm_ip": { + Type: schema.TypeString, + Required: true, + }, + "vm_username": { + Type: schema.TypeString, + Optional: true, + }, + "vm_password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + "vm_sshkey": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + "vm_description": { + Type: schema.TypeString, + Optional: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Optional: true, + }, + "reset_description_in_nx_cluster": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "auto_tune_staging_drive": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "working_directory": { + Type: schema.TypeString, + Optional: true, + Default: "/tmp", + }, + "time_machine_info": timeMachineInfoSchema(), + "tags": dataSourceEraDBInstanceTags(), + "actionarguments": actionArgumentsSchema(), + "postgress_info": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "listener_port": { + Type: schema.TypeString, + Required: true, + }, + "db_user": { + Type: schema.TypeString, + Optional: true, + }, + "switch_log": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "allow_multiple_databases": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "backup_policy": { + Type: schema.TypeString, + Optional: true, + Default: "prefer_secondary", + }, + "vm_ip": { + Type: schema.TypeString, + Optional: true, + }, + "postgres_software_home": { + Type: schema.TypeString, + Required: true, + }, + "software_home": { + Type: schema.TypeString, + Optional: true, + }, + "db_password": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + }, + "db_name": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + // computed values + + "name": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + + "value": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + }, + }, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine": dataSourceEraTimeMachine(), + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "era_created": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "placeholder": { + Type: schema.TypeBool, + 
Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "database_cluster_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "database_status": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_logical_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_zone": { + Type: schema.TypeString, + Computed: true, + }, + "info": dataSourceEraDatabaseInfo(), + "group_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "metadata": dataSourceEraDBInstanceMetadata(), + "metric": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "parent_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_source_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "dbserver_logical_cluster": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_nodes": dataSourceEraDatabaseNodes(), + "linked_databases": dataSourceEraLinkedDatabases(), + }, + } +} + +func resourceNutanixNDBRegisterDatabaseCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + log.Println("Creating the request!!!") + req, err := buildReisterDBRequest(d) + if err != nil { + return diag.FromErr(err) + } + + resp, er := conn.Service.RegisterDatabase(ctx, req) + if er != nil { + return diag.FromErr(er) + } + d.SetId(resp.Entityid) + + // Get Operation ID from response of RegisterDatabaseResponse and poll for the operation to get completed. 
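+	// NDB's provisioning APIs are asynchronous: the POST returns immediately
+	// with an operation ID, and the registered entity only becomes usable once
+	// that operation reaches COMPLETED. The StateChangeConf below polls the
+	// operation via eraRefresh until it leaves PENDING.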
+ opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for db register (%s) to create: %s", resp.Entityid, errWaitTask) + } + return nil +} + +func resourceNutanixNDBRegisterDatabaseRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + databaseID := d.Id() + ctx = NewContext(ctx, dbID(databaseID)) + return readDatabaseInstance(ctx, d, meta) +} + +func resourceNutanixNDBRegisterDatabaseUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return updateDatabaseInstance(ctx, d, meta) +} + +func resourceNutanixNDBRegisterDatabaseDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + if conn == nil { + return diag.Errorf("era is nil") + } + + dbID := d.Id() + + req := era.DeleteDatabaseRequest{ + Delete: false, + Remove: true, + Softremove: false, + Forced: false, + Deletetimemachine: true, + Deletelogicalcluster: true, + } + res, err := conn.Service.DeleteDatabase(ctx, &req, dbID) + if err != nil { + return diag.FromErr(err) + } + + log.Printf("Operation to unregister instance with id %s has started, operation id: %s", dbID, res.Operationid) + opID := res.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Cluster GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for unregister db Instance (%s) to delete: %s", res.Entityid, errWaitTask) + } + return nil +} + +func buildReisterDBRequest(d *schema.ResourceData) (*era.RegisterDBInputRequest, error) { + res := &era.RegisterDBInputRequest{} + + if dbType, ok := d.GetOk("database_type"); ok && len(dbType.(string)) > 0 { + res.DatabaseType = utils.StringPtr(dbType.(string)) + } + + if dbName, ok := d.GetOk("database_name"); ok && len(dbName.(string)) > 0 { + res.DatabaseName = utils.StringPtr(dbName.(string)) + } + + if desc, ok := d.GetOk("description"); ok && len(desc.(string)) > 0 { + res.Description = utils.StringPtr(desc.(string)) + } + + if cls, ok := d.GetOk("clustered"); ok { + res.Clustered = cls.(bool) + } + + if forcedInstall, ok := d.GetOk("forced_install"); ok { + res.ForcedInstall = forcedInstall.(bool) + } + + if category, ok := d.GetOk("category"); ok && len(category.(string)) > 0 { + res.Category = utils.StringPtr(category.(string)) + } + + if vmIP, ok := d.GetOk("vm_ip"); ok && len(vmIP.(string)) > 0 { + res.VMIP = utils.StringPtr(vmIP.(string)) + } + + if vmUsername, ok := d.GetOk("vm_username"); ok && len(vmUsername.(string)) > 0 { + 
res.VMUsername = utils.StringPtr(vmUsername.(string))
+	}
+
+	if vmPass, ok := d.GetOk("vm_password"); ok && len(vmPass.(string)) > 0 {
+		res.VMPassword = utils.StringPtr(vmPass.(string))
+	}
+
+	if vmSshkey, ok := d.GetOk("vm_sshkey"); ok && len(vmSshkey.(string)) > 0 {
+		res.VMSshkey = utils.StringPtr(vmSshkey.(string))
+	}
+
+	if vmDesc, ok := d.GetOk("vm_description"); ok && len(vmDesc.(string)) > 0 {
+		res.VMDescription = utils.StringPtr(vmDesc.(string))
+	}
+
+	if nxCls, ok := d.GetOk("nx_cluster_id"); ok && len(nxCls.(string)) > 0 {
+		res.NxClusterID = utils.StringPtr(nxCls.(string))
+	}
+
+	if resetDesc, ok := d.GetOk("reset_description_in_nx_cluster"); ok {
+		res.ResetDescriptionInNxCluster = resetDesc.(bool)
+	}
+
+	if autoTune, ok := d.GetOk("auto_tune_staging_drive"); ok {
+		res.AutoTuneStagingDrive = autoTune.(bool)
+	}
+
+	if wrk, ok := d.GetOk("working_directory"); ok && len(wrk.(string)) > 0 {
+		res.WorkingDirectory = utils.StringPtr(wrk.(string))
+	}
+
+	if tms, ok := d.GetOk("time_machine_info"); ok && len(tms.(*schema.Set).List()) > 0 {
+		res.TimeMachineInfo = buildTimeMachineFromResourceData(tms.(*schema.Set))
+	}
+
+	if tags, ok := d.GetOk("tags"); ok && len(tags.([]interface{})) > 0 {
+		res.Tags = expandTags(tags.([]interface{}))
+	}
+
+	if maintenance, ok := d.GetOk("maintenance_tasks"); ok && len(maintenance.([]interface{})) > 0 {
+		res.MaintenanceTasks = expandMaintenanceTasks(maintenance.([]interface{}))
+	}
+
+	res.Actionarguments = expandRegisterDBActionArguments(d)
+	return res, nil
+}
+
+func expandRegisterDBActionArguments(d *schema.ResourceData) []*era.Actionarguments {
+	args := []*era.Actionarguments{}
+	if post, ok := d.GetOk("postgress_info"); ok {
+		brr := post.([]interface{})
+
+		for _, arg := range brr {
+			val := arg.(map[string]interface{})
+			var values interface{}
+			if plist, pok := val["listener_port"]; pok && len(plist.(string)) > 0 {
+				values = plist
+
+				args = append(args, &era.Actionarguments{
+					Name:  "listener_port",
+					Value: values,
+				})
+			}
+			if plist, pok := val["db_user"]; pok && len(plist.(string)) > 0 {
+				values = plist
+
+				args = append(args, &era.Actionarguments{
+					Name:  "db_user",
+					Value: values,
+				})
+			}
+			if plist, pok := val["switch_log"]; pok && plist.(bool) {
+				values = plist
+
+				args = append(args, &era.Actionarguments{
+					Name:  "switch_log",
+					Value: values,
+				})
+			}
+			if plist, pok := val["allow_multiple_databases"]; pok && plist.(bool) {
+				values = plist
+
+				args = append(args, &era.Actionarguments{
+					Name:  "allow_multiple_databases",
+					Value: values,
+				})
+			}
+			if plist, pok := val["backup_policy"]; pok && len(plist.(string)) > 0 {
+				values = plist
+
+				args = append(args, &era.Actionarguments{
+					Name:  "backup_policy",
+					Value: values,
+				})
+			}
+			if plist, pok := val["vm_ip"]; pok && len(plist.(string)) > 0 {
+				values = plist
+
+				args = append(args, &era.Actionarguments{
+					Name:  "vmIp",
+					Value: values,
+				})
+			}
+			if plist, pok := val["postgres_software_home"]; pok && len(plist.(string)) > 0 {
+				values = plist
+
+				args = append(args, &era.Actionarguments{
+					Name:  "postgres_software_home",
+					Value: values,
+				})
+			}
+			if plist, pok := val["software_home"]; pok && len(plist.(string)) > 0 {
+				values = plist
+
+				args = append(args, &era.Actionarguments{
+					Name:  "software_home",
+					Value: values,
+				})
+			}
+			if plist, pok := val["db_password"]; pok && len(plist.(string)) > 0 {
+				values = plist
+
+				args = append(args, &era.Actionarguments{
+					Name:  "db_password",
+					Value: values,
+				})
+			}
+			if plist, pok := val["db_name"]; pok &&
len(plist.(string)) > 0 { + values = plist + + args = append(args, &era.Actionarguments{ + Name: "db_name", + Value: values, + }) + } + } + } + + resp := buildActionArgumentsFromResourceData(d.Get("actionarguments").(*schema.Set), args) + return resp +} diff --git a/nutanix/resource_nutanix_ndb_register_database_test.go b/nutanix/resource_nutanix_ndb_register_database_test.go new file mode 100644 index 000000000..419f7b179 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_register_database_test.go @@ -0,0 +1,99 @@ +package nutanix + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceRegisterDB = "nutanix_ndb_database.acctest-managed" + +func TestAccEra_Registerbasic(t *testing.T) { + name := "test-pg-inst-tf" + desc := "this is desc" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabaseRegisterConfig(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceRegisterDB, "name", name), + resource.TestCheckResourceAttr(resourceRegisterDB, "description", desc), + ), + }, + }, + }) +} + +func testAccEraDatabaseRegisterConfig(name, desc string) string { + return fmt.Sprintf(` + data "nutanix_ndb_profiles" "p"{ + } + data "nutanix_ndb_slas" "slas"{} + data "nutanix_ndb_clusters" "clusters"{} + + locals { + slas = { + for p in data.nutanix_ndb_slas.slas.slas: p.name => p + } + clusters = { + for p in data.nutanix_ndb_clusters.clusters.clusters: p.name => p + } + } + + resource "nutanix_ndb_register_database" "name" { + database_type = "postgres_database" + database_name= "%[1]s" + description = "%[2]s" + vm_username = "era" + vm_password = "pass" + vm_ip = "10.51.144.226" + nx_cluster_id = local.clusters.EraCluster.id + time_machine_info { + name= "test-pg-inst-regis" + description="tms by terraform" + slaid=local.slas["DEFAULT_OOB_BRONZE_SLA"].id + schedule { + snapshottimeofday{ + hours= 13 + minutes= 0 + seconds= 0 + } + continuousschedule{ + enabled=true + logbackupinterval= 30 + snapshotsperday=1 + } + weeklyschedule{ + enabled=true + dayofweek= "WEDNESDAY" + } + monthlyschedule{ + enabled = true + dayofmonth= "27" + } + quartelyschedule{ + enabled=true + startmonth="JANUARY" + dayofmonth= 27 + } + yearlyschedule{ + enabled= false + dayofmonth= 31 + month="DECEMBER" + } + } + } + postgress_info{ + listener_port= "5432" + db_user= "postgres" + // postgres_software_home= "/usr/pgsql-10.4" + // software_home= "/usr/pgsql-10.4" + db_password ="pass" + db_name= "testdb1" + } + } + `, name, desc) +} From d23a13522e6fa4179e533a1e705d86477166c23e Mon Sep 17 00:00:00 2001 From: Abhishekism9450 <32683845+Abhishekism9450@users.noreply.github.com> Date: Wed, 18 Jan 2023 17:50:48 +0530 Subject: [PATCH 10/18] Feat/m database snapshot (#516) --- client/era/era_service.go | 192 +++ client/era/era_structs.go | 340 ++++- examples/ndb/clone/main.tf | 42 + examples/ndb/clone/terraform.tfvars | 4 + examples/ndb/clone/variables.tf | 10 + examples/ndb/database_snapshot/main.tf | 32 + .../ndb/database_snapshot/terraform.tfvars | 4 + examples/ndb/database_snapshot/variables.tf | 10 + nutanix/data_source_nutanix_ndb_clone.go | 380 +++++ nutanix/data_source_nutanix_ndb_clones.go | 258 ++++ nutanix/data_source_nutanix_ndb_database.go | 6 +- nutanix/data_source_nutanix_ndb_snapshot.go | 480 +++++++ .../data_source_nutanix_ndb_snapshot_test.go | 101 ++ nutanix/data_source_nutanix_ndb_snapshots.go | 
341 +++++ .../data_source_nutanix_ndb_snapshots_test.go | 67 + .../data_source_nutanix_ndb_time_machine.go | 676 +++++++++ ...rce_nutanix_ndb_time_machine_capability.go | 1217 +++++++++++++++++ ...utanix_ndb_time_machine_capability_test.go | 39 + ...ta_source_nutanix_ndb_time_machine_test.go | 69 + .../data_source_nutanix_ndb_time_machines.go | 86 ++ ...a_source_nutanix_ndb_time_machines_test.go | 35 + nutanix/provider.go | 10 + ...esource_nutanix_ndb_authorize_dbservers.go | 125 ++ nutanix/resource_nutanix_ndb_clone.go | 833 +++++++++++ ...esource_nutanix_ndb_database_scale_test.go | 44 + .../resource_nutanix_ndb_database_snapshot.go | 596 ++++++++ ...urce_nutanix_ndb_database_snapshot_test.go | 88 ++ .../resource_nutanix_ndb_log_catchups_test.go | 36 + 28 files changed, 6105 insertions(+), 16 deletions(-) create mode 100644 examples/ndb/clone/main.tf create mode 100644 examples/ndb/clone/terraform.tfvars create mode 100644 examples/ndb/clone/variables.tf create mode 100644 examples/ndb/database_snapshot/main.tf create mode 100644 examples/ndb/database_snapshot/terraform.tfvars create mode 100644 examples/ndb/database_snapshot/variables.tf create mode 100644 nutanix/data_source_nutanix_ndb_clone.go create mode 100644 nutanix/data_source_nutanix_ndb_clones.go create mode 100644 nutanix/data_source_nutanix_ndb_snapshot.go create mode 100644 nutanix/data_source_nutanix_ndb_snapshot_test.go create mode 100644 nutanix/data_source_nutanix_ndb_snapshots.go create mode 100644 nutanix/data_source_nutanix_ndb_snapshots_test.go create mode 100644 nutanix/data_source_nutanix_ndb_time_machine.go create mode 100644 nutanix/data_source_nutanix_ndb_time_machine_capability.go create mode 100644 nutanix/data_source_nutanix_ndb_time_machine_capability_test.go create mode 100644 nutanix/data_source_nutanix_ndb_time_machine_test.go create mode 100644 nutanix/data_source_nutanix_ndb_time_machines.go create mode 100644 nutanix/data_source_nutanix_ndb_time_machines_test.go create mode 100644 nutanix/resource_nutanix_ndb_authorize_dbservers.go create mode 100644 nutanix/resource_nutanix_ndb_clone.go create mode 100644 nutanix/resource_nutanix_ndb_database_scale_test.go create mode 100644 nutanix/resource_nutanix_ndb_database_snapshot.go create mode 100644 nutanix/resource_nutanix_ndb_database_snapshot_test.go create mode 100644 nutanix/resource_nutanix_ndb_log_catchups_test.go diff --git a/client/era/era_service.go b/client/era/era_service.go index a2afe1c7c..66bb1db6b 100644 --- a/client/era/era_service.go +++ b/client/era/era_service.go @@ -39,6 +39,21 @@ type Service interface { DeleteProfileVersion(ctx context.Context, profileID string, profileVersionID string) (*string, error) DatabaseScale(ctx context.Context, id string, req *DatabaseScale) (*ProvisionDatabaseResponse, error) RegisterDatabase(ctx context.Context, request *RegisterDBInputRequest) (*ProvisionDatabaseResponse, error) + GetTimeMachine(ctx context.Context, tmsID string, tmsName string) (*TimeMachine, error) + ListTimeMachines(ctx context.Context) (*ListTimeMachines, error) + DatabaseSnapshot(ctx context.Context, id string, req *DatabaseSnapshotRequest) (*ProvisionDatabaseResponse, error) + UpdateSnapshot(ctx context.Context, id string, req *UpdateSnapshotRequest) (*SnapshotResponse, error) + GetSnapshot(ctx context.Context, id string, filter *FilterParams) (*SnapshotResponse, error) + DeleteSnapshot(ctx context.Context, id string) (*ProvisionDatabaseResponse, error) + ListSnapshots(ctx context.Context, tmsID string) (*ListSnapshots, error) + 
CreateClone(ctx context.Context, id string, req *CloneRequest) (*ProvisionDatabaseResponse, error) + UpdateCloneDatabase(ctx context.Context, id string, req *UpdateDatabaseRequest) (*UpdateDatabaseResponse, error) + GetClone(ctx context.Context, id string, name string, filterParams *FilterParams) (*GetDatabaseResponse, error) + ListClones(ctx context.Context, filter *FilterParams) (*ListDatabaseInstance, error) + DeleteClone(ctx context.Context, id string, req *DeleteDatabaseRequest) (*ProvisionDatabaseResponse, error) + AuthorizeDBServer(ctx context.Context, id string, req []*string) (*AuthorizeDBServerResponse, error) + DeAuthorizeDBServer(ctx context.Context, id string, req []*string) (*AuthorizeDBServerResponse, error) + TimeMachineCapability(ctx context.Context, tmsID string) (*TimeMachineCapability, error) } type ServiceClient struct { @@ -362,6 +377,15 @@ func (sc ServiceClient) DatabaseRestore(ctx context.Context, databaseID string, res := new(ProvisionDatabaseResponse) return res, sc.c.Do(ctx, httpReq, res) } +func (sc ServiceClient) DatabaseSnapshot(ctx context.Context, id string, req *DatabaseSnapshotRequest) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/tms/%s/snapshots", id), req) + if err != nil { + return nil, err + } + + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} func (sc ServiceClient) LogCatchUp(ctx context.Context, tmsID string, req *LogCatchUpRequest) (*ProvisionDatabaseResponse, error) { httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/tms/%s/log-catchups", tmsID), req) @@ -436,3 +460,171 @@ func (sc ServiceClient) DeleteProfileVersion(ctx context.Context, profileID stri return res, sc.c.Do(ctx, httpReq, res) } + +func (sc ServiceClient) UpdateSnapshot(ctx context.Context, snapshotID string, req *UpdateSnapshotRequest) (*SnapshotResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPatch, fmt.Sprintf("/snapshots/i/%s", snapshotID), req) + if err != nil { + return nil, err + } + + res := new(SnapshotResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteSnapshot(ctx context.Context, snapshotID string) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/snapshots/%s", snapshotID), nil) + if err != nil { + return nil, err + } + + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetSnapshot(ctx context.Context, snapshotID string, filter *FilterParams) (*SnapshotResponse, error) { + path := fmt.Sprintf("/snapshots/%s", snapshotID) + if filter != nil { + path = path + "?load-replicated-child-snapshots=" + filter.LoadReplicatedChildSnapshots + "&time-zone=" + filter.TimeZone + } + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + res := new(SnapshotResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListSnapshots(ctx context.Context, tmsID string) (*ListSnapshots, error) { + path := ("/snapshots?all=false&time-zone=UTC") + if tmsID != "" { + path = path + "&value-type=time-machine&value=" + tmsID + } + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + res := new(ListSnapshots) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetTimeMachine(ctx context.Context, tmsID string, tmsName string) (*TimeMachine, error) { + path := "" + + if 
len(tmsName) > 0 { + path = fmt.Sprintf("/tms/%s?value-type=name&detailed=false&load-database=false&load-clones=false&time-zone=UTC", tmsName) + } else { + path = fmt.Sprintf("/tms/%s?value-type=id&detailed=false&load-database=false&load-clones=false&time-zone=UTC", tmsID) + } + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + res := new(TimeMachine) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListTimeMachines(ctx context.Context) (*ListTimeMachines, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, "/tms", nil) + if err != nil { + return nil, err + } + + res := new(ListTimeMachines) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) CreateClone(ctx context.Context, id string, req *CloneRequest) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/tms/%s/clones", id), req) + if err != nil { + return nil, err + } + + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetClone(ctx context.Context, id string, name string, filter *FilterParams) (*GetDatabaseResponse, error) { + path := "" + + if name != "" { + path = fmt.Sprintf("/clones/%s?value-type=name&detailed=%s&any-status=%s&load-dbserver-cluster=%s&time-zone=%s", name, filter.Detailed, filter.AnyStatus, filter.LoadDBServerCluster, filter.TimeZone) + } else { + path = fmt.Sprintf("/clones/%s?value-type=id&detailed=%s&any-status=%s&load-dbserver-cluster=%s&time-zone=%s", id, filter.Detailed, filter.AnyStatus, filter.LoadDBServerCluster, filter.TimeZone) + } + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + res := new(GetDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListClones(ctx context.Context, filter *FilterParams) (*ListDatabaseInstance, error) { + path := fmt.Sprintf("/clones?detailed=%s&any-status=%s&load-dbserver-cluster=%s&order-by-dbserver-cluster=%s&order-by-dbserver-logical-cluster=%s&time-zone=%s", + filter.Detailed, filter.AnyStatus, filter.LoadDBServerCluster, filter.OrderByDBServerCluster, filter.OrderByDBServerLogicalCluster, filter.TimeZone) + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + res := new(ListDatabaseInstance) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) UpdateCloneDatabase(ctx context.Context, id string, req *UpdateDatabaseRequest) (*UpdateDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPatch, fmt.Sprintf("/clones/%s", id), req) + res := new(UpdateDatabaseResponse) + + if err != nil { + return nil, err + } + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteClone(ctx context.Context, cloneID string, req *DeleteDatabaseRequest) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/clones/%s", cloneID), req) + if err != nil { + return nil, err + } + + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) AuthorizeDBServer(ctx context.Context, tmsID string, req []*string) (*AuthorizeDBServerResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPatch, fmt.Sprintf("/tms/%s/dbservers", tmsID), req) + if err != nil { + return nil, err + } + + res := new(AuthorizeDBServerResponse) + + return res, sc.c.Do(ctx, httpReq, res) 
+} + +func (sc ServiceClient) DeAuthorizeDBServer(ctx context.Context, tmsID string, req []*string) (*AuthorizeDBServerResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/tms/%s/dbservers", tmsID), req) + if err != nil { + return nil, err + } + + res := new(AuthorizeDBServerResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) TimeMachineCapability(ctx context.Context, tmsID string) (*TimeMachineCapability, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/tms/%s/capability?time-zone=UTC&type=detailed&load-db-logs=true&load-snapshots=true", tmsID), "") + if err != nil { + return nil, err + } + + res := new(TimeMachineCapability) + + return res, sc.c.Do(ctx, httpReq, res) +} diff --git a/client/era/era_structs.go b/client/era/era_structs.go index fa22b127c..5743a16d8 100644 --- a/client/era/era_structs.go +++ b/client/era/era_structs.go @@ -288,13 +288,14 @@ type IPInfos struct { } type Nodes struct { - Properties []*NodesProperties `json:"properties"` - Vmname *string `json:"vmName,omitempty"` - Networkprofileid *string `json:"networkProfileId,omitempty"` - DatabaseServerID *string `json:"dbserverId,omitempty"` - NxClusterID *string `json:"nxClusterId,omitempty"` - ComputeProfileID *string `json:"computeProfileId,omitempty"` - IPInfos []*IPInfos `json:"ipInfos,omitempty"` + Properties []*NodesProperties `json:"properties"` + Vmname *string `json:"vmName,omitempty"` + Networkprofileid *string `json:"networkProfileId,omitempty"` + DatabaseServerID *string `json:"dbserverId,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + ComputeProfileID *string `json:"computeProfileId,omitempty"` + NewDBServerTimeZone *string `json:"newDbServerTimeZone,omitempty"` + IPInfos []*IPInfos `json:"ipInfos,omitempty"` } // ProvisionDatabaseResponse structs @@ -700,6 +701,7 @@ type InfoBpgConfig struct { type Info struct { Secureinfo interface{} `json:"secureInfo"` Info *InfoBpgConfig `json:"info"` + CreatedBy *string `json:"created_by,omitempty"` } type DBInstanceMetadata struct { Logcatchupforrestoredispatched bool `json:"logCatchUpForRestoreDispatched,omitempty"` @@ -763,13 +765,13 @@ type MetricMemoryInfo struct { } type MetricStorageInfo struct { - LastUpdatedTimeInUTC *string `json:"lastUpdatedTimeInUTC,omitempty"` - ControllerNumIops []*int `json:"controllerNumIops,omitempty"` - ControllerAvgIoLatencyUsecs []*int `json:"controllerAvgIoLatencyUsecs,omitempty"` - Size *int `json:"size,omitempty"` - AllocatedSize *int `json:"allocatedSize,omitempty"` - UsedSize *int `json:"usedSize,omitempty"` - Unit *string `json:"unit,omitempty"` + LastUpdatedTimeInUTC interface{} `json:"lastUpdatedTimeInUTC,omitempty"` + ControllerNumIops []*int `json:"controllerNumIops,omitempty"` + ControllerAvgIoLatencyUsecs []*int `json:"controllerAvgIoLatencyUsecs,omitempty"` + Size interface{} `json:"size,omitempty"` + AllocatedSize interface{} `json:"allocatedSize,omitempty"` + UsedSize interface{} `json:"usedSize,omitempty"` + Unit interface{} `json:"unit,omitempty"` } type Metric struct { @@ -1086,3 +1088,313 @@ type UnRegisterDatabaseRequest struct { Delete bool `json:"delete,omitempty"` DeleteTimeMachine bool `json:"deleteTimeMachine,omitempty"` } +type DatabaseSnapshotRequest struct { + Name *string `json:"name,omitempty"` + LcmConfig *LCMConfigSnapshot `json:"lcmConfig,omitempty"` + ReplicateToClusters []*string `json:"replicateToClusterIds,omitempty"` +} + +type LCMConfigSnapshot struct { + SnapshotLCMConfig 
*SnapshotLCMConfig `json:"snapshotLCMConfig,omitempty"` +} + +type SnapshotLCMConfig struct { + ExpiryDetails *DBExpiryDetails `json:"expiryDetails,omitempty"` +} + +type ListTimeMachines []*TimeMachine + +type CloneLCMConfig struct { + DatabaseLCMConfig *DatabaseLCMConfig `json:"databaseLCMConfig,omitempty"` +} + +type DatabaseLCMConfig struct { + ExpiryDetails *DBExpiryDetails `json:"expiryDetails,omitempty"` + RefreshDetails *DBRefreshDetails `json:"refreshDetails,omitempty"` +} + +type CloneRequest struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + SSHPublicKey *string `json:"sshPublicKey,omitempty"` + DbserverID *string `json:"dbserverId,omitempty"` + DbserverClusterID *string `json:"dbserverClusterId,omitempty"` + DbserverLogicalClusterID *string `json:"dbserverLogicalClusterId,omitempty"` + TimeMachineID *string `json:"timeMachineId,omitempty"` + SnapshotID *string `json:"snapshotId,omitempty"` + UserPitrTimestamp *string `json:"userPitrTimestamp,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` + VMPassword *string `json:"vmPassword,omitempty"` + ComputeProfileID *string `json:"computeProfileId,omitempty"` + NetworkProfileID *string `json:"networkProfileId,omitempty"` + DatabaseParameterProfileID *string `json:"databaseParameterProfileId,omitempty"` + NodeCount *int `json:"nodeCount,omitempty"` + Nodes []*Nodes `json:"nodes,omitempty"` + ActionArguments []*Actionarguments `json:"actionArguments,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + LatestSnapshot bool `json:"latestSnapshot,omitempty"` + CreateDbserver bool `json:"createDbserver,omitempty"` + Clustered bool `json:"clustered,omitempty"` + LcmConfig *CloneLCMConfig `json:"lcmConfig,omitempty"` +} + +type AuthorizeDBServerResponse struct { + ErrorCode *int `json:"errorCode,omitempty"` + Info *string `json:"info,omitempty"` + Message *string `json:"message,omitempty"` + Status *string `json:"status,omitempty"` +} + +type FilterParams struct { + Detailed string `json:"detailed,omitempty"` + AnyStatus string `json:"any-status,omitempty"` + LoadDBServerCluster string `json:"load-dbserver-cluster"` + TimeZone string `json:"time-zone,omitempty"` + OrderByDBServerCluster string `json:"order-by-dbserver-cluster,omitempty"` + OrderByDBServerLogicalCluster string `json:"order-by-dbserver-logical-cluster,omitempty"` + LoadReplicatedChildSnapshots string `json:"load-replicated-child-snapshots,omitempty"` +} + +type UpdateSnapshotRequest struct { + Name *string `json:"name,omitempty"` + ResetName bool `json:"resetName,omitempty"` +} + +type ListSnapshots []SnapshotResponse + +type SnapshotResponse struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + DateCreated *string `json:"dateCreated,omitempty"` + DateModified *string `json:"dateModified,omitempty"` + SnapshotID *string `json:"snapshotId,omitempty"` + SnapshotUUID *string `json:"snapshotUuid,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + ProtectionDomainID *string `json:"protectionDomainId,omitempty"` + ParentSnapshotID *string `json:"parentSnapshotId,omitempty"` + TimeMachineID *string `json:"timeMachineId,omitempty"` + DatabaseNodeID *string `json:"databaseNodeId,omitempty"` + AppInfoVersion *string `json:"appInfoVersion,omitempty"` + Status *string `json:"status,omitempty"` + Type *string `json:"type,omitempty"` + 
SnapshotTimeStamp *string `json:"snapshotTimeStamp,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` + SoftwareSnapshotID *string `json:"softwareSnapshotId,omitempty"` + FromTimeStamp *string `json:"fromTimeStamp,omitempty"` + ToTimeStamp *string `json:"toTimeStamp,omitempty"` + ApplicableTypes []*string `json:"applicableTypes,omitempty"` + DBServerStorageMetadataVersion *int `json:"dbServerStorageMetadataVersion,omitempty"` + SnapshotTimeStampDate *int64 `json:"snapshotTimeStampDate,omitempty"` + SnapshotSize *float64 `json:"snapshotSize,omitempty"` + ParentSnapshot *bool `json:"parentSnapshot,omitempty"` + SoftwareDatabaseSnapshot bool `json:"softwareDatabaseSnapshot,omitempty"` + Processed bool `json:"processed,omitempty"` + DatabaseSnapshot bool `json:"databaseSnapshot,omitempty"` + Properties []*DBInstanceProperties `json:"properties"` + Tags []*Tags `json:"tags"` + Info *CloneInfo `json:"info,omitempty"` + Metadata *ClonedMetadata `json:"metadata,omitempty"` + Metric *Metric `json:"metric,omitempty"` + LcmConfig *LcmConfig `json:"lcmConfig,omitempty"` + SanitisedFromSnapshotID interface{} `json:"sanitisedFromSnapshotId,omitempty"` + AccessLevel interface{} `json:"accessLevel"` + DbserverID interface{} `json:"dbserverId,omitempty"` + DbserverName interface{} `json:"dbserverName,omitempty"` + DbserverIP interface{} `json:"dbserverIp,omitempty"` + ReplicatedSnapshots interface{} `json:"replicatedSnapshots,omitempty"` + SoftwareSnapshot interface{} `json:"softwareSnapshot,omitempty"` + SanitisedSnapshots interface{} `json:"sanitisedSnapshots,omitempty"` + SnapshotFamily interface{} `json:"snapshotFamily,omitempty"` +} + +type LinkedDBInfo struct { + Info *Info `json:"info,omitempty"` +} + +type CloneLinkedDBInfo struct { + ID *string `json:"id,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + Status *string `json:"status,omitempty"` + Info *LinkedDBInfo `json:"info,omitempty"` + AppConsistent bool `json:"appConsistent,omitempty"` + Clone bool `json:"clone,omitempty"` + Message interface{} `json:"message,omitempty"` +} + +type CloneInfo struct { + SecureInfo interface{} `json:"secureInfo,omitempty"` + Info interface{} `json:"info,omitempty"` + LinkedDatabases []*CloneLinkedDBInfo `json:"linkedDatabases,omitempty"` + Databases interface{} `json:"databases,omitempty"` + DatabaseGroupID interface{} `json:"databaseGroupId,omitempty"` + MissingDatabases interface{} `json:"missingDatabases,omitempty"` + ReplicationHistory interface{} `json:"replicationHistory,omitempty"` +} + +type ClonedMetadata struct { + SecureInfo interface{} `json:"secureInfo,omitempty"` + Info interface{} `json:"info,omitempty"` + DeregisterInfo interface{} `json:"deregisterInfo,omitempty"` + FromTimeStamp string `json:"fromTimeStamp,omitempty"` + ToTimeStamp string `json:"toTimeStamp,omitempty"` + ReplicationRetryCount int `json:"replicationRetryCount,omitempty"` + LastReplicationRetryTimestamp interface{} `json:"lastReplicationRetryTimestamp,omitempty"` + LastReplicationRetrySourceSnapshotID interface{} `json:"lastReplicationRetrySourceSnapshotId,omitempty"` + Async bool `json:"async,omitempty"` + Standby bool `json:"standby,omitempty"` + CurationRetryCount int `json:"curationRetryCount,omitempty"` + OperationsUsingSnapshot []interface{} `json:"operationsUsingSnapshot,omitempty"` +} + +type Capability struct { + Mode *string `json:"mode,omitempty"` + From *string `json:"from,omitempty"` + To *string `json:"to,omitempty"` + TimeUnit *string `json:"timeUnit,omitempty"` + TimeUnitNumber *string 
`json:"timeUnitNumber,omitempty"` + DatabaseIds []*string `json:"databaseIds,omitempty"` + Snapshots *ListSnapshots `json:"snapshots,omitempty"` + ContinuousRegion *ContinuousRegion `json:"continuousRegion,omitempty"` + DatabasesContinuousRegion interface{} `json:"databasesContinuousRegion,omitempty"` +} + +type TimeMachineCapability struct { + TimeMachineID *string `json:"timeMachineId,omitempty"` + OutputTimeZone *string `json:"outputTimeZone,omitempty"` + Type *string `json:"type,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + NxClusterAssociationType *string `json:"nxClusterAssociationType,omitempty"` + SLAID *string `json:"slaId,omitempty"` + CapabilityResetTime *string `json:"capabilityResetTime,omitempty"` + LastContinuousSnapshotTime *string `json:"lastContinuousSnapshotTime,omitempty"` + LogCatchupStartTime *string `json:"logCatchupStartTime,omitempty"` + DatabaseIds []*string `json:"databaseIds,omitempty"` + HealWithResetCapability bool `json:"healWithResetCapability,omitempty"` + Source bool `json:"source,omitempty"` + Capability []*Capability `json:"capability,omitempty"` + LogTimeInfo map[string]interface{} `json:"logTimeInfo,omitempty"` + LastDBLog *DBLogs `json:"lastDbLog,omitempty"` + LastContinuousSnapshot *LastContinuousSnapshot `json:"lastContinuousSnapshot,omitempty"` + OverallContinuousRangeEndTime interface{} `json:"overallContinuousRangeEndTime,omitempty"` +} + +type ProcessedRanges struct { + First string `json:"first,omitempty"` + Second string `json:"second,omitempty"` +} + +type DBLogsInfo struct { + SecureInfo interface{} `json:"secureInfo,omitempty"` + Info interface{} `json:"info,omitempty"` + UnknownTimeRange bool `json:"unknownTimeRange,omitempty"` +} + +type DBLogsMetadata struct { + SecureInfo interface{} `json:"secureInfo,omitempty"` + Info interface{} `json:"info,omitempty"` + DeregisterInfo *DeregisterInfo `json:"deregisterInfo,omitempty"` + CurationRetryCount int `json:"curationRetryCount,omitempty"` + CreatedDirectly bool `json:"createdDirectly,omitempty"` + UpdatedDirectly bool `json:"updatedDirectly,omitempty"` +} + +type DBLogs struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + EraLogDriveID string `json:"eraLogDriveId,omitempty"` + DatabaseNodeID string `json:"databaseNodeId,omitempty"` + FromTime string `json:"fromTime,omitempty"` + ToTime string `json:"toTime,omitempty"` + Status string `json:"status,omitempty"` + Size int `json:"size,omitempty"` + Info *DBLogsInfo `json:"info,omitempty"` + Metadata *DBLogsMetadata `json:"metadata,omitempty"` + DateCreated string `json:"dateCreated,omitempty"` + DateModified string `json:"dateModified,omitempty"` + OwnerID string `json:"ownerId,omitempty"` + DatabaseID interface{} `json:"databaseId,omitempty"` + Message interface{} `json:"message,omitempty"` + Unprocessed bool `json:"unprocessed,omitempty"` + LogCopyOperationID interface{} `json:"logCopyOperationId,omitempty"` +} + +type ContinuousRegion struct { + FromTime string `json:"fromTime,omitempty"` + ToTime string `json:"toTime,omitempty"` + TimeZone string `json:"timeZone,omitempty"` + SnapshotIds []string `json:"snapshotIds,omitempty"` + PartialRanges bool `json:"partialRanges,omitempty"` + SubRange bool `json:"subRange,omitempty"` + Message interface{} `json:"message,omitempty"` + UnknownTimeRanges interface{} `json:"unknownTimeRanges,omitempty"` + TimeRangeAndDatabases interface{} `json:"timeRangeAndDatabases,omitempty"` + Snapshots interface{} `json:"snapshots,omitempty"` + DBLogs []*DBLogs 
`json:"dbLogs,omitempty"` + ProcessedRanges []*ProcessedRanges `json:"processedRanges,omitempty"` + UnprocessedRanges []*ProcessedRanges `json:"unprocessedRanges,omitempty"` +} + +type LastContinuousSnapshotMetadata struct { + FromTimeStamp string `json:"fromTimeStamp,omitempty"` + ToTimeStamp string `json:"toTimeStamp,omitempty"` + ReplicationRetryCount int `json:"replicationRetryCount,omitempty"` + CurationRetryCount int `json:"curationRetryCount,omitempty"` + Async bool `json:"async,omitempty"` + Standby bool `json:"standby,omitempty"` + SecureInfo interface{} `json:"secureInfo,omitempty"` + Info interface{} `json:"info,omitempty"` + DeregisterInfo interface{} `json:"deregisterInfo,omitempty"` + LastReplicationRetryTimestamp interface{} `json:"lastReplicationRetryTimestamp,omitempty"` + LastReplicationRetrySourceSnapshotID interface{} `json:"lastReplicationRetrySourceSnapshotId,omitempty"` + OperationsUsingSnapshot []interface{} `json:"operationsUsingSnapshot,omitempty"` +} + +type LastContinuousSnapshot struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + OwnerID string `json:"ownerId,omitempty"` + DateCreated string `json:"dateCreated,omitempty"` + DateModified string `json:"dateModified,omitempty"` + SnapshotID string `json:"snapshotId,omitempty"` + SnapshotUUID string `json:"snapshotUuid,omitempty"` + NxClusterID string `json:"nxClusterId,omitempty"` + ProtectionDomainID string `json:"protectionDomainId,omitempty"` + TimeMachineID string `json:"timeMachineId,omitempty"` + DatabaseNodeID string `json:"databaseNodeId,omitempty"` + AppInfoVersion string `json:"appInfoVersion,omitempty"` + Status string `json:"status,omitempty"` + Type string `json:"type,omitempty"` + SnapshotTimeStamp string `json:"snapshotTimeStamp,omitempty"` + SoftwareSnapshotID string `json:"softwareSnapshotId,omitempty"` + TimeZone string `json:"timeZone,omitempty"` + FromTimeStamp string `json:"fromTimeStamp,omitempty"` + ToTimeStamp string `json:"toTimeStamp,omitempty"` + ApplicableTypes []string `json:"applicableTypes,omitempty"` + SoftwareDatabaseSnapshot bool `json:"softwareDatabaseSnapshot,omitempty"` + Processed bool `json:"processed,omitempty"` + DatabaseSnapshot bool `json:"databaseSnapshot,omitempty"` + ParentSnapshot bool `json:"parentSnapshot,omitempty"` + DBServerStorageMetadataVersion int `json:"dbServerStorageMetadataVersion,omitempty"` + SnapshotTimeStampDate int64 `json:"snapshotTimeStampDate,omitempty"` + SnapshotSize float64 `json:"snapshotSize,omitempty"` + AccessLevel interface{} `json:"accessLevel,omitempty"` + Metric interface{} `json:"metric,omitempty"` + SanitisedFromSnapshotID interface{} `json:"sanitisedFromSnapshotId,omitempty"` + DBserverID interface{} `json:"dbserverId,omitempty"` + DBserverName interface{} `json:"dbserverName,omitempty"` + DBserverIP interface{} `json:"dbserverIp,omitempty"` + ReplicatedSnapshots interface{} `json:"replicatedSnapshots,omitempty"` + SoftwareSnapshot interface{} `json:"softwareSnapshot,omitempty"` + SanitisedSnapshots interface{} `json:"sanitisedSnapshots,omitempty"` + Description interface{} `json:"description,omitempty"` + SnapshotFamily interface{} `json:"snapshotFamily,omitempty"` + ParentSnapshotID interface{} `json:"parentSnapshotId,omitempty"` + Properties []*DBInstanceProperties `json:"properties,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + Info *CloneInfo `json:"info,omitempty"` + Metadata *LastContinuousSnapshotMetadata `json:"metadata,omitempty"` + LcmConfig *LcmConfig `json:"lcmConfig,omitempty"` +} 
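The structs above back the new Service methods added in era_service.go. A minimal, illustrative sketch of how they compose follows; it is not part of the patch, `conn` is assumed to be the provider's initialized *era.Client, and the function name `exampleSnapshotThenClone` is hypothetical:

// Illustrative-only sketch: take a manual snapshot of a time machine, then
// clone from its latest snapshot using the endpoints defined above.
func exampleSnapshotThenClone(ctx context.Context, conn *era.Client, tmsID string) error {
	// Every mutating NDB call returns an async operation; in the provider the
	// returned operation ID is polled via eraRefresh before the entity is used.
	snapReq := &era.DatabaseSnapshotRequest{
		Name: utils.StringPtr("manual-snap"),
	}
	if _, err := conn.Service.DatabaseSnapshot(ctx, tmsID, snapReq); err != nil {
		return err
	}

	// Clone from the latest snapshot of the same time machine.
	cloneReq := &era.CloneRequest{
		Name:           utils.StringPtr("clone-from-latest"),
		TimeMachineID:  utils.StringPtr(tmsID),
		LatestSnapshot: true,
	}
	_, err := conn.Service.CreateClone(ctx, tmsID, cloneReq)
	return err
}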
diff --git a/examples/ndb/clone/main.tf b/examples/ndb/clone/main.tf
new file mode 100644
index 000000000..6d6f1f8f5
--- /dev/null
+++ b/examples/ndb/clone/main.tf
@@ -0,0 +1,42 @@
+terraform{
+  required_providers {
+    nutanix = {
+      source  = "nutanix/nutanix"
+      version = "1.8.0"
+    }
+  }
+}
+
+#defining nutanix configuration
+provider "nutanix"{
+  ndb_username = var.ndb_username
+  ndb_password = var.ndb_password
+  ndb_endpoint = var.ndb_endpoint
+  insecure     = true
+}
+
+
+## resource for ndb_clone with point-in-time recovery, given a time machine name
+
+resource "nutanix_ndb_clone" "name" {
+  time_machine_name             = "test-pg-inst"
+  name                          = "test-inst-tf-check"
+  nx_cluster_id                 = "{{ nx_Cluster_id }}"
+  ssh_public_key                = "{{ sshkey }}"
+  user_pitr_timestamp           = "{{ point_in_time }}"
+  time_zone                     = "Asia/Calcutta"
+  create_dbserver               = true
+  compute_profile_id            = "{{ compute_profile_id }}"
+  network_profile_id            = "{{ network_profile_id }}"
+  database_parameter_profile_id = "{{ database_profile_id }}"
+  nodes{
+    vm_name            = "test_vm_clone"
+    compute_profile_id = "{{ compute_profile_id }}"
+    network_profile_id = "{{ network_profile_id }}"
+    nx_cluster_id      = "{{ nx_Cluster_id }}"
+  }
+  postgresql_info{
+    vm_name     = "test_vm_clone"
+    db_password = "pass"
+  }
+}
diff --git a/examples/ndb/clone/terraform.tfvars b/examples/ndb/clone/terraform.tfvars
new file mode 100644
index 000000000..4f5de990b
--- /dev/null
+++ b/examples/ndb/clone/terraform.tfvars
@@ -0,0 +1,4 @@
+#define values to the variables to be used in terraform file
+ndb_password = "password"
+ndb_endpoint = "10.xx.xx.xx"
+ndb_username = "username"
diff --git a/examples/ndb/clone/variables.tf b/examples/ndb/clone/variables.tf
new file mode 100644
index 000000000..1a0cb89bf
--- /dev/null
+++ b/examples/ndb/clone/variables.tf
@@ -0,0 +1,10 @@
+#define the type of variables to be used in terraform file
+variable "ndb_username" {
+  type = string
+}
+variable "ndb_password" {
+  type = string
+}
+variable "ndb_endpoint" {
+  type = string
+}
diff --git a/examples/ndb/database_snapshot/main.tf b/examples/ndb/database_snapshot/main.tf
new file mode 100644
index 000000000..a343539d8
--- /dev/null
+++ b/examples/ndb/database_snapshot/main.tf
@@ -0,0 +1,32 @@
+terraform{
+  required_providers {
+    nutanix = {
+      source  = "nutanix/nutanix"
+      version = "1.8.0"
+    }
+  }
+}
+
+#defining nutanix configuration
+provider "nutanix"{
+  ndb_username = var.ndb_username
+  ndb_password = var.ndb_password
+  ndb_endpoint = var.ndb_endpoint
+  insecure     = true
+}
+
+// resource to create snapshot with time machine id
+
+resource "nutanix_ndb_database_snapshot" "name" {
+  time_machine_id         = "{{ tms_ID }}"
+  name                    = "test-snap"
+  remove_schedule_in_days = 1
+}
+
+// resource to create snapshot with time machine name
+
+resource "nutanix_ndb_database_snapshot" "name" {
+  time_machine_name       = "{{ tms_name }}"
+  name                    = "test-snap"
+  remove_schedule_in_days = 1
+}
\ No newline at end of file
diff --git a/examples/ndb/database_snapshot/terraform.tfvars b/examples/ndb/database_snapshot/terraform.tfvars
new file mode 100644
index 000000000..4f5de990b
--- /dev/null
+++ b/examples/ndb/database_snapshot/terraform.tfvars
@@ -0,0 +1,4 @@
+#define values to the variables to be used in terraform file
+ndb_password = "password"
+ndb_endpoint = "10.xx.xx.xx"
+ndb_username = "username"
diff --git a/examples/ndb/database_snapshot/variables.tf b/examples/ndb/database_snapshot/variables.tf
new file mode 100644
index 000000000..1a0cb89bf
--- /dev/null
+++
b/examples/ndb/database_snapshot/variables.tf @@ -0,0 +1,10 @@ +#define the type of variables to be used in terraform file +variable "ndb_username" { + type = string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} diff --git a/nutanix/data_source_nutanix_ndb_clone.go b/nutanix/data_source_nutanix_ndb_clone.go new file mode 100644 index 000000000..ec4f61489 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_clone.go @@ -0,0 +1,380 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixNDBClone() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBCloneRead, + Schema: map[string]*schema.Schema{ + "clone_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"clone_name"}, + }, + "clone_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"clone_id"}, + }, + "filters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "detailed": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "any_status": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "load_dbserver_cluster": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "timezone": { + Type: schema.TypeString, + Optional: true, + Default: "UTC", + }, + }, + }, + }, + + // computed + + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "clustered": { + Type: schema.TypeBool, + Computed: true, + }, + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "era_created": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "placeholder": { + Type: schema.TypeBool, + Computed: true, + }, + "database_name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "database_cluster_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "database_status": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_logical_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_zone": { + Type: schema.TypeString, + Computed: true, + }, + "info": dataSourceEraDatabaseInfo(), + "group_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "metadata": dataSourceEraDBInstanceMetadata(), + "metric": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "category": { + Type: schema.TypeString, + Computed: true, + }, + "parent_database_id": { + Type: schema.TypeString, + Computed: true, + }, + 
"parent_source_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "time_machine": dataSourceEraTimeMachine(), + "dbserver_logical_cluster": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_nodes": dataSourceEraDatabaseNodes(), + "linked_databases": dataSourceEraLinkedDatabases(), + "databases": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_group_state_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func dataSourceNutanixNDBCloneRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + cloneID, ok := d.GetOk("clone_id") + cloneName, cok := d.GetOk("clone_name") + + if !ok && !cok { + return diag.Errorf("atleast one of clone_id or clone_name is required") + } + + filterParams := &era.FilterParams{} + if filter, fok := d.GetOk("filters"); fok { + filterList := filter.([]interface{}) + + for _, v := range filterList { + val := v.(map[string]interface{}) + + if detailed, dok := val["detailed"]; dok { + filterParams.Detailed = detailed.(string) + } + + if anyStatus, aok := val["any_status"]; aok { + filterParams.AnyStatus = anyStatus.(string) + } + if loadDB, lok := val["load_dbserver_details"]; lok { + filterParams.LoadDBServerCluster = loadDB.(string) + } + + if timezone, tok := val["timezone"]; tok { + filterParams.TimeZone = timezone.(string) + } + } + } else { + filterParams.Detailed = "false" + filterParams.AnyStatus = "false" + filterParams.LoadDBServerCluster = "false" + filterParams.TimeZone = "UTC" + } + + resp, err := conn.Service.GetClone(ctx, cloneID.(string), cloneName.(string), filterParams) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("id", resp.ID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.Ownerid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_created", resp.Datecreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.Datemodified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("properties", flattenDBInstanceProperties(resp.Properties)); err != nil { + return diag.FromErr(err) + } + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clone", resp.Clone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clustered", resp.Clustered); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("internal", resp.Internal); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("placeholder", resp.Placeholder); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_name", resp.Databasename); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_cluster_type", resp.Databaseclustertype); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_status", resp.Databasestatus); err != nil { + return diag.FromErr(err) + } + 
+ if err := d.Set("dbserver_logical_cluster_id", resp.Dbserverlogicalclusterid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine_id", resp.Timemachineid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_time_machine_id", resp.Parenttimemachineid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_zone", resp.Timezone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("info", flattenDBInfo(resp.Info)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("group_info", resp.GroupInfo); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metadata", flattenDBInstanceMetadata(resp.Metadata)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metric", resp.Metric); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("category", resp.Category); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_database_id", resp.ParentDatabaseID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_source_database_id", resp.ParentSourceDatabaseID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("lcm_config", flattenDBLcmConfig(resp.Lcmconfig)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine", flattenDBTimeMachine(resp.TimeMachine)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_logical_cluster", resp.Dbserverlogicalcluster); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_nodes", flattenDBNodes(resp.Databasenodes)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("linked_databases", flattenDBLinkedDbs(resp.Linkeddatabases)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("databases", resp.Databases); err != nil { + return diag.FromErr(err) + } + if err := d.Set("database_group_state_info", resp.DatabaseGroupStateInfo); err != nil { + return diag.FromErr(err) + } + + d.SetId(resp.ID) + + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_clones.go b/nutanix/data_source_nutanix_ndb_clones.go new file mode 100644 index 000000000..1696c57ee --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_clones.go @@ -0,0 +1,258 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixNDBClones() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBClonesRead, + Schema: map[string]*schema.Schema{ + "filters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "detailed": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "any_status": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "load_dbserver_cluster": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "timezone": { + Type: schema.TypeString, + Optional: true, + Default: "UTC", + }, + "order_by_dbserver_cluster": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "order_by_dbserver_logical_cluster": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + }, + }, + }, + "clones": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, 
+ Computed: true,
+ },
+ "name": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "description": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "owner_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "date_created": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "date_modified": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "properties": dataSourceEraDatabaseProperties(),
+ "tags": dataSourceEraDBInstanceTags(),
+ "clustered": {
+ Type: schema.TypeBool,
+ Computed: true,
+ },
+ "clone": {
+ Type: schema.TypeBool,
+ Computed: true,
+ },
+ "era_created": {
+ Type: schema.TypeBool,
+ Computed: true,
+ },
+ "internal": {
+ Type: schema.TypeBool,
+ Computed: true,
+ },
+ "placeholder": {
+ Type: schema.TypeBool,
+ Computed: true,
+ },
+ "database_name": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "type": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "database_cluster_type": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "status": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "database_status": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "dbserver_logical_cluster_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "time_machine_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "parent_time_machine_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "time_zone": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "info": dataSourceEraDatabaseInfo(),
+ "group_info": {
+ Type: schema.TypeMap,
+ Computed: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "metadata": dataSourceEraDBInstanceMetadata(),
+ "metric": {
+ Type: schema.TypeMap,
+ Computed: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "category": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "parent_database_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "parent_source_database_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "lcm_config": dataSourceEraLCMConfig(),
+ "time_machine": dataSourceEraTimeMachine(),
+ "dbserver_logical_cluster": {
+ Type: schema.TypeMap,
+ Computed: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "database_nodes": dataSourceEraDatabaseNodes(),
+ "linked_databases": dataSourceEraLinkedDatabases(),
+ "databases": {
+ Type: schema.TypeMap,
+ Computed: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "database_group_state_info": {
+ Type: schema.TypeMap,
+ Computed: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func dataSourceNutanixNDBClonesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ conn := meta.(*Client).Era
+
+ filterParams := &era.FilterParams{}
+ if filter, fok := d.GetOk("filters"); fok {
+ filterList := filter.([]interface{})
+
+ for _, v := range filterList {
+ val := v.(map[string]interface{})
+
+ if detailed, dok := val["detailed"]; dok {
+ filterParams.Detailed = detailed.(string)
+ }
+
+ if anyStatus, aok := val["any_status"]; aok {
+ filterParams.AnyStatus = anyStatus.(string)
+ }
+ if loadDB, lok := val["load_dbserver_cluster"]; lok {
+ filterParams.LoadDBServerCluster = loadDB.(string)
+ }
+
+ if timezone, tok := val["timezone"]; tok {
+ filterParams.TimeZone = timezone.(string)
+ }
+
+ if orderCls, ok := val["order_by_dbserver_cluster"]; ok {
+ filterParams.OrderByDBServerCluster = orderCls.(string)
+ }
+
+ if orderLogicalCls, ok := val["order_by_dbserver_logical_cluster"]; ok {
+ 
filterParams.OrderByDBServerLogicalCluster = orderLogicalCls.(string) + } + } + } else { + filterParams.Detailed = "false" + filterParams.AnyStatus = "false" + filterParams.LoadDBServerCluster = "false" + filterParams.TimeZone = "UTC" + filterParams.OrderByDBServerCluster = "false" + filterParams.OrderByDBServerLogicalCluster = "false" + } + + resp, err := conn.Service.ListClones(ctx, filterParams) + if err != nil { + return diag.FromErr(err) + } + + if e := d.Set("clones", flattenDatabaseIntancesList(resp)); e != nil { + return diag.FromErr(e) + } + + uuid, er := uuid.GenerateUUID() + if er != nil { + return diag.Errorf("Error generating UUID for era clones: %+v", er) + } + d.SetId(uuid) + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_database.go b/nutanix/data_source_nutanix_ndb_database.go index 3b0a54626..6ba87668d 100644 --- a/nutanix/data_source_nutanix_ndb_database.go +++ b/nutanix/data_source_nutanix_ndb_database.go @@ -572,7 +572,7 @@ func flattenDBLcmConfig(pr *Era.LcmConfig) []map[string]interface{} { if pr != nil { lcm := map[string]interface{}{} - lcm["expiryDetails"] = flattenEraExpiryDetails(pr.ExpiryDetails) + lcm["expiry_details"] = flattenEraExpiryDetails(pr.ExpiryDetails) lcm["refresh_details"] = flattenEraRefreshDetails(pr.RefreshDetails) var preLcmComm []map[string]interface{} @@ -847,7 +847,9 @@ func flattenTimeMachineMetadata(pr *Era.TimeMachineMetadata) []map[string]interf tm["secure_info"] = pr.SecureInfo tm["info"] = pr.Info - tm["deregister_info"] = flattenDeRegiserInfo(pr.DeregisterInfo) + if pr.DeregisterInfo != nil { + tm["deregister_info"] = flattenDeRegiserInfo(pr.DeregisterInfo) + } tm["capability_reset_time"] = pr.CapabilityResetTime tm["auto_heal"] = pr.AutoHeal tm["auto_heal_snapshot_count"] = pr.AutoHealSnapshotCount diff --git a/nutanix/data_source_nutanix_ndb_snapshot.go b/nutanix/data_source_nutanix_ndb_snapshot.go new file mode 100644 index 000000000..dd45803ef --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_snapshot.go @@ -0,0 +1,480 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixNDBSnapshot() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBSnapshotRead, + Schema: map[string]*schema.Schema{ + "snapshot_id": { + Type: schema.TypeString, + Required: true, + }, + "filters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "timezone": { + Type: schema.TypeString, + Optional: true, + Default: "UTC", + }, + "load_replicated_child_snapshots": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + }, + }, + }, + + // computed args + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "snapshot_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "protection_domain_id": { + Type: schema.TypeString, + Computed: 
true, + }, + "parent_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "app_info_version": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "applicable_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshot_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": dataSourceEraDatabaseInfo(), + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "replication_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "last_replication_retyr_source_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "async": { + Type: schema.TypeBool, + Computed: true, + }, + "stand_by": { + Type: schema.TypeBool, + Computed: true, + }, + "curation_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "operations_using_snapshot": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "software_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "software_database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "dbserver_storage_metadata_version": { + Type: schema.TypeInt, + Computed: true, + }, + "santised": { + Type: schema.TypeBool, + Computed: true, + }, + "santised_from_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + "processed": { + Type: schema.TypeBool, + Computed: true, + }, + "database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_id": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_name": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_ip": { + Type: schema.TypeString, + Computed: true, + }, + "replicated_snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "software_snapshot": { + Type: schema.TypeString, + Computed: true, + }, + "santised_snapshots": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_family": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_timestamp_date": { + Type: schema.TypeInt, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "parent_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "snapshot_size": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + } +} + +func dataSourceNutanixNDBSnapshotRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + snapID := "" + if snapshotID, ok := d.GetOk("snapshot_id"); ok { + snapID = snapshotID.(string) + } + 
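+ // When the optional filters block is present its values are copied into
+ // era.FilterParams below; otherwise the else branch applies the same
+ // defaults the schema declares ("UTC", "false"). A hypothetical usage
+ // sketch, chaining the list data source the same way the tests do:
+ //
+ //   data "nutanix_ndb_snapshot" "snap" {
+ //     snapshot_id = data.nutanix_ndb_snapshots.all.snapshots.0.id
+ //     filters {
+ //       timezone                        = "UTC"
+ //       load_replicated_child_snapshots = "true"
+ //     }
+ //   }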
+ filterParams := &era.FilterParams{} + if filter, ok := d.GetOk("filters"); ok { + filterList := filter.([]interface{}) + + for _, v := range filterList { + val := v.(map[string]interface{}) + + if timezone, tok := val["timezone"]; tok { + filterParams.TimeZone = timezone.(string) + } + + if loadRep, lok := val["load_replicated_child_snapshots"]; lok { + filterParams.LoadReplicatedChildSnapshots = loadRep.(string) + } + } + } else { + filterParams.TimeZone = "UTC" + filterParams.LoadReplicatedChildSnapshots = "false" + } + + resp, err := conn.Service.GetSnapshot(ctx, snapID, filterParams) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("id", resp.ID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.OwnerID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_created", resp.DateCreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.DateModified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("properties", flattenDBInstanceProperties(resp.Properties)); err != nil { + return diag.FromErr(err) + } + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_uuid", resp.SnapshotUUID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("nx_cluster_id", resp.NxClusterID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("protection_domain_id", resp.ProtectionDomainID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_snapshot_id", resp.ParentSnapshotID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine_id", resp.TimeMachineID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_node_id", resp.DatabaseNodeID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("app_info_version", resp.AppInfoVersion); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("applicable_types", resp.ApplicableTypes); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_timestamp", resp.SnapshotTimeStamp); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("software_snapshot_id", resp.SoftwareSnapshotID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("software_database_snapshot", resp.SoftwareDatabaseSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_storage_metadata_version", resp.DBServerStorageMetadataVersion); err != nil { + return diag.FromErr(err) + } + + // if err := d.Set("santised", resp.Sanitized); err != nil { + // return diag.FromErr(err) + // } + + if err := d.Set("santised_from_snapshot_id", resp.SanitisedFromSnapshotID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("timezone", resp.TimeZone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("processed", resp.Processed); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_snapshot", resp.DatabaseSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("from_timestamp", resp.FromTimeStamp); err != nil { + return diag.FromErr(err) + } 
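+ // The attribute-by-attribute d.Set calls above and below keep each error
+ // close to its source; a table-driven loop is a more compact alternative.
+ // A minimal sketch (the setters map and its contents are hypothetical):
+ //
+ //   setters := map[string]interface{}{
+ //       "to_timestamp": resp.ToTimeStamp,
+ //       "dbserver_id":  resp.DbserverID,
+ //   }
+ //   for k, v := range setters {
+ //       if err := d.Set(k, v); err != nil {
+ //           return diag.FromErr(err)
+ //       }
+ //   }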
+ + if err := d.Set("to_timestamp", resp.ToTimeStamp); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_id", resp.DbserverID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_name", resp.DbserverName); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_ip", resp.DbserverIP); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("replicated_snapshots", resp.ReplicatedSnapshots); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("software_snapshot", resp.SoftwareSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("santised_snapshots", resp.SanitisedSnapshots); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_family", resp.SnapshotFamily); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_timestamp_date", resp.SnapshotTimeStampDate); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_snapshot", resp.ParentSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_size", resp.SnapshotSize); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("lcm_config", flattenDBLcmConfig(resp.LcmConfig)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metadata", flattenClonedMetadata(resp.Metadata)); err != nil { + return diag.FromErr(err) + } + + d.SetId(snapID) + return nil +} + +func flattenClonedMetadata(pr *era.ClonedMetadata) []interface{} { + if pr != nil { + cloneMetadata := make([]interface{}, 0) + meta := make(map[string]interface{}) + + meta["secure_info"] = pr.SecureInfo + meta["info"] = pr.Info + meta["deregister_info"] = pr.DeregisterInfo + meta["from_timestamp"] = pr.FromTimeStamp + meta["to_timestamp"] = pr.ToTimeStamp + meta["replication_retry_count"] = pr.ReplicationRetryCount + meta["last_replication_retyr_source_snapshot_id"] = pr.LastReplicationRetrySourceSnapshotID + meta["async"] = pr.Async + meta["stand_by"] = pr.Standby + meta["curation_retry_count"] = pr.CurationRetryCount + meta["operations_using_snapshot"] = pr.OperationsUsingSnapshot + + cloneMetadata = append(cloneMetadata, meta) + + return cloneMetadata + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_snapshot_test.go b/nutanix/data_source_nutanix_ndb_snapshot_test.go new file mode 100644 index 000000000..56db008b6 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_snapshot_test.go @@ -0,0 +1,101 @@ +package nutanix + +import ( + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const dataSourceNDBSnapshotName = "data.nutanix_ndb_snapshot.test" + +func TestAccEraSnapshotDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraSnapshotDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "name"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "owner_id"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "properties.#"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "metadata.#", "1"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "snapshot_uuid"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "status", "ACTIVE"), + ), + }, + }, + }) +} + +func TestAccEraSnapshotDataSource_WithFilters(t *testing.T) { + resource.Test(t, resource.TestCase{ + 
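+ // Like the basic test above, this run assumes a live NDB endpoint (guarded
+ // by testAccEraPreCheck) with at least one ACTIVE snapshot; the DAILY_EXTRA
+ // type assertion below additionally assumes scheduled snapshots exist.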
PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraSnapshotDataSourceConfigWithFilters(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "name"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "owner_id"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "properties.#"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "nx_cluster_id"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "metadata.#", "1"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "snapshot_uuid"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "status", "ACTIVE"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "type", "DAILY_EXTRA"), + ), + }, + }, + }) +} + +func TestAccEraSnapshotDataSource_WithWrongFilters(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraSnapshotDataSourceConfigWithWrongFilters(), + ExpectError: regexp.MustCompile("An internal error has occurred"), + }, + }, + }) +} + +func testAccEraSnapshotDataSourceConfig() string { + return ` + data "nutanix_ndb_snapshots" "test1" {} + + data "nutanix_ndb_snapshot" "test" { + snapshot_id = data.nutanix_ndb_snapshots.test1.snapshots.0.id + } + ` +} + +func testAccEraSnapshotDataSourceConfigWithFilters() string { + return ` + data "nutanix_ndb_snapshots" "test1" {} + + data "nutanix_ndb_snapshot" "test" { + snapshot_id = data.nutanix_ndb_snapshots.test1.snapshots.0.id + filters{ + timezone= "UTC" + } + } + ` +} + +func testAccEraSnapshotDataSourceConfigWithWrongFilters() string { + return ` + data "nutanix_ndb_snapshots" "test1" {} + + data "nutanix_ndb_snapshot" "test" { + snapshot_id = data.nutanix_ndb_snapshots.test1.snapshots.0.id + filters{ + timezone= "IST" + } + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_snapshots.go b/nutanix/data_source_nutanix_ndb_snapshots.go new file mode 100644 index 000000000..888c85e09 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_snapshots.go @@ -0,0 +1,341 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixNDBSnapshots() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBSnapshotsRead, + Schema: map[string]*schema.Schema{ + "filters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "time_machine_id": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "snapshot_uuid": { + Type: schema.TypeString, + 
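+ // A hypothetical usage sketch for this list variant: with no filters every
+ // snapshot visible to NDB is returned; a time_machine_id filter scopes the
+ // call to a single time machine:
+ //
+ //   data "nutanix_ndb_snapshots" "by_tms" {
+ //     filters {
+ //       time_machine_id = data.nutanix_ndb_time_machines.all.time_machines.0.id
+ //     }
+ //   }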
Computed: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "protection_domain_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "app_info_version": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "applicable_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshot_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": dataSourceEraDatabaseInfo(), + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "replication_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "last_replication_retyr_source_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "async": { + Type: schema.TypeBool, + Computed: true, + }, + "stand_by": { + Type: schema.TypeBool, + Computed: true, + }, + "curation_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "operations_using_snapshot": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "software_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "software_database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "dbserver_storage_metadata_version": { + Type: schema.TypeInt, + Computed: true, + }, + "santised": { + Type: schema.TypeBool, + Computed: true, + }, + "santised_from_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + "processed": { + Type: schema.TypeBool, + Computed: true, + }, + "database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_id": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_name": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_ip": { + Type: schema.TypeString, + Computed: true, + }, + "replicated_snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "software_snapshot": { + Type: schema.TypeString, + Computed: true, + }, + "santised_snapshots": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_family": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_timestamp_date": { + Type: schema.TypeInt, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "parent_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "snapshot_size": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixNDBSnapshotsRead(ctx context.Context, d 
*schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + tmsID := "" + if filter, ok := d.GetOk("filters"); ok { + filterList := filter.([]interface{}) + + for _, v := range filterList { + val := v.(map[string]interface{}) + + if tms, ok := val["time_machine_id"]; ok { + tmsID = tms.(string) + } + } + } + + resp, err := conn.Service.ListSnapshots(ctx, tmsID) + if err != nil { + return diag.FromErr(err) + } + + if e := d.Set("snapshots", flattenSnapshotsList(resp)); e != nil { + return diag.FromErr(e) + } + + uuid, er := uuid.GenerateUUID() + if er != nil { + return diag.Errorf("Error generating UUID for era snapshots: %+v", er) + } + d.SetId(uuid) + return nil +} + +func flattenSnapshotsList(sn *era.ListSnapshots) []map[string]interface{} { + if sn != nil { + snpList := []map[string]interface{}{} + for _, val := range *sn { + snap := map[string]interface{}{} + + snap["id"] = val.ID + snap["name"] = val.Name + snap["description"] = val.Description + snap["owner_id"] = val.OwnerID + snap["date_created"] = val.DateCreated + snap["date_modified"] = val.DateModified + snap["properties"] = flattenDBInstanceProperties(val.Properties) + snap["tags"] = flattenDBTags(val.Tags) + snap["snapshot_uuid"] = val.SnapshotUUID + snap["nx_cluster_id"] = val.NxClusterID + snap["protection_domain_id"] = val.ProtectionDomainID + snap["parent_snapshot_id"] = val.ParentSnapshotID + snap["time_machine_id"] = val.TimeMachineID + snap["database_node_id"] = val.DatabaseNodeID + snap["app_info_version"] = val.AppInfoVersion + snap["status"] = val.Status + snap["type"] = val.Type + snap["applicable_types"] = val.ApplicableTypes + snap["snapshot_timestamp"] = val.SnapshotTimeStamp + snap["metadata"] = flattenClonedMetadata(val.Metadata) + snap["software_snapshot_id"] = val.SoftwareSnapshotID + snap["software_database_snapshot"] = val.SoftwareDatabaseSnapshot + snap["dbserver_storage_metadata_version"] = val.DBServerStorageMetadataVersion + // snap["santised"] = val.Sanitized + snap["santised_from_snapshot_id"] = val.SanitisedFromSnapshotID + snap["timezone"] = val.TimeZone + snap["processed"] = val.Processed + snap["database_snapshot"] = val.DatabaseSnapshot + snap["from_timestamp"] = val.FromTimeStamp + snap["to_timestamp"] = val.ToTimeStamp + snap["dbserver_id"] = val.DbserverID + snap["dbserver_name"] = val.DbserverName + snap["dbserver_ip"] = val.DbserverIP + snap["replicated_snapshots"] = val.ReplicatedSnapshots + snap["software_snapshot"] = val.SoftwareSnapshot + snap["santised_snapshots"] = val.SanitisedSnapshots + snap["snapshot_family"] = val.SnapshotFamily + snap["snapshot_timestamp_date"] = val.SnapshotTimeStampDate + snap["lcm_config"] = flattenDBLcmConfig(val.LcmConfig) + snap["parent_snapshot"] = val.ParentSnapshot + snap["snapshot_size"] = val.SnapshotSize + + snpList = append(snpList, snap) + } + return snpList + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_snapshots_test.go b/nutanix/data_source_nutanix_ndb_snapshots_test.go new file mode 100644 index 000000000..2bfc67a34 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_snapshots_test.go @@ -0,0 +1,67 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const dataSourceNDBSnapshotsName = "data.nutanix_ndb_snapshots.test" + +func TestAccEraSnapshotsDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: 
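+ // empty config: lists every snapshot, so the checks below assume
+ // the target NDB deployment already holds at least one ACTIVE snapshot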
testAccEraSnapshotsDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.name"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.owner_id"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.properties.#"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotsName, "snapshots.0.metadata.#", "1"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.snapshot_uuid"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotsName, "snapshots.0.status", "ACTIVE"), + ), + }, + }, + }) +} + +func TestAccEraSnapshotsDataSource_WithFilters(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraSnapshotsDataSourceConfigWithFilters(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.name"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.owner_id"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.properties.#"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotsName, "snapshots.0.metadata.#", "1"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.snapshot_uuid"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotsName, "snapshots.0.status", "ACTIVE"), + ), + }, + }, + }) +} + +func testAccEraSnapshotsDataSourceConfig() string { + return ` + data "nutanix_ndb_snapshots" "test" {} + ` +} + +func testAccEraSnapshotsDataSourceConfigWithFilters() string { + return ` + data "nutanix_ndb_time_machines" "test1" {} + + data "nutanix_ndb_snapshots" "test" { + filters{ + time_machine_id = data.nutanix_ndb_time_machines.test1.time_machines.0.id + } + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_time_machine.go b/nutanix/data_source_nutanix_ndb_time_machine.go new file mode 100644 index 000000000..789aac348 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_time_machine.go @@ -0,0 +1,676 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceNutanixNDBTimeMachine() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBTimeMachineRead, + Schema: map[string]*schema.Schema{ + "time_machine_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"time_machine_name"}, + }, + "time_machine_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"time_machine_id"}, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "access_level": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "clustered": { + Type: schema.TypeBool, + Computed: true, + }, + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "database_id": { + Type: schema.TypeString, + Computed: 
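+ // time_machine_id and time_machine_name above are mutually exclusive
+ // (ConflictsWith), and the read errors out when neither is set. A
+ // hypothetical lookup by name (the name itself is illustrative):
+ //
+ //   data "nutanix_ndb_time_machine" "by_name" {
+ //     time_machine_name = "PRD_TMS"
+ //   }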
true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "category": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "ea_status": { + Type: schema.TypeString, + Computed: true, + }, + "scope": { + Type: schema.TypeString, + Computed: true, + }, + "sla_id": { + Type: schema.TypeString, + Computed: true, + }, + "schedule_id": { + Type: schema.TypeString, + Computed: true, + }, + "database": { + Type: schema.TypeString, + Computed: true, + }, + "clones": { + Type: schema.TypeString, + Computed: true, + }, + "source_nx_clusters": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "sla_update_in_progress": { + Type: schema.TypeBool, + Computed: true, + }, + "metric": { + Type: schema.TypeString, + Computed: true, + }, + "sla_update_metadata": { + Type: schema.TypeString, + Computed: true, + }, + "sla": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "unique_name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "system_sla": { + Type: schema.TypeBool, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + + "continuous_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "daily_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "weekly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "monthly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "quarterly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "yearly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "reference_count": { + Type: schema.TypeInt, + Computed: true, + }, + "pitr_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "current_active_frequency": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "unique_name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "system_policy": { + Type: schema.TypeBool, + Computed: true, + }, + "global_policy": { + Type: schema.TypeBool, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_time_of_day": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Computed: true, + }, + "minutes": { + Type: schema.TypeInt, + Computed: true, + }, + "seconds": { + Type: schema.TypeInt, + Computed: true, + }, + "extra": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "continuous_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_backup_interval": { + Type: 
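+ // These schedule blocks mirror the era.Schedule shape used at provision
+ // time (snapshot_time_of_day, continuous/weekly/monthly/quarterly/yearly),
+ // exposed here read-only.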
schema.TypeInt, + Computed: true, + }, + "snapshots_per_day": { + Type: schema.TypeInt, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "weekly_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day_of_week": { + Type: schema.TypeString, + Computed: true, + }, + "day_of_week_value": { + Type: schema.TypeString, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "monthly_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day_of_month": { + Type: schema.TypeInt, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "yearly_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "month": { + Type: schema.TypeString, + Computed: true, + }, + "month_value": { + Type: schema.TypeString, + Computed: true, + }, + "day_of_month": { + Type: schema.TypeInt, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "quartely_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_month": { + Type: schema.TypeString, + Computed: true, + }, + "start_month_value": { + Type: schema.TypeString, + Computed: true, + }, + "day_of_month": { + Type: schema.TypeInt, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "daily_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "reference_count": { + Type: schema.TypeInt, + Computed: true, + }, + "start_time": { + Type: schema.TypeString, + Computed: true, + }, + "time_zone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "capability_reset_time": { + Type: schema.TypeString, + Computed: true, + }, + "auto_heal": { + Type: schema.TypeBool, + Computed: true, + }, + "auto_heal_snapshot_count": { + Type: schema.TypeInt, + Computed: true, + }, + "auto_heal_log_catchup_count": { + Type: schema.TypeInt, + Computed: true, + }, + "first_snapshot_captured": { + Type: schema.TypeBool, + Computed: true, + }, + "first_snapshot_dispatched": { + Type: schema.TypeBool, + Computed: true, + }, + "last_snapshot_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_auto_snapshot_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_snapshot_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "last_auto_snapshot_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "last_successful_snapshot_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_successive_failure_count": { + Type: schema.TypeInt, + Computed: true, + }, + 
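+ // The remaining metadata attributes are operational bookkeeping
+ // (heal, log-catchup, and pause state) maintained by NDB; they are
+ // filled one-to-one by flattenTimeMachineMetadata in the read below.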
"last_heal_snapshot_operation": { + Type: schema.TypeString, + Computed: true, + }, + "last_log_catchup_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_successful_log_catchup_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "last_log_catchup_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "log_catchup_successive_failure_count": { + Type: schema.TypeInt, + Computed: true, + }, + "last_pause_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_pause_by_force": { + Type: schema.TypeBool, + Computed: true, + }, + "last_resume_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_pause_reason": { + Type: schema.TypeString, + Computed: true, + }, + "state_before_restore": { + Type: schema.TypeString, + Computed: true, + }, + "last_health_alerted_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_ea_breakdown_time": { + Type: schema.TypeString, + Computed: true, + }, + "authorized_dbservers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "last_heal_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_heal_system_triggered": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixNDBTimeMachineRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + tmsID, tok := d.GetOk("time_machine_id") + tmsName, tnOk := d.GetOk("time_machine_name") + + if !tok && !tnOk { + return diag.Errorf("Atleast one of time_machine_id or time_machine_name is required to perform clone") + } + + // call time Machine API + + resp, err := conn.Service.GetTimeMachine(ctx, tmsID.(string), tmsName.(string)) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("id", resp.ID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.OwnerID); err != nil { + return diag.FromErr(err) + } + if err := d.Set("date_created", resp.DateCreated); err != nil { + return diag.FromErr(err) + } + if err := d.Set("date_modified", resp.DateModified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("access_level", resp.AccessLevel); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("properties", flattenDBInstanceProperties(resp.Properties)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clustered", resp.Clustered); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clone", resp.Clone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("internal", resp.Internal); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_id", resp.DatabaseID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("category", resp.Category); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("ea_status", resp.EaStatus); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clustered", resp.Clustered); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clone", 
resp.Clone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("scope", resp.Scope); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("sla_id", resp.SLAID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("schedule_id", resp.ScheduleID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metric", resp.Metric); err != nil { + return diag.FromErr(err) + } + if err := d.Set("database", resp.Database); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clones", resp.Clones); err != nil { + return diag.FromErr(err) + } + if err := d.Set("source_nx_clusters", resp.SourceNxClusters); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("sla_update_in_progress", resp.SLAUpdateInProgress); err != nil { + return diag.FromErr(err) + } + if err := d.Set("sla", flattenDBSLA(resp.SLA)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("schedule", flattenSchedule(resp.Schedule)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metadata", flattenTimeMachineMetadata(resp.Metadata)); err != nil { + return diag.FromErr(err) + } + + d.SetId(*resp.ID) + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_time_machine_capability.go b/nutanix/data_source_nutanix_ndb_time_machine_capability.go new file mode 100644 index 000000000..43d3bc489 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_time_machine_capability.go @@ -0,0 +1,1217 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func dataSourceNutanixNDBTmsCapability() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBTmsCapabilityRead, + Schema: map[string]*schema.Schema{ + "time_machine_id": { + Type: schema.TypeString, + Required: true, + }, + "output_time_zone": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "source": { + Type: schema.TypeBool, + Computed: true, + }, + "nx_cluster_association_type": { + Type: schema.TypeString, + Computed: true, + }, + "sla_id": { + Type: schema.TypeString, + Computed: true, + }, + "overall_continuous_range_end_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_continuous_snapshot_time": { + Type: schema.TypeString, + Computed: true, + }, + "log_catchup_start_time": { + Type: schema.TypeString, + Computed: true, + }, + "heal_with_reset_capability": { + Type: schema.TypeBool, + Computed: true, + }, + "database_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + // check data schema later + "log_time_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "capability": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeString, + Computed: true, + }, + "from": { + Type: schema.TypeString, + Computed: true, + }, + "to": { + Type: schema.TypeString, + Computed: true, + }, + "time_unit": { + Type: schema.TypeString, + Computed: true, + }, + "time_unit_number": { + Type: schema.TypeString, + Computed: true, + }, + "database_ids": 
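+ // A hypothetical lookup sketch (assuming this data source is registered
+ // as nutanix_ndb_tms_capability; time_machine_id is the only required
+ // argument):
+ //
+ //   data "nutanix_ndb_tms_capability" "cap" {
+ //     time_machine_id = data.nutanix_ndb_time_machine.by_name.id
+ //   }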
{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "snapshot_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "protection_domain_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "app_info_version": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "applicable_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshot_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": dataSourceEraDatabaseInfo(), + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "replication_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "last_replication_retyr_source_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "async": { + Type: schema.TypeBool, + Computed: true, + }, + "stand_by": { + Type: schema.TypeBool, + Computed: true, + }, + "curation_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "operations_using_snapshot": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "software_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "software_database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "dbserver_storage_metadata_version": { + Type: schema.TypeInt, + Computed: true, + }, + "santised": { + Type: schema.TypeBool, + Computed: true, + }, + "santised_from_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + "processed": { + Type: schema.TypeBool, + Computed: true, + }, + "database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_id": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_name": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_ip": { + Type: 
schema.TypeString, + Computed: true, + }, + "replicated_snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "software_snapshot": { + Type: schema.TypeString, + Computed: true, + }, + "santised_snapshots": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_family": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_timestamp_date": { + Type: schema.TypeInt, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "parent_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "snapshot_size": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + "continuous_region": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from_time": { + Type: schema.TypeString, + Computed: true, + }, + "to_time": { + Type: schema.TypeString, + Computed: true, + }, + "sub_range": { + Type: schema.TypeBool, + Computed: true, + }, + "message": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "unknown_time_ranges": { + Type: schema.TypeString, + Computed: true, + }, + "processed_ranges": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "first": { + Type: schema.TypeString, + Computed: true, + }, + "second": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "unprocessed_ranges": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "first": { + Type: schema.TypeString, + Computed: true, + }, + "second": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "partial_ranges": { + Type: schema.TypeBool, + Computed: true, + }, + "time_range_and_databases": { + Type: schema.TypeString, + Computed: true, + }, + "snapshots": { + Type: schema.TypeString, + Computed: true, + }, + "db_logs": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "era_log_drive_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "from_time": { + Type: schema.TypeString, + Computed: true, + }, + "to_time": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "size": { + Type: schema.TypeInt, + Computed: true, + }, + "info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "unknown_time_range": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "deregister_info": { + Type: schema.TypeList, + Computed: true, + 
Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + }, + "operations": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "curation_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "created_directly": { + Type: schema.TypeBool, + Computed: true, + }, + "updated_directly": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_id": { + Type: schema.TypeString, + Computed: true, + }, + "message": { + Type: schema.TypeString, + Computed: true, + }, + "unprocessed": { + Type: schema.TypeBool, + Computed: true, + }, + "log_copy_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "databases_continuous_region": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "capability_reset_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_db_log": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "era_log_drive_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "from_time": { + Type: schema.TypeString, + Computed: true, + }, + "to_time": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "size": { + Type: schema.TypeInt, + Computed: true, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "deregister_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + }, + "operations": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "curation_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "created_directly": { + Type: schema.TypeBool, + Computed: true, + }, + "updated_directly": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_id": { + Type: schema.TypeString, + Computed: true, + }, + "message": { + Type: schema.TypeString, + Computed: true, + }, + "unprocessed": { + Type: schema.TypeBool, + Computed: true, + }, + "log_copy_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "last_continuous_snapshot": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: 
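+ // last_continuous_snapshot mirrors the snapshot attribute set above and
+ // is populated by flattenLastContinousSnapshot at the bottom of this file.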
schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "snapshot_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "protection_domain_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "app_info_version": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "applicable_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshot_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": dataSourceEraDatabaseInfo(), + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "replication_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "last_replication_retry_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "last_replication_retry_source_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "async": { + Type: schema.TypeBool, + Computed: true, + }, + "stand_by": { + Type: schema.TypeBool, + Computed: true, + }, + "curation_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "operations_using_snapshot": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "software_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "software_database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "dbserver_storage_metadata_version": { + Type: schema.TypeInt, + Computed: true, + }, + "santised": { + Type: schema.TypeBool, + Computed: true, + }, + "santised_from_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + "processed": { + Type: schema.TypeBool, + Computed: true, + }, + "database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_id": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_name": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_ip": { + Type: schema.TypeString, + Computed: true, + }, + "replicated_snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "software_snapshot": { + Type: 
schema.TypeString, + Computed: true, + }, + "santised_snapshots": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_family": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_timestamp_date": { + Type: schema.TypeInt, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "parent_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "snapshot_size": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixNDBTmsCapabilityRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + tmsID := d.Get("time_machine_id") + resp, er := conn.Service.TimeMachineCapability(ctx, tmsID.(string)) + if er != nil { + return diag.FromErr(er) + } + + if err := d.Set("output_time_zone", resp.OutputTimeZone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("nx_cluster_id", resp.NxClusterID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("source", resp.Source); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("nx_cluster_association_type", resp.NxClusterAssociationType); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("sla_id", resp.SLAID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("overall_continuous_range_end_time", resp.OverallContinuousRangeEndTime); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("last_continuous_snapshot_time", resp.LastContinuousSnapshotTime); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("log_catchup_start_time", resp.LogCatchupStartTime); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("heal_with_reset_capability", resp.HealWithResetCapability); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_ids", utils.StringValueSlice(resp.DatabaseIds)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("capability", flattenTmsCapability(resp.Capability)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("capability_reset_time", resp.CapabilityResetTime); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("last_db_log", flattenLastDBLog(resp.LastDBLog)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("last_continuous_snapshot", flattenLastContinousSnapshot(resp.LastContinuousSnapshot)); err != nil { + return diag.FromErr(err) + } + uuid, e := uuid.GenerateUUID() + + if e != nil { + return diag.Errorf("Error generating UUID for era tms capability: %+v", e) + } + d.SetId(uuid) + return nil +} + +func flattenTmsCapability(pr []*era.Capability) []map[string]interface{} { + if len(pr) > 0 { + tmsList := []map[string]interface{}{} + + for _, v := range pr { + cap := map[string]interface{}{} + + cap["continuous_region"] = flattenContinousRegion(v.ContinuousRegion) + cap["database_ids"] = utils.StringValueSlice(v.DatabaseIds) + cap["databases_continuous_region"] = v.DatabasesContinuousRegion + cap["from"] = v.From + cap["mode"] = v.Mode + cap["snapshots"] = flattenSnapshotsList(v.Snapshots) + cap["time_unit"] = v.TimeUnit + cap["time_unit_number"] = v.TimeUnitNumber + cap["to"] = v.To + + tmsList = append(tmsList, cap) + } + return tmsList + } + return nil +} + +func flattenContinousRegion(pr *era.ContinuousRegion) []map[string]interface{} { + if pr != nil { + continousRegion := make([]map[string]interface{}, 0) + conReg := map[string]interface{}{} + + 
conReg["from_time"] = pr.FromTime + conReg["to_time"] = pr.ToTime + conReg["sub_range"] = pr.SubRange + conReg["message"] = pr.Message + conReg["snapshot_ids"] = utils.StringSlice(pr.SnapshotIds) + conReg["unknown_time_ranges"] = pr.UnknownTimeRanges + conReg["processed_ranges"] = flattenProcessedRanges(pr.ProcessedRanges) + conReg["unprocessed_ranges"] = flattenProcessedRanges(pr.UnprocessedRanges) + conReg["partial_ranges"] = pr.PartialRanges + conReg["time_range_and_databases"] = pr.TimeRangeAndDatabases + conReg["snapshots"] = pr.Snapshots + conReg["db_logs"] = flattenDBLogs(pr.DBLogs) + conReg["timezone"] = pr.TimeZone + + continousRegion = append(continousRegion, conReg) + return continousRegion + } + return nil +} + +func flattenDBLogs(pr []*era.DBLogs) []map[string]interface{} { + if len(pr) > 0 { + res := make([]map[string]interface{}, len(pr)) + + for _, v := range pr { + val := map[string]interface{}{} + + val["id"] = v.ID + val["name"] = v.Name + val["era_log_drive_id"] = v.EraLogDriveID + val["database_node_id"] = v.DatabaseNodeID + val["from_time"] = v.FromTime + val["to_time"] = v.ToTime + val["status"] = v.Status + val["size"] = v.Size + val["metadata"] = flattenDBLogMetadata(v.Metadata) + val["date_created"] = v.DateCreated + val["date_modified"] = v.DateModified + val["owner_id"] = v.OwnerID + val["database_id"] = v.DatabaseID + val["message"] = v.Message + val["unprocessed"] = v.Unprocessed + val["log_copy_operation_id"] = v.LogCopyOperationID + + res = append(res, val) + } + return res + } + return nil +} + +func flattenDBLogMetadata(pr *era.DBLogsMetadata) []map[string]interface{} { + if pr != nil { + logsMeta := make([]map[string]interface{}, 0) + log := map[string]interface{}{} + + log["secure_info"] = pr.SecureInfo + log["info"] = pr.Info + log["deregister_info"] = flattenDeRegiserInfo(pr.DeregisterInfo) + log["curation_retry_count"] = pr.CurationRetryCount + log["created_directly"] = pr.CreatedDirectly + log["updated_directly"] = pr.UpdatedDirectly + + logsMeta = append(logsMeta, log) + return logsMeta + } + return nil +} + +func flattenLastDBLog(pr *era.DBLogs) []map[string]interface{} { + if pr != nil { + res := make([]map[string]interface{}, 0) + val := map[string]interface{}{} + + val["id"] = pr.ID + val["name"] = pr.Name + val["era_log_drive_id"] = pr.EraLogDriveID + val["database_node_id"] = pr.DatabaseNodeID + val["from_time"] = pr.FromTime + val["to_time"] = pr.ToTime + val["status"] = pr.Status + val["size"] = pr.Size + val["metadata"] = flattenDBLogMetadata(pr.Metadata) + val["date_created"] = pr.DateCreated + val["date_modified"] = pr.DateModified + val["owner_id"] = pr.OwnerID + val["database_id"] = pr.DatabaseID + val["message"] = pr.Message + val["unprocessed"] = pr.Unprocessed + val["log_copy_operation_id"] = pr.LogCopyOperationID + + res = append(res, val) + return res + } + return nil +} + +func flattenLastContinousSnapshot(pr *era.LastContinuousSnapshot) []map[string]interface{} { + if pr != nil { + snpList := make([]map[string]interface{}, 0) + snap := map[string]interface{}{} + + snap["id"] = pr.ID + snap["name"] = pr.Name + snap["description"] = pr.Description + snap["owner_id"] = pr.OwnerID + snap["date_created"] = pr.DateCreated + snap["date_modified"] = pr.DateModified + snap["properties"] = flattenDBInstanceProperties(pr.Properties) + snap["tags"] = flattenDBTags(pr.Tags) + snap["snapshot_uuid"] = pr.SnapshotUUID + snap["nx_cluster_id"] = pr.NxClusterID + snap["protection_domain_id"] = pr.ProtectionDomainID + snap["parent_snapshot_id"] = 
+func flattenLastContinousSnapshot(pr *era.LastContinuousSnapshot) []map[string]interface{} {
+	if pr != nil {
+		snpList := make([]map[string]interface{}, 0)
+		snap := map[string]interface{}{}
+
+		snap["id"] = pr.ID
+		snap["name"] = pr.Name
+		snap["description"] = pr.Description
+		snap["owner_id"] = pr.OwnerID
+		snap["date_created"] = pr.DateCreated
+		snap["date_modified"] = pr.DateModified
+		snap["properties"] = flattenDBInstanceProperties(pr.Properties)
+		snap["tags"] = flattenDBTags(pr.Tags)
+		snap["snapshot_uuid"] = pr.SnapshotUUID
+		snap["nx_cluster_id"] = pr.NxClusterID
+		snap["protection_domain_id"] = pr.ProtectionDomainID
+		snap["parent_snapshot_id"] = pr.ParentSnapshotID
+		snap["time_machine_id"] = pr.TimeMachineID
+		snap["database_node_id"] = pr.DatabaseNodeID
+		snap["app_info_version"] = pr.AppInfoVersion
+		snap["status"] = pr.Status
+		snap["type"] = pr.Type
+		snap["applicable_types"] = pr.ApplicableTypes
+		snap["snapshot_timestamp"] = pr.SnapshotTimeStamp
+		snap["metadata"] = flattenLastContinousSnapshotMetadata(pr.Metadata)
+		snap["software_snapshot_id"] = pr.SoftwareSnapshotID
+		snap["software_database_snapshot"] = pr.SoftwareDatabaseSnapshot
+		snap["santised_from_snapshot_id"] = pr.SanitisedFromSnapshotID
+		snap["processed"] = pr.Processed
+		snap["database_snapshot"] = pr.DatabaseSnapshot
+		snap["from_timestamp"] = pr.FromTimeStamp
+		snap["to_timestamp"] = pr.ToTimeStamp
+		snap["dbserver_id"] = pr.DBserverID
+		snap["dbserver_name"] = pr.DBserverName
+		snap["dbserver_ip"] = pr.DBserverIP
+		snap["replicated_snapshots"] = pr.ReplicatedSnapshots
+		snap["software_snapshot"] = pr.SoftwareSnapshot
+		snap["santised_snapshots"] = pr.SanitisedSnapshots
+		snap["snapshot_family"] = pr.SnapshotFamily
+		snap["snapshot_timestamp_date"] = pr.SnapshotTimeStampDate
+		snap["lcm_config"] = flattenDBLcmConfig(pr.LcmConfig)
+		snap["parent_snapshot"] = pr.ParentSnapshot
+		snap["snapshot_size"] = pr.SnapshotSize
+
+		snpList = append(snpList, snap)
+		return snpList
+	}
+	return nil
+}
+
+func flattenLastContinousSnapshotMetadata(pr *era.LastContinuousSnapshotMetadata) []map[string]interface{} {
+	if pr != nil {
+		res := make([]map[string]interface{}, 0)
+
+		meta := map[string]interface{}{}
+
+		meta["secure_info"] = pr.SecureInfo
+		meta["info"] = pr.Info
+		meta["deregister_info"] = pr.DeregisterInfo
+		meta["from_timestamp"] = pr.FromTimeStamp
+		meta["to_timestamp"] = pr.ToTimeStamp
+		meta["replication_retry_count"] = pr.ReplicationRetryCount
+		meta["last_replication_retry_timestamp"] = pr.LastReplicationRetryTimestamp
+		meta["last_replication_retry_source_snapshot_id"] = pr.LastReplicationRetrySourceSnapshotID
+		meta["async"] = pr.Async
+		meta["stand_by"] = pr.Standby
+		meta["curation_retry_count"] = pr.CurationRetryCount
+		meta["operations_using_snapshot"] = pr.OperationsUsingSnapshot
+
+		res = append(res, meta)
+		return res
+	}
+	return nil
+}
+
+func flattenProcessedRanges(pr []*era.ProcessedRanges) []interface{} {
+	if len(pr) > 0 {
+		// zero-length allocation: appending to a slice made with
+		// make([]interface{}, len(pr)) would leave nil entries in front
+		res := make([]interface{}, 0, len(pr))
+
+		for _, v := range pr {
+			proRanges := map[string]interface{}{}
+
+			proRanges["first"] = v.First
+			proRanges["second"] = v.Second
+
+			res = append(res, proRanges)
+		}
+		return res
+	}
+	return nil
+}
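dataSourceNutanixNDBTmsCapabilityRead earlier in this file chains more than a dozen d.Set calls, each with its own error branch. If the maintainers want to condense that pattern, a helper along these lines would work; setAttributes is a hypothetical name, sketched only on the standard d.Set signature from the plugin SDK:

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// setAttributes applies each value with d.Set and stops at the first
// failure, wrapping the error with the attribute key for easier debugging.
func setAttributes(d *schema.ResourceData, attrs map[string]interface{}) error {
	for key, value := range attrs {
		if err := d.Set(key, value); err != nil {
			return fmt.Errorf("failed setting %q: %w", key, err)
		}
	}
	return nil
}

The read function could then build one map literal of attribute values and finish with diag.FromErr(setAttributes(d, attrs)).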
diff --git a/nutanix/data_source_nutanix_ndb_time_machine_capability_test.go b/nutanix/data_source_nutanix_ndb_time_machine_capability_test.go
new file mode 100644
index 000000000..b0bab83ac
--- /dev/null
+++ b/nutanix/data_source_nutanix_ndb_time_machine_capability_test.go
@@ -0,0 +1,39 @@
+package nutanix
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+)
+
+const dataSourceNDBTmsCapability = "data.nutanix_ndb_tms_capability.test"
+
+func TestAccEraTmsCapabilityDataSource_basic(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccEraPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccEraTmsCapabilityDataSourceConfig(),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttrSet(dataSourceNDBTmsCapability, "output_time_zone"),
+					resource.TestCheckResourceAttrSet(dataSourceNDBTmsCapability, "type"),
+					resource.TestCheckResourceAttrSet(dataSourceNDBTmsCapability, "nx_cluster_id"),
+					resource.TestCheckResourceAttrSet(dataSourceNDBTmsCapability, "sla_id"),
+					resource.TestCheckResourceAttrSet(dataSourceNDBTmsCapability, "capability.#"),
+					resource.TestCheckResourceAttrSet(dataSourceNDBTmsCapability, "capability.0.mode"),
+				),
+			},
+		},
+	})
+}
+
+func testAccEraTmsCapabilityDataSourceConfig() string {
+	return `
+	data "nutanix_ndb_time_machines" "test1" {}
+
+	data "nutanix_ndb_tms_capability" "test" {
+		time_machine_id = data.nutanix_ndb_time_machines.test1.time_machines.0.id
+	}
+	`
+}
diff --git a/nutanix/data_source_nutanix_ndb_time_machine_test.go b/nutanix/data_source_nutanix_ndb_time_machine_test.go
new file mode 100644
index 000000000..60242bc29
--- /dev/null
+++ b/nutanix/data_source_nutanix_ndb_time_machine_test.go
@@ -0,0 +1,69 @@
+package nutanix
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+)
+
+const dataSourceTMName = "data.nutanix_ndb_time_machine.test"
+
+func TestAccEraTimeMachineDataSource_basic(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccEraPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccEraTimeMachineDataSourceConfig(),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttrSet(dataSourceTMName, "name"),
+					resource.TestCheckResourceAttrSet(dataSourceTMName, "description"),
+					resource.TestCheckResourceAttr(dataSourceTMName, "metadata.#", "1"),
+					resource.TestCheckResourceAttr(dataSourceTMName, "clone", "false"),
+					resource.TestCheckResourceAttr(dataSourceTMName, "sla.#", "1"),
+					resource.TestCheckResourceAttr(dataSourceTMName, "schedule.#", "1"),
+				),
+			},
+		},
+	})
+}
+
+func TestAccEraTimeMachineDataSource_basicWithID(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccEraPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccEraTimeMachineDataSourceConfigWithID(),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttrSet(dataSourceTMName, "name"),
+					resource.TestCheckResourceAttrSet(dataSourceTMName, "description"),
+					resource.TestCheckResourceAttr(dataSourceTMName, "metadata.#", "1"),
+					resource.TestCheckResourceAttr(dataSourceTMName, "clone", "false"),
+					resource.TestCheckResourceAttr(dataSourceTMName, "sla.#", "1"),
+					resource.TestCheckResourceAttr(dataSourceTMName, "schedule.#", "1"),
+				),
+			},
+		},
+	})
+}
+
+func testAccEraTimeMachineDataSourceConfig() string {
+	return `
+	data "nutanix_ndb_time_machines" "test1" {}
+
+	data "nutanix_ndb_time_machine" "test" {
+		time_machine_name = data.nutanix_ndb_time_machines.test1.time_machines.0.name
+	}
+	`
+}
+
+func testAccEraTimeMachineDataSourceConfigWithID() string {
+	return `
+	data "nutanix_ndb_time_machines" "test1" {}
+
+	data "nutanix_ndb_time_machine" "test" {
+		time_machine_id = data.nutanix_ndb_time_machines.test1.time_machines.0.id
+	}
+	`
+}
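Both tests above exercise the happy path. A regression step for the missing-argument case could look like the sketch below; the error regexp is an assumption, since the exact message the data source returns when neither time_machine_id nor time_machine_name is supplied is not visible in this patch:

package nutanix

import (
	"regexp"
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// Sketch: expects a failure when both lookup arguments are omitted; adjust
// the pattern to the provider's real error text.
func TestAccEraTimeMachineDataSource_missingArgs(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccEraPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config:      `data "nutanix_ndb_time_machine" "neg" {}`,
				ExpectError: regexp.MustCompile("time_machine_id or time_machine_name"),
			},
		},
	})
}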
diff --git a/nutanix/data_source_nutanix_ndb_time_machines.go b/nutanix/data_source_nutanix_ndb_time_machines.go
new file mode 100644
index 000000000..3365492bc
--- /dev/null
+++ b/nutanix/data_source_nutanix_ndb_time_machines.go
@@ -0,0 +1,86 @@
+package nutanix
+
+import (
+	"context"
+
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	era "github.com/terraform-providers/terraform-provider-nutanix/client/era"
+)
+
+func dataSourceNutanixNDBTimeMachines() *schema.Resource {
+	return &schema.Resource{
+		ReadContext: dataSourceNutanixNDBTimeMachinesRead,
+		Schema: map[string]*schema.Schema{
+			"time_machines": dataSourceEraTimeMachine(),
+		},
+	}
+}
+
+func dataSourceNutanixNDBTimeMachinesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*Client).Era
+
+	// call tms API
+	resp, err := conn.Service.ListTimeMachines(ctx)
+
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	if e := d.Set("time_machines", flattenTimeMachines(resp)); e != nil {
+		return diag.FromErr(e)
+	}
+
+	uuid, er := uuid.GenerateUUID()
+
+	if er != nil {
+		return diag.Errorf("Error generating UUID for era time machines: %+v", er)
+	}
+	d.SetId(uuid)
+	return nil
+}
+
+func flattenTimeMachines(tms *era.ListTimeMachines) []map[string]interface{} {
+	if tms != nil {
+		lst := []map[string]interface{}{}
+
+		for _, pr := range *tms {
+			tmac := map[string]interface{}{}
+
+			tmac["id"] = pr.ID
+			tmac["name"] = pr.Name
+			tmac["description"] = pr.Description
+			tmac["owner_id"] = pr.OwnerID
+			tmac["date_created"] = pr.DateCreated
+			tmac["date_modified"] = pr.DateModified
+			tmac["access_level"] = pr.AccessLevel
+			tmac["properties"] = flattenDBInstanceProperties(pr.Properties)
+			tmac["tags"] = flattenDBTags(pr.Tags)
+			tmac["clustered"] = pr.Clustered
+			tmac["clone"] = pr.Clone
+			tmac["internal"] = pr.Internal
+			tmac["database_id"] = pr.DatabaseID
+			tmac["type"] = pr.Type
+			tmac["category"] = pr.Category
+			tmac["status"] = pr.Status
+			tmac["ea_status"] = pr.EaStatus
+			tmac["scope"] = pr.Scope
+			tmac["sla_id"] = pr.SLAID
+			tmac["schedule_id"] = pr.ScheduleID
+			tmac["metric"] = pr.Metric
+			// tmac["sla_update_metadata"] = pr.SLAUpdateMetadata
+			tmac["database"] = pr.Database
+			tmac["clones"] = pr.Clones
+			tmac["source_nx_clusters"] = pr.SourceNxClusters
+			tmac["sla_update_in_progress"] = pr.SLAUpdateInProgress
+			tmac["sla"] = flattenDBSLA(pr.SLA)
+			tmac["schedule"] = flattenSchedule(pr.Schedule)
+			tmac["metadata"] = flattenTimeMachineMetadata(pr.Metadata)
+
+			lst = append(lst, tmac)
+		}
+		return lst
+	}
+	return nil
+}
diff --git a/nutanix/data_source_nutanix_ndb_time_machines_test.go b/nutanix/data_source_nutanix_ndb_time_machines_test.go
new file mode 100644
index 000000000..f3f91e74c
--- /dev/null
+++ b/nutanix/data_source_nutanix_ndb_time_machines_test.go
@@ -0,0 +1,35 @@
+package nutanix
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+)
+
+const dataSourceTMsName = "data.nutanix_ndb_time_machines.test"
+
+func TestAccEraTimeMachinesDataSource_basic(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccEraPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccEraTimeMachinesDataSourceConfig(),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttrSet(dataSourceTMsName, "time_machines.0.name"),
+					resource.TestCheckResourceAttrSet(dataSourceTMsName, "time_machines.0.description"),
+					resource.TestCheckResourceAttr(dataSourceTMsName, "time_machines.0.metadata.#", "1"),
+					resource.TestCheckResourceAttr(dataSourceTMsName, "time_machines.0.clone", "false"),
+					resource.TestCheckResourceAttr(dataSourceTMsName, "time_machines.0.sla.#", "1"),
+					resource.TestCheckResourceAttr(dataSourceTMsName, "time_machines.0.schedule.#", "1"),
+				),
+			},
+		},
+	})
+}
+
+func testAccEraTimeMachinesDataSourceConfig() string {
+	return `
+	data "nutanix_ndb_time_machines" "test" {}
+	`
+}
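The list data sources in this patch mint a random UUID for d.SetId, because a list result has no natural identity. If that pattern keeps spreading, it could live in one helper; setRandomResourceID is a hypothetical name built from the same go-uuid call the patch already uses:

import (
	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// setRandomResourceID assigns a synthetic ID to a list-style data source,
// since its result set has no stable natural key.
func setRandomResourceID(d *schema.ResourceData, what string) diag.Diagnostics {
	id, err := uuid.GenerateUUID()
	if err != nil {
		return diag.Errorf("Error generating UUID for %s: %+v", what, err)
	}
	d.SetId(id)
	return nil
}

dataSourceNutanixNDBTimeMachinesRead would then end with return setRandomResourceID(d, "era time machines").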
diff --git a/nutanix/provider.go b/nutanix/provider.go
index 7214657da..ad894a93a 100644
--- a/nutanix/provider.go
+++ b/nutanix/provider.go
@@ -196,6 +196,13 @@ func Provider() *schema.Provider {
 			"nutanix_ndb_clusters":        dataSourceNutanixEraClusters(),
 			"nutanix_ndb_database":        dataSourceNutanixEraDatabase(),
 			"nutanix_ndb_databases":       dataSourceNutanixEraDatabases(),
+			"nutanix_ndb_time_machine":    dataSourceNutanixNDBTimeMachine(),
+			"nutanix_ndb_time_machines":   dataSourceNutanixNDBTimeMachines(),
+			"nutanix_ndb_clone":           dataSourceNutanixNDBClone(),
+			"nutanix_ndb_clones":          dataSourceNutanixNDBClones(),
+			"nutanix_ndb_snapshot":        dataSourceNutanixNDBSnapshot(),
+			"nutanix_ndb_snapshots":       dataSourceNutanixNDBSnapshots(),
+			"nutanix_ndb_tms_capability":  dataSourceNutanixNDBTmsCapability(),
 		},
 		ResourcesMap: map[string]*schema.Resource{
 			"nutanix_virtual_machine":     resourceNutanixVirtualMachine(),
@@ -233,6 +240,9 @@ func Provider() *schema.Provider {
 			"nutanix_ndb_scale_database":      resourceNutanixNDBScaleDatabase(),
 			"nutanix_ndb_database_scale":      resourceNutanixNDBScaleDatabase(),
 			"nutanix_ndb_register_database":   resourceNutanixNDBRegisterDatabase(),
+			"nutanix_ndb_database_snapshot":   resourceNutanixNDBDatabaseSnapshot(),
+			"nutanix_ndb_clone":               resourceNutanixNDBClone(),
+			"nutanix_ndb_authorize_dbserver":  resourceNutanixNDBAuthorizeDBServer(),
 		},
 		ConfigureContextFunc: providerConfigure,
 	}
diff --git a/nutanix/resource_nutanix_ndb_authorize_dbservers.go b/nutanix/resource_nutanix_ndb_authorize_dbservers.go
new file mode 100644
index 000000000..87736d6f4
--- /dev/null
+++ b/nutanix/resource_nutanix_ndb_authorize_dbservers.go
@@ -0,0 +1,125 @@
+package nutanix
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/terraform-providers/terraform-provider-nutanix/utils"
+)
+
+func resourceNutanixNDBAuthorizeDBServer() *schema.Resource {
+	return &schema.Resource{
+		CreateContext: resourceNutanixNDBAuthorizeDBServerCreate,
+		ReadContext:   resourceNutanixNDBAuthorizeDBServerRead,
+		UpdateContext: resourceNutanixNDBAuthorizeDBServerUpdate,
+		DeleteContext: resourceNutanixNDBAuthorizeDBServerDelete,
+		Schema: map[string]*schema.Schema{
+			"time_machine_id": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				ConflictsWith: []string{"time_machine_name"},
+			},
+			"time_machine_name": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				ConflictsWith: []string{"time_machine_id"},
+			},
+			"dbservers_id": {
+				Type:     schema.TypeList,
+				Optional: true,
+				Elem: &schema.Schema{
+					Type: schema.TypeString,
+				},
+			},
+		},
+	}
+}
+
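The create and delete handlers below, like the clone and snapshot resources later in this patch, repeat the same lookup: fail unless one of time_machine_id/time_machine_name is set, then resolve the name to an ID through GetTimeMachine. A shared helper would keep the error messages consistent; this is a sketch that reuses only calls already present in the patch, under a hypothetical name:

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	era "github.com/terraform-providers/terraform-provider-nutanix/client/era"
)

// resolveTimeMachineID returns the time machine ID from either the
// "time_machine_id" or the "time_machine_name" argument, resolving a name
// through the same GetTimeMachine lookup the resources below use.
func resolveTimeMachineID(ctx context.Context, conn *era.Client, d *schema.ResourceData, action string) (string, error) {
	tmsID, tok := d.GetOk("time_machine_id")
	tmsName, tnOk := d.GetOk("time_machine_name")

	if !tok && !tnOk {
		return "", fmt.Errorf("at least one of time_machine_id or time_machine_name is required to %s", action)
	}

	if len(tmsName.(string)) > 0 {
		res, err := conn.Service.GetTimeMachine(ctx, "", tmsName.(string))
		if err != nil {
			return "", err
		}
		return *res.ID, nil
	}
	return tmsID.(string), nil
}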
+func resourceNutanixNDBAuthorizeDBServerCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*Client).Era
+	req := make([]*string, 0)
+
+	tmsID, tok := d.GetOk("time_machine_id")
+	tmsName, tnOk := d.GetOk("time_machine_name")
+
+	if !tok && !tnOk {
+		return diag.Errorf("At least one of time_machine_id or time_machine_name is required to authorize dbservers")
+	}
+
+	if len(tmsName.(string)) > 0 {
+		// call time machine API with value-type name
+		res, er := conn.Service.GetTimeMachine(ctx, "", tmsName.(string))
+		if er != nil {
+			return diag.FromErr(er)
+		}
+
+		tmsID = *res.ID
+	}
+
+	if dbserversID, ok := d.GetOk("dbservers_id"); ok {
+		dbser := dbserversID.([]interface{})
+
+		for _, v := range dbser {
+			req = append(req, utils.StringPtr(v.(string)))
+		}
+	}
+	// call for Authorize API
+
+	resp, err := conn.Service.AuthorizeDBServer(ctx, tmsID.(string), req)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	// Status is a *string; compare its dereferenced value, since comparing
+	// against a freshly allocated pointer is always false
+	if utils.StringValue(resp.Status) == "success" {
+		d.SetId(tmsID.(string))
+	}
+
+	return nil
+}
+
+func resourceNutanixNDBAuthorizeDBServerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	return nil
+}
+
+func resourceNutanixNDBAuthorizeDBServerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	return nil
+}
+
+func resourceNutanixNDBAuthorizeDBServerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*Client).Era
+
+	tmsID, tok := d.GetOk("time_machine_id")
+	tmsName, tnOk := d.GetOk("time_machine_name")
+
+	if !tok && !tnOk {
+		return diag.Errorf("At least one of time_machine_id or time_machine_name is required to deauthorize dbservers")
+	}
+
+	if len(tmsName.(string)) > 0 {
+		// call time machine API with value-type name
+		res, er := conn.Service.GetTimeMachine(ctx, "", tmsName.(string))
+		if er != nil {
+			return diag.FromErr(er)
+		}
+
+		tmsID = *res.ID
+	}
+
+	deauthorizeDBs := make([]*string, 0)
+
+	if dbserversID, ok := d.GetOk("dbservers_id"); ok {
+		dbser := dbserversID.([]interface{})
+
+		for _, v := range dbser {
+			deauthorizeDBs = append(deauthorizeDBs, utils.StringPtr(v.(string)))
+		}
+	}
+
+	_, err := conn.Service.DeAuthorizeDBServer(ctx, tmsID.(string), deauthorizeDBs)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	d.SetId("")
+	return nil
+}
diff --git a/nutanix/resource_nutanix_ndb_clone.go b/nutanix/resource_nutanix_ndb_clone.go
new file mode 100644
index 000000000..31ad0ff43
--- /dev/null
+++ b/nutanix/resource_nutanix_ndb_clone.go
@@ -0,0 +1,833 @@
+package nutanix
+
+import (
+	"context"
+	"log"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/terraform-providers/terraform-provider-nutanix/client/era"
+	"github.com/terraform-providers/terraform-provider-nutanix/utils"
+)
+
+func resourceNutanixNDBClone() *schema.Resource {
+	return &schema.Resource{
+		CreateContext: resourceNutanixNDBCloneCreate,
+		ReadContext:   resourceNutanixNDBCloneRead,
+		UpdateContext: resourceNutanixNDBCloneUpdate,
+		DeleteContext: resourceNutanixNDBCloneDelete,
+
+		Schema: map[string]*schema.Schema{
+			"time_machine_id": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				ConflictsWith: []string{"time_machine_name"},
+			},
+			"time_machine_name": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				ConflictsWith: []string{"time_machine_id"},
+			},
+			"snapshot_id": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				ConflictsWith: []string{"user_pitr_timestamp"},
+			},
+			"user_pitr_timestamp": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				ConflictsWith: []string{"snapshot_id"},
+			},
+			"time_zone": {
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"node_count": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				Default:  1,
+			},
+			"nodes": {
+				Type:     schema.TypeList,
+				Required: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"vm_name": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+						"compute_profile_id": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+						"network_profile_id": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+						"new_db_server_time_zone": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+						"nx_cluster_id": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+						"properties": {
+							Type:        schema.TypeList,
+							Description: "List of all the properties",
+							Computed:    
true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "dbserver_id": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "lcm_config": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "database_lcm_config": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expiry_details": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expire_in_days": { + Type: schema.TypeInt, + Optional: true, + }, + "expiry_date_timezone": { + Type: schema.TypeString, + Required: true, + }, + "delete_database": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "refresh_details": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "refresh_in_days": { + Type: schema.TypeInt, + Optional: true, + }, + "refresh_time": { + Type: schema.TypeString, + Optional: true, + }, + "refresh_date_timezone": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Required: true, + }, + "ssh_public_key": { + Type: schema.TypeString, + Optional: true, + }, + "compute_profile_id": { + Type: schema.TypeString, + Optional: true, + }, + "network_profile_id": { + Type: schema.TypeString, + Optional: true, + }, + "database_parameter_profile_id": { + Type: schema.TypeString, + Optional: true, + }, + "vm_password": { + Type: schema.TypeString, + Optional: true, + }, + "create_dbserver": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "clustered": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "dbserver_id": { + Type: schema.TypeString, + Optional: true, + }, + "dbserver_cluster_id": { + Type: schema.TypeString, + Optional: true, + }, + "dbserver_logical_cluster_id": { + Type: schema.TypeString, + Optional: true, + }, + "latest_snapshot": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "postgresql_info": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vm_name": { + Type: schema.TypeString, + Required: true, + }, + "dbserver_description": { + Type: schema.TypeString, + Optional: true, + }, + "db_password": { + Type: schema.TypeString, + Required: true, + }, + "pre_clone_cmd": { + Type: schema.TypeString, + Optional: true, + }, + "post_clone_cmd": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "actionarguments": actionArgumentsSchema(), + // Computed values + + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "tags": dataSourceEraDBInstanceTags(), + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "era_created": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "placeholder": { + Type: schema.TypeBool, + Computed: true, + }, + "database_name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: 
schema.TypeString,
+				Computed: true,
+			},
+			"database_cluster_type": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"status": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"database_status": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"info": dataSourceEraDatabaseInfo(),
+			"group_info": {
+				Type:     schema.TypeMap,
+				Computed: true,
+				Elem: &schema.Schema{
+					Type: schema.TypeString,
+				},
+			},
+			"metadata": dataSourceEraDBInstanceMetadata(),
+			"metric": {
+				Type:     schema.TypeMap,
+				Computed: true,
+				Elem: &schema.Schema{
+					Type: schema.TypeString,
+				},
+			},
+			"category": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"parent_database_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"parent_source_database_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"time_machine": dataSourceEraTimeMachine(),
+			"dbserver_logical_cluster": {
+				Type:     schema.TypeMap,
+				Computed: true,
+				Elem: &schema.Schema{
+					Type: schema.TypeString,
+				},
+			},
+			"database_nodes":   dataSourceEraDatabaseNodes(),
+			"linked_databases": dataSourceEraLinkedDatabases(),
+		},
+	}
+}
+
+func resourceNutanixNDBCloneCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*Client).Era
+	req := &era.CloneRequest{}
+
+	tmsID, tok := d.GetOk("time_machine_id")
+	tmsName, tnOk := d.GetOk("time_machine_name")
+
+	if !tok && !tnOk {
+		return diag.Errorf("At least one of time_machine_id or time_machine_name is required to perform clone")
+	}
+
+	if len(tmsName.(string)) > 0 {
+		// call time machine API with value-type name
+		res, err := conn.Service.GetTimeMachine(ctx, "", tmsName.(string))
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
+		tmsID = *res.ID
+	}
+
+	req.TimeMachineID = utils.StringPtr(tmsID.(string))
+
+	// build request for clone
+	if err := buildCloneRequest(d, req); err != nil {
+		return diag.FromErr(err)
+	}
+
+	// call clone API
+
+	resp, err := conn.Service.CreateClone(ctx, tmsID.(string), req)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	d.SetId(resp.Entityid)
+
+	// Get Operation ID from response of Clone and poll for the operation to get completed.
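+	// A sketch for possible consolidation: the empty-opID check and the
+	// StateChangeConf polling block below recur in every NDB resource in
+	// this patch. "waitForEraOperation" is a hypothetical name, assembled
+	// only from pieces the patch already uses (eraRefresh, eraDelay,
+	// era.GetOperationRequest):
+	//
+	//	func waitForEraOperation(ctx context.Context, conn *era.Client, opID string, timeout time.Duration) error {
+	//		if opID == "" {
+	//			return fmt.Errorf("error: operation ID is an empty string")
+	//		}
+	//		stateConf := &resource.StateChangeConf{
+	//			Pending: []string{"PENDING"},
+	//			Target:  []string{"COMPLETED", "FAILED"},
+	//			Refresh: eraRefresh(ctx, conn, era.GetOperationRequest{OperationID: opID}),
+	//			Timeout: timeout,
+	//			Delay:   eraDelay,
+	//		}
+	//		_, err := stateConf.WaitForStateContext(ctx)
+	//		return err
+	//	}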
+ opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for time machine clone (%s) to create: %s", resp.Entityid, errWaitTask) + } + return nil +} + +func resourceNutanixNDBCloneRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.GetClone(ctx, d.Id(), "", nil) + if err != nil { + return diag.FromErr(err) + } + if resp != nil { + if err = d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err = d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + props := []interface{}{} + for _, prop := range resp.Properties { + props = append(props, map[string]interface{}{ + "name": prop.Name, + "value": prop.Value, + }) + } + if err := d.Set("properties", props); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_created", resp.Datecreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.Datemodified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clone", resp.Clone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("internal", resp.Internal); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("placeholder", resp.Placeholder); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_name", resp.Databasename); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_cluster_type", resp.Databaseclustertype); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_status", resp.Databasestatus); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_logical_cluster_id", resp.Dbserverlogicalclusterid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine_id", resp.Timemachineid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_time_machine_id", resp.Parenttimemachineid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_zone", resp.Timezone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("info", flattenDBInfo(resp.Info)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("group_info", resp.GroupInfo); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metadata", flattenDBInstanceMetadata(resp.Metadata)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metric", resp.Metric); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("category", resp.Category); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_database_id", resp.ParentDatabaseID); err != nil { + return diag.FromErr(err) + } + + if err 
:= d.Set("parent_source_database_id", resp.ParentSourceDatabaseID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("lcm_config", flattenDBLcmConfig(resp.Lcmconfig)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine", flattenDBTimeMachine(resp.TimeMachine)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_logical_cluster", resp.Dbserverlogicalcluster); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_nodes", flattenDBNodes(resp.Databasenodes)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("linked_databases", flattenDBLinkedDbs(resp.Linkeddatabases)); err != nil { + return diag.FromErr(err) + } + } + + return nil +} + +func resourceNutanixNDBCloneUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + dbID := d.Id() + + name := "" + description := "" + + if d.HasChange("name") { + name = d.Get("name").(string) + } + + if d.HasChange("description") { + description = d.Get("description").(string) + } + + tags := make([]*era.Tags, 0) + if d.HasChange("tags") { + tags = expandTags(d.Get("tags").([]interface{})) + } + + updateReq := era.UpdateDatabaseRequest{ + Name: name, + Description: description, + Tags: tags, + Resetname: true, + Resetdescription: true, + Resettags: true, + } + + res, err := conn.Service.UpdateCloneDatabase(ctx, dbID, &updateReq) + if err != nil { + return diag.FromErr(err) + } + + if res != nil { + if err = d.Set("description", res.Description); err != nil { + return diag.FromErr(err) + } + + if err = d.Set("name", res.Name); err != nil { + return diag.FromErr(err) + } + } + return nil +} + +func resourceNutanixNDBCloneDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + if conn == nil { + return diag.Errorf("era is nil") + } + + dbID := d.Id() + + req := era.DeleteDatabaseRequest{ + Delete: true, + Remove: false, + Softremove: false, + Forced: false, + Deletetimemachine: true, + Deletelogicalcluster: true, + } + res, err := conn.Service.DeleteClone(ctx, dbID, &req) + if err != nil { + return diag.FromErr(err) + } + + log.Printf("Operation to unregister clone instance with id %s has started, operation id: %s", dbID, res.Operationid) + opID := res.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Cluster GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for clone Instance (%s) to unregister: %s", res.Entityid, errWaitTask) + } + return nil +} + +func buildCloneRequest(d *schema.ResourceData, res *era.CloneRequest) error { + if name, ok := d.GetOk("name"); ok { + res.Name = utils.StringPtr(name.(string)) + } + + if des, ok := d.GetOk("description"); ok { + res.Description = utils.StringPtr(des.(string)) + } + + if nxcls, ok := d.GetOk("nx_cluster_id"); ok { + res.NxClusterID = utils.StringPtr(nxcls.(string)) + } + + if ssh, ok := d.GetOk("ssh_public_key"); ok { + res.SSHPublicKey = utils.StringPtr(ssh.(string)) + } + if userPitrTimestamp, 
ok := d.GetOk("user_pitr_timestamp"); ok { + res.UserPitrTimestamp = utils.StringPtr(userPitrTimestamp.(string)) + } + if timeZone, ok := d.GetOk("time_zone"); ok && len(timeZone.(string)) > 0 { + res.TimeZone = utils.StringPtr(timeZone.(string)) + } + if computeProfileID, ok := d.GetOk("compute_profile_id"); ok { + res.ComputeProfileID = utils.StringPtr(computeProfileID.(string)) + } + if networkProfileID, ok := d.GetOk("network_profile_id"); ok { + res.NetworkProfileID = utils.StringPtr(networkProfileID.(string)) + } + if databaseParameterProfileID, ok := d.GetOk("database_parameter_profile_id"); ok { + res.DatabaseParameterProfileID = utils.StringPtr(databaseParameterProfileID.(string)) + } + if snapshotID, ok := d.GetOk("snapshot_id"); ok { + res.SnapshotID = utils.StringPtr(snapshotID.(string)) + } + + if dbserverID, ok := d.GetOk("dbserver_id"); ok { + res.DbserverID = utils.StringPtr(dbserverID.(string)) + } + if dbserverClusterID, ok := d.GetOk("dbserver_cluster_id"); ok { + res.DbserverClusterID = utils.StringPtr(dbserverClusterID.(string)) + } + if dbserverLogicalClusterID, ok := d.GetOk("dbserver_logical_cluster_id"); ok { + res.DbserverLogicalClusterID = utils.StringPtr(dbserverLogicalClusterID.(string)) + } + if createDbserver, ok := d.GetOk("create_dbserver"); ok { + res.CreateDbserver = createDbserver.(bool) + } + if clustered, ok := d.GetOk("clustered"); ok { + res.Clustered = clustered.(bool) + } + if nodeCount, ok := d.GetOk("node_count"); ok { + res.NodeCount = utils.IntPtr(nodeCount.(int)) + } + + if nodes, ok := d.GetOk("nodes"); ok { + res.Nodes = expandClonesNodes(nodes.([]interface{})) + } + + if lcmConfig, ok := d.GetOk("lcm_config"); ok { + res.LcmConfig = expandLCMConfig(lcmConfig.([]interface{})) + } + + if postgres, ok := d.GetOk("postgresql_info"); ok && len(postgres.([]interface{})) > 0 { + res.ActionArguments = expandPostgreSQLCloneActionArgs(d, postgres.([]interface{})) + } + + if tags, ok := d.GetOk("tags"); ok && len(tags.([]interface{})) > 0 { + res.Tags = expandTags(tags.([]interface{})) + } + return nil +} + +func expandClonesNodes(pr []interface{}) []*era.Nodes { + nodes := make([]*era.Nodes, len(pr)) + if len(pr) > 0 { + for k, v := range pr { + val := v.(map[string]interface{}) + node := &era.Nodes{} + + if v1, ok1 := val["network_profile_id"]; ok1 && len(v1.(string)) > 0 { + node.Networkprofileid = utils.StringPtr(v1.(string)) + } + + if v1, ok1 := val["compute_profile_id"]; ok1 && len(v1.(string)) > 0 { + node.ComputeProfileID = utils.StringPtr(v1.(string)) + } + + if v1, ok1 := val["vm_name"]; ok1 && len(v1.(string)) > 0 { + node.Vmname = utils.StringPtr(v1.(string)) + } + + if v1, ok1 := val["nx_cluster_id"]; ok1 && len(v1.(string)) > 0 { + node.NxClusterID = utils.StringPtr(v1.(string)) + } + + if v1, ok1 := val["new_db_server_time_zone"]; ok1 && len(v1.(string)) > 0 { + node.NewDBServerTimeZone = utils.StringPtr(v1.(string)) + } + if v1, ok1 := val["properties"]; ok1 && len(v1.([]interface{})) > 0 { + node.Properties = v1.([]*era.NodesProperties) + } + + if v1, ok1 := val["dbserver_id"]; ok1 && len(v1.(string)) > 0 { + node.DatabaseServerID = utils.StringPtr(v1.(string)) + } + nodes[k] = node + } + return nodes + } + return nil +} + +func expandPostgreSQLCloneActionArgs(d *schema.ResourceData, pr []interface{}) []*era.Actionarguments { + if len(pr) > 0 { + args := []*era.Actionarguments{} + + postgresProp := pr[0].(map[string]interface{}) + for key, value := range postgresProp { + args = append(args, &era.Actionarguments{ + Name: key, + Value: 
utils.StringPtr(value.(string)), + }) + } + resp := buildActionArgumentsFromResourceData(d.Get("actionarguments").(*schema.Set), args) + return resp + } + return nil +} + +func expandLCMConfig(pr []interface{}) *era.CloneLCMConfig { + if len(pr) > 0 { + cloneLcm := &era.CloneLCMConfig{} + for _, v := range pr { + val := v.(map[string]interface{}) + + if v1, ok1 := val["database_lcm_config"]; ok1 && len(v1.([]interface{})) > 0 { + dbLcm := v1.([]interface{}) + dbLcmConfig := &era.DatabaseLCMConfig{} + for _, v := range dbLcm { + val := v.(map[string]interface{}) + + if exp, ok1 := val["expiry_details"]; ok1 { + dbLcmConfig.ExpiryDetails = expandDBExpiryDetails(exp.([]interface{})) + } + + if ref, ok1 := val["refresh_details"]; ok1 { + dbLcmConfig.RefreshDetails = expandDBRefreshDetails(ref.([]interface{})) + } + } + cloneLcm.DatabaseLCMConfig = dbLcmConfig + } + } + return cloneLcm + } + return nil +} + +func expandDBExpiryDetails(pr []interface{}) *era.DBExpiryDetails { + if len(pr) > 0 { + expDetails := &era.DBExpiryDetails{} + + for _, v := range pr { + val := v.(map[string]interface{}) + + if v1, ok1 := val["expire_in_days"]; ok1 { + expDetails.ExpireInDays = utils.IntPtr(v1.(int)) + } + if v1, ok1 := val["expiry_date_timezone"]; ok1 && len(v1.(string)) > 0 { + expDetails.ExpiryDateTimezone = utils.StringPtr(v1.(string)) + } + if v1, ok1 := val["delete_database"]; ok1 { + expDetails.DeleteDatabase = v1.(bool) + } + } + return expDetails + } + return nil +} + +func expandDBRefreshDetails(pr []interface{}) *era.DBRefreshDetails { + if len(pr) > 0 { + refDetails := &era.DBRefreshDetails{} + + for _, v := range pr { + val := v.(map[string]interface{}) + + if v1, ok1 := val["refresh_in_days"]; ok1 { + refDetails.RefreshInDays = v1.(int) + } + if v1, ok1 := val["refresh_time"]; ok1 && len(v1.(string)) > 0 { + refDetails.RefreshTime = v1.(string) + } + if v1, ok1 := val["refresh_date_timezone"]; ok1 && len(v1.(string)) > 0 { + refDetails.RefreshDateTimezone = v1.(string) + } + } + return refDetails + } + return nil +} diff --git a/nutanix/resource_nutanix_ndb_database_scale_test.go b/nutanix/resource_nutanix_ndb_database_scale_test.go new file mode 100644 index 000000000..3a32ce64f --- /dev/null +++ b/nutanix/resource_nutanix_ndb_database_scale_test.go @@ -0,0 +1,44 @@ +package nutanix + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceNameScaleDB = "nutanix_ndb_database_scale.acctest-managed" + +func TestAccEra_Scalebasic(t *testing.T) { + storageSize := "4" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabaseScaleConfig(storageSize), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameScaleDB, "application_type", "postgres_database"), + resource.TestCheckResourceAttr(resourceNameScaleDB, "data_storage_size", storageSize), + resource.TestCheckResourceAttr(resourceNameScaleDB, "metadata.#", "1"), + resource.TestCheckResourceAttrSet(resourceNameScaleDB, "name"), + resource.TestCheckResourceAttrSet(resourceNameScaleDB, "description"), + ), + }, + }, + }) +} + +func testAccEraDatabaseScaleConfig(size string) string { + return fmt.Sprintf(` + data "nutanix_ndb_databases" "test" { + database_type = "postgres_database" + } + + resource "nutanix_ndb_database_scale" "acctest-managed" { + application_type = "postgres_database" + database_uuid = 
data.nutanix_ndb_databases.test.database_instances.1.id + data_storage_size = %[1]s + } + `, size) +} diff --git a/nutanix/resource_nutanix_ndb_database_snapshot.go b/nutanix/resource_nutanix_ndb_database_snapshot.go new file mode 100644 index 000000000..cdd680433 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_database_snapshot.go @@ -0,0 +1,596 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBDatabaseSnapshot() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBDatabaseSnapshotCreate, + ReadContext: resourceNutanixNDBDatabaseSnapshotRead, + UpdateContext: resourceNutanixNDBDatabaseSnapshotUpdate, + DeleteContext: resourceNutanixNDBDatabaseSnapshotDelete, + Schema: map[string]*schema.Schema{ + "time_machine_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"time_machine_name"}, + }, + "time_machine_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"time_machine_id"}, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "remove_schedule_in_days": { + Type: schema.TypeInt, + Optional: true, + }, + "expiry_date_timezone": { + Type: schema.TypeString, + Optional: true, + Default: "Asia/Calcutta", + }, + "replicate_to_clusters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + // computed + "id": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "snapshot_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "protection_domain_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "app_info_version": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "applicable_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshot_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": dataSourceEraDatabaseInfo(), + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "replication_retry_count": { + Type: schema.TypeInt, + Computed: true, + 
},
+						"last_replication_retry_source_snapshot_id": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"async": {
+							Type:     schema.TypeBool,
+							Computed: true,
+						},
+						"stand_by": {
+							Type:     schema.TypeBool,
+							Computed: true,
+						},
+						"curation_retry_count": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"operations_using_snapshot": {
+							Type:     schema.TypeList,
+							Computed: true,
+							Elem: &schema.Schema{
+								Type: schema.TypeString,
+							},
+						},
+					},
+				},
+			},
+			"software_snapshot_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"software_database_snapshot": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"dbserver_storage_metadata_version": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"santised": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"santised_from_snapshot_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"timezone": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"processed": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"database_snapshot": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"from_timestamp": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"to_timestamp": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"dbserver_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"dbserver_name": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"dbserver_ip": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"replicated_snapshots": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem: &schema.Schema{
+					Type: schema.TypeString,
+				},
+			},
+			"software_snapshot": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"santised_snapshots": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"snapshot_family": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"snapshot_timestamp_date": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"lcm_config": dataSourceEraLCMConfig(),
+			"parent_snapshot": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"snapshot_size": {
+				Type:     schema.TypeFloat,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceNutanixNDBDatabaseSnapshotCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*Client).Era
+
+	req := &era.DatabaseSnapshotRequest{}
+	snapshotName := ""
+	tmsID, tok := d.GetOk("time_machine_id")
+	tmsName, tnOk := d.GetOk("time_machine_name")
+
+	if !tok && !tnOk {
+		return diag.Errorf("At least one of time_machine_id or time_machine_name is required to take a snapshot")
+	}
+
+	if len(tmsName.(string)) > 0 {
+		// call time machine API with value-type name
+		res, err := conn.Service.GetTimeMachine(ctx, tmsID.(string), tmsName.(string))
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
+		tmsID = *res.ID
+	}
+
+	if name, ok := d.GetOk("name"); ok {
+		req.Name = utils.StringPtr(name.(string))
+		snapshotName = utils.StringValue(req.Name)
+	}
+
+	if rm, ok := d.GetOk("remove_schedule_in_days"); ok {
+		lcmConfig := &era.LCMConfigSnapshot{}
+		snapshotLCM := &era.SnapshotLCMConfig{}
+		expDetails := &era.DBExpiryDetails{}
+
+		expDetails.ExpireInDays = utils.IntPtr(rm.(int))
+
+		if tmzone, pk := d.GetOk("expiry_date_timezone"); pk {
+			expDetails.ExpiryDateTimezone = utils.StringPtr(tmzone.(string))
+		}
+
+		snapshotLCM.ExpiryDetails = expDetails
+		lcmConfig.SnapshotLCMConfig = snapshotLCM
+		req.LcmConfig = lcmConfig
+	}
+
+	if rep, ok := d.GetOk("replicate_to_clusters"); ok && len(rep.([]interface{})) > 0 {
+		repList := rep.([]interface{})
+
+		for _, v := range repList {
+			req.ReplicateToClusters = append(req.ReplicateToClusters, 
utils.StringPtr(v.(string))) + } + } + + // call the snapshot API + + resp, err := conn.Service.DatabaseSnapshot(ctx, tmsID.(string), req) + if err != nil { + return diag.FromErr(err) + } + + // d.SetId(resp.Entityid) + + // Get Operation ID from response of snapshot and poll for the operation to get completed. + opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for snapshot (%s) to create: %s", resp.Entityid, errWaitTask) + } + + // Get all the Snapshots based on tms + + uniqueID := "" + timeStamp := 0 + tmsResp, ter := conn.Service.ListSnapshots(ctx, resp.Entityid) + if ter != nil { + return diag.FromErr(ter) + } + for _, val := range *tmsResp { + if snapshotName == utils.StringValue(val.Name) { + if (int(*val.SnapshotTimeStampDate)) > timeStamp { + uniqueID = utils.StringValue(val.ID) + timeStamp = int(utils.Int64Value(val.SnapshotTimeStampDate)) + } + } + } + d.SetId(uniqueID) + return resourceNutanixNDBDatabaseSnapshotRead(ctx, d, meta) +} + +func resourceNutanixNDBDatabaseSnapshotRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + // setting the default values for Get snapshot + filterParams := &era.FilterParams{} + filterParams.LoadReplicatedChildSnapshots = "false" + filterParams.TimeZone = "UTC" + + resp, err := conn.Service.GetSnapshot(ctx, d.Id(), filterParams) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.OwnerID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_created", resp.DateCreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.DateModified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("properties", flattenDBInstanceProperties(resp.Properties)); err != nil { + return diag.FromErr(err) + } + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_uuid", resp.SnapshotUUID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("nx_cluster_id", resp.NxClusterID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("protection_domain_id", resp.ProtectionDomainID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_snapshot_id", resp.ParentSnapshotID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine_id", resp.TimeMachineID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_node_id", resp.DatabaseNodeID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("app_info_version", resp.AppInfoVersion); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("applicable_types", 
resp.ApplicableTypes); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_timestamp", resp.SnapshotTimeStamp); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("software_snapshot_id", resp.SoftwareSnapshotID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("software_database_snapshot", resp.SoftwareDatabaseSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_storage_metadata_version", resp.DBServerStorageMetadataVersion); err != nil { + return diag.FromErr(err) + } + + // if err := d.Set("santised", resp.Sanitized); err != nil { + // return diag.FromErr(err) + // } + + if err := d.Set("santised_from_snapshot_id", resp.SanitisedFromSnapshotID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("timezone", resp.TimeZone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("processed", resp.Processed); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_snapshot", resp.DatabaseSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("from_timestamp", resp.FromTimeStamp); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("to_timestamp", resp.ToTimeStamp); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_id", resp.DbserverID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_name", resp.DbserverName); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_ip", resp.DbserverIP); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("replicated_snapshots", resp.ReplicatedSnapshots); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("software_snapshot", resp.SoftwareSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("santised_snapshots", resp.SanitisedSnapshots); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_family", resp.SnapshotFamily); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_timestamp_date", resp.SnapshotTimeStampDate); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_snapshot", resp.ParentSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_size", resp.SnapshotSize); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("lcm_config", flattenDBLcmConfig(resp.LcmConfig)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metadata", flattenClonedMetadata(resp.Metadata)); err != nil { + return diag.FromErr(err) + } + return nil +} + +func resourceNutanixNDBDatabaseSnapshotUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + updateReq := &era.UpdateSnapshotRequest{} + + if d.HasChange("name") { + updateReq.Name = utils.StringPtr(d.Get("name").(string)) + } + + // reset the name is by default value provided + updateReq.ResetName = true + + // API to update database snapshot + + resp, err := conn.Service.UpdateSnapshot(ctx, d.Id(), updateReq) + if err != nil { + return diag.FromErr(err) + } + + if resp != nil { + if err = d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + } + + return resourceNutanixNDBDatabaseSnapshotRead(ctx, d, meta) +} + +func resourceNutanixNDBDatabaseSnapshotDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.DeleteSnapshot(ctx, d.Id()) + if err != nil { + return 
diag.FromErr(err) + } + + opID := resp.Operationid + + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for snapshot (%s) to delete: %s", resp.Entityid, errWaitTask) + } + + d.SetId("") + return nil +} diff --git a/nutanix/resource_nutanix_ndb_database_snapshot_test.go b/nutanix/resource_nutanix_ndb_database_snapshot_test.go new file mode 100644 index 000000000..e804719b7 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_database_snapshot_test.go @@ -0,0 +1,88 @@ +package nutanix + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceNameSnapshotDB = "nutanix_ndb_database_snapshot.acctest-managed" + +func TestAccEra_Snapshotbasic(t *testing.T) { + name := "test-acc-snapshot" + removalIndays := "2" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabaseSnapshotConfig(name, removalIndays), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "name", name), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "remove_schedule_in_days", removalIndays), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "metadata.#", "1"), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "database_snapshot", "false"), + ), + }, + }, + }) +} + +func TestAccEra_Snapshot_ReplicateToClusters(t *testing.T) { + name := "test-acc-snapshot" + removalIndays := "2" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabaseSnapshotConfigReplicateToClusters(name, removalIndays), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "name", name), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "remove_schedule_in_days", removalIndays), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "metadata.#", "1"), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "database_snapshot", "false"), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "replicate_to_clusters.#", "2"), + ), + }, + }, + }) +} + +func testAccEraDatabaseSnapshotConfig(name, removalIndays string) string { + return fmt.Sprintf(` + data "nutanix_ndb_time_machines" "test1" {} + + data "nutanix_ndb_time_machine" "test"{ + time_machine_name = data.nutanix_ndb_time_machines.test1.time_machines.0.name + } + + resource "nutanix_ndb_database_snapshot" "acctest-managed" { + time_machine_id = data.nutanix_ndb_time_machine.test.id + name = "%[1]s" + remove_schedule_in_days = "%[2]s" + } + `, name, removalIndays) +} + +func testAccEraDatabaseSnapshotConfigReplicateToClusters(name, removalIndays string) string { + return fmt.Sprintf(` + data "nutanix_ndb_time_machines" "test1" {} + + data "nutanix_ndb_time_machine" "test"{ + time_machine_name = data.nutanix_ndb_time_machines.test1.time_machines.0.name + } + + data "nutanix_ndb_clusters" "test" { } + + resource "nutanix_ndb_database_snapshot" 
"acctest-managed" { + time_machine_id = data.nutanix_ndb_time_machine.test.id + name = "%[1]s" + remove_schedule_in_days = "%[2]s" + replicate_to_clusters = [ + data.nutanix_ndb_clusters.test.clusters.0.id, data.nutanix_ndb_clusters.test.clusters.1.id + ] + } + `, name, removalIndays) +} diff --git a/nutanix/resource_nutanix_ndb_log_catchups_test.go b/nutanix/resource_nutanix_ndb_log_catchups_test.go new file mode 100644 index 000000000..9ee93b7d9 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_log_catchups_test.go @@ -0,0 +1,36 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceNameLogCatchDB = "nutanix_ndb_database_log_catchup.acctest-managed" + +func TestAccEra_LogCatchUpbasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabaseLogCatchUpConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameLogCatchDB, "log_catchup_version", ""), + resource.TestCheckResourceAttr(resourceNameLogCatchDB, "database_id", ""), + resource.TestCheckResourceAttrSet(resourceNameLogCatchDB, "time_machine_id"), + ), + }, + }, + }) +} + +func testAccEraDatabaseLogCatchUpConfig() string { + return (` + data "nutanix_ndb_time_machines" "test1" {} + + resource "nutanix_ndb_log_catchups" "name" { + time_machine_id = data.nutanix_ndb_time_machines.test1.time_machines.0.id + } + `) +} From c8f5bd7f1a6ad5cba1b4e7ebf53d5f145542ede0 Mon Sep 17 00:00:00 2001 From: Abhishek Date: Thu, 19 Jan 2023 14:08:43 +0530 Subject: [PATCH 11/18] tcs changes --- .../data_source_nutanix_ndb_cluster_test.go | 2 +- .../data_source_nutanix_ndb_snapshot_test.go | 1 - nutanix/resource_nutanix_nbd_database_test.go | 34 ++++++++++--------- ...esource_nutanix_ndb_database_scale_test.go | 2 +- ...urce_nutanix_ndb_register_database_test.go | 1 + 5 files changed, 21 insertions(+), 19 deletions(-) diff --git a/nutanix/data_source_nutanix_ndb_cluster_test.go b/nutanix/data_source_nutanix_ndb_cluster_test.go index 890bd7b15..b4e929e50 100644 --- a/nutanix/data_source_nutanix_ndb_cluster_test.go +++ b/nutanix/data_source_nutanix_ndb_cluster_test.go @@ -37,7 +37,7 @@ func TestAccEraClusterDataSource_ByName(t *testing.T) { resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "status", "UP"), resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "cloud_type", "NTNX"), resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "hypervisor_type", "AHV"), - resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "properties.#", "0"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_cluster.test", "properties.#"), resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "healthy", "true"), ), }, diff --git a/nutanix/data_source_nutanix_ndb_snapshot_test.go b/nutanix/data_source_nutanix_ndb_snapshot_test.go index 56db008b6..0277815c8 100644 --- a/nutanix/data_source_nutanix_ndb_snapshot_test.go +++ b/nutanix/data_source_nutanix_ndb_snapshot_test.go @@ -44,7 +44,6 @@ func TestAccEraSnapshotDataSource_WithFilters(t *testing.T) { resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "metadata.#", "1"), resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "snapshot_uuid"), resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "status", "ACTIVE"), - resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "type", "DAILY_EXTRA"), ), }, }, diff 
--git a/nutanix/resource_nutanix_nbd_database_test.go b/nutanix/resource_nutanix_nbd_database_test.go index 36d74a5ed..69d7b6231 100644 --- a/nutanix/resource_nutanix_nbd_database_test.go +++ b/nutanix/resource_nutanix_nbd_database_test.go @@ -10,16 +10,17 @@ import ( const resourceNameDB = "nutanix_ndb_database.acctest-managed" func TestAccEra_basic(t *testing.T) { - name := "test-pg-inst-tf" + r := randIntBetween(1, 10) + name := fmt.Sprintf("test-pg-inst-tf-%d", r) desc := "this is desc" - vmName := "testvm12" + vmName := fmt.Sprintf("testvm-%d", r) sshKey := testVars.SSHKey resource.Test(t, resource.TestCase{ PreCheck: func() { testAccEraPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccEraDatabaseConfig(name, desc, vmName, sshKey), + Config: testAccEraDatabaseConfig(name, desc, vmName, sshKey, r), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceNameDB, "name", name), resource.TestCheckResourceAttr(resourceNameDB, "description", desc), @@ -33,7 +34,8 @@ func TestAccEra_basic(t *testing.T) { } func TestAccEraDatabaseProvisionHA(t *testing.T) { - name := "test-pg-inst-HA-tf" + r := randIntBetween(11, 25) + name := fmt.Sprintf("test-pg-inst-HA-tf-%d", r) desc := "this is desc" sshKey := testVars.SSHKey resource.Test(t, resource.TestCase{ @@ -41,7 +43,7 @@ func TestAccEraDatabaseProvisionHA(t *testing.T) { Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccEraDatabaseHAConfig(name, desc, sshKey), + Config: testAccEraDatabaseHAConfig(name, desc, sshKey, r), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceNameDB, "name", name), resource.TestCheckResourceAttr(resourceNameDB, "description", desc), @@ -55,7 +57,7 @@ func TestAccEraDatabaseProvisionHA(t *testing.T) { }) } -func testAccEraDatabaseConfig(name, desc, vmName, sshKey string) string { +func testAccEraDatabaseConfig(name, desc, vmName, sshKey string, r int) string { return fmt.Sprintf(` data "nutanix_ndb_profiles" "p"{ } @@ -112,7 +114,7 @@ func testAccEraDatabaseConfig(name, desc, vmName, sshKey string) string { networkprofileid= local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id } timemachineinfo { - name= "test-pg-inst-12" + name= "test-pg-inst-%[5]d" description="" slaid=local.slas["DEFAULT_OOB_BRONZE_SLA"].id schedule { @@ -147,10 +149,10 @@ func testAccEraDatabaseConfig(name, desc, vmName, sshKey string) string { } } } - `, name, desc, vmName, sshKey) + `, name, desc, vmName, sshKey, r) } -func testAccEraDatabaseHAConfig(name, desc, sshKey string) string { +func testAccEraDatabaseHAConfig(name, desc, sshKey string, r int) string { return fmt.Sprintf(` data "nutanix_ndb_profiles" "p"{ } @@ -208,7 +210,7 @@ func testAccEraDatabaseHAConfig(name, desc, sshKey string) string { proxy_write_port = "5000" - cluster_name= "ha-cls" + cluster_name= "ha-cls-%[4]d" patroni_cluster_name = "ha-patroni-cluster" } @@ -220,7 +222,7 @@ func testAccEraDatabaseHAConfig(name, desc, sshKey string) string { name = "node_type" value = "haproxy" } - vmname = "ha-cls_haproxy1" + vmname = "ha-cls_haproxy-%[4]d" nx_cluster_id = local.clusters.EraCluster.id } nodes{ @@ -236,7 +238,7 @@ func testAccEraDatabaseHAConfig(name, desc, sshKey string) string { name= "node_type" value= "database" } - vmname = "ha-cls-1" + vmname = "ha-cls-1%[4]d" networkprofileid=local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id computeprofileid= local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id nx_cluster_id= local.clusters.EraCluster.id @@ 
-254,7 +256,7 @@ func testAccEraDatabaseHAConfig(name, desc, sshKey string) string { name= "node_type" value= "database" } - vmname = "ha-cls-2" + vmname = "ha-cls-2%[4]d" networkprofileid=local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id computeprofileid= local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id nx_cluster_id= local.clusters.EraCluster.id @@ -273,13 +275,13 @@ func testAccEraDatabaseHAConfig(name, desc, sshKey string) string { name= "node_type" value= "database" } - vmname = "ha-cls-3" + vmname = "ha-cls-3%[4]d" networkprofileid=local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id computeprofileid= local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id nx_cluster_id= local.clusters.EraCluster.id } timemachineinfo { - name= "test-pg-inst" + name= "test-pg-inst-%[4]d" description="" sla_details{ @@ -322,5 +324,5 @@ func testAccEraDatabaseHAConfig(name, desc, sshKey string) string { } } } - `, name, desc, sshKey) + `, name, desc, sshKey, r) } diff --git a/nutanix/resource_nutanix_ndb_database_scale_test.go b/nutanix/resource_nutanix_ndb_database_scale_test.go index 3a32ce64f..8e9288f16 100644 --- a/nutanix/resource_nutanix_ndb_database_scale_test.go +++ b/nutanix/resource_nutanix_ndb_database_scale_test.go @@ -37,7 +37,7 @@ func testAccEraDatabaseScaleConfig(size string) string { resource "nutanix_ndb_database_scale" "acctest-managed" { application_type = "postgres_database" - database_uuid = data.nutanix_ndb_databases.test.database_instances.1.id + database_uuid = data.nutanix_ndb_databases.test.database_instances.0.id data_storage_size = %[1]s } `, size) diff --git a/nutanix/resource_nutanix_ndb_register_database_test.go b/nutanix/resource_nutanix_ndb_register_database_test.go index 419f7b179..e626a2021 100644 --- a/nutanix/resource_nutanix_ndb_register_database_test.go +++ b/nutanix/resource_nutanix_ndb_register_database_test.go @@ -10,6 +10,7 @@ import ( const resourceRegisterDB = "nutanix_ndb_database.acctest-managed" func TestAccEra_Registerbasic(t *testing.T) { + t.Skip() name := "test-pg-inst-tf" desc := "this is desc" resource.Test(t, resource.TestCase{ From df620d5ea4ca5e556e35fb166ce45378a85faba1 Mon Sep 17 00:00:00 2001 From: Abhishek Date: Fri, 20 Jan 2023 02:47:20 +0530 Subject: [PATCH 12/18] added docs and log for operation completed in resources --- ...esource_nutanix_ndb_authorize_dbservers.go | 12 +- nutanix/resource_nutanix_ndb_clone.go | 31 +- nutanix/resource_nutanix_ndb_database.go | 6 +- .../resource_nutanix_ndb_database_restore.go | 1 + .../resource_nutanix_ndb_database_scale.go | 1 + .../resource_nutanix_ndb_database_snapshot.go | 2 + nutanix/resource_nutanix_ndb_log_catchups.go | 1 + nutanix/resource_nutanix_ndb_profiles.go | 4 +- .../resource_nutanix_ndb_register_database.go | 4 +- nutanix/resource_nutanix_ndb_sla.go | 5 +- ...ce_nutanix_ndb_software_version_profile.go | 5 +- website/docs/d/ndb_clone.html.markdown | 80 ++++++ website/docs/d/ndb_clones.html.markdown | 82 ++++++ website/docs/d/ndb_snapshot.html.markdown | 75 +++++ website/docs/d/ndb_snapshots.html.markdown | 77 +++++ website/docs/d/ndb_time_machine.html.markdown | 59 ++++ .../ndb_time_machine_capability.html.markdown | 44 +++ .../docs/d/ndb_time_machines.html.markdown | 55 ++++ .../r/ndb_authorize_dbserver.html.markdown | 28 ++ website/docs/r/ndb_clone.html.markdown | 122 ++++++++ website/docs/r/ndb_database.html.markdown | 196 ++++++++++++- .../docs/r/ndb_database_restore.html.markdown | 78 +++++ .../docs/r/ndb_database_scale.html.markdown | 70 +++++ 
.../r/ndb_database_snapshot.html.markdown | 87 ++++++ website/docs/r/ndb_log_catchups.html.markdown | 33 +++ website/docs/r/ndb_profiles.html.markdown | 127 +++++++++ .../r/ndb_register_database.html.markdown | 268 ++++++++++++++++++ website/docs/r/ndb_sla.html.markdown | 52 ++++ ...ndb_software_profile_version.html.markdown | 77 +++++ 29 files changed, 1656 insertions(+), 26 deletions(-) create mode 100644 website/docs/d/ndb_clone.html.markdown create mode 100644 website/docs/d/ndb_clones.html.markdown create mode 100644 website/docs/d/ndb_snapshot.html.markdown create mode 100644 website/docs/d/ndb_snapshots.html.markdown create mode 100644 website/docs/d/ndb_time_machine.html.markdown create mode 100644 website/docs/d/ndb_time_machine_capability.html.markdown create mode 100644 website/docs/d/ndb_time_machines.html.markdown create mode 100644 website/docs/r/ndb_authorize_dbserver.html.markdown create mode 100644 website/docs/r/ndb_clone.html.markdown create mode 100644 website/docs/r/ndb_database_restore.html.markdown create mode 100644 website/docs/r/ndb_database_scale.html.markdown create mode 100644 website/docs/r/ndb_database_snapshot.html.markdown create mode 100644 website/docs/r/ndb_log_catchups.html.markdown create mode 100644 website/docs/r/ndb_profiles.html.markdown create mode 100644 website/docs/r/ndb_register_database.html.markdown create mode 100644 website/docs/r/ndb_sla.html.markdown create mode 100644 website/docs/r/ndb_software_profile_version.html.markdown diff --git a/nutanix/resource_nutanix_ndb_authorize_dbservers.go b/nutanix/resource_nutanix_ndb_authorize_dbservers.go index 87736d6f4..e2834a078 100644 --- a/nutanix/resource_nutanix_ndb_authorize_dbservers.go +++ b/nutanix/resource_nutanix_ndb_authorize_dbservers.go @@ -2,7 +2,9 @@ package nutanix import ( "context" + "log" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-nutanix/utils" @@ -72,9 +74,14 @@ func resourceNutanixNDBAuthorizeDBServerCreate(ctx context.Context, d *schema.Re } if resp.Status == utils.StringPtr("success") { - d.SetId(tmsID.(string)) - } + uuid, er := uuid.GenerateUUID() + if er != nil { + return diag.Errorf("Error generating UUID for era clusters: %+v", err) + } + d.SetId(uuid) + } + log.Printf("NDB Authorize dbservers with %s id created successfully", d.Id()) return nil } @@ -120,6 +127,7 @@ func resourceNutanixNDBAuthorizeDBServerDelete(ctx context.Context, d *schema.Re if err != nil { return diag.FromErr(err) } + log.Printf("NDB Authorize dbservers with %s id deleted successfully", d.Id()) d.SetId("") return nil } diff --git a/nutanix/resource_nutanix_ndb_clone.go b/nutanix/resource_nutanix_ndb_clone.go index 31ad0ff43..d28694aea 100644 --- a/nutanix/resource_nutanix_ndb_clone.go +++ b/nutanix/resource_nutanix_ndb_clone.go @@ -167,8 +167,9 @@ func resourceNutanixNDBClone() *schema.Resource { Required: true, }, "ssh_public_key": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + Sensitive: true, }, "compute_profile_id": { Type: schema.TypeString, @@ -183,8 +184,9 @@ func resourceNutanixNDBClone() *schema.Resource { Optional: true, }, "vm_password": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + Sensitive: true, }, "create_dbserver": { Type: schema.TypeBool, @@ -228,8 +230,9 @@ func resourceNutanixNDBClone() *schema.Resource { Optional: true, }, 
"db_password": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Sensitive: true, }, "pre_clone_cmd": { Type: schema.TypeString, @@ -396,7 +399,9 @@ func resourceNutanixNDBCloneCreate(ctx context.Context, d *schema.ResourceData, if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { return diag.Errorf("error waiting for time machine clone (%s) to create: %s", resp.Entityid, errWaitTask) } - return nil + + log.Printf("NDB clone with %s id created successfully", d.Id()) + return resourceNutanixNDBCloneRead(ctx, d, meta) } func resourceNutanixNDBCloneRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { @@ -573,15 +578,10 @@ func resourceNutanixNDBCloneUpdate(ctx context.Context, d *schema.ResourceData, } if res != nil { - if err = d.Set("description", res.Description); err != nil { - return diag.FromErr(err) - } - - if err = d.Set("name", res.Name); err != nil { - return diag.FromErr(err) - } + log.Printf("NDB clone with %s id update successfully", d.Id()) } - return nil + + return resourceNutanixNDBCloneRead(ctx, d, meta) } func resourceNutanixNDBCloneDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { @@ -628,6 +628,7 @@ func resourceNutanixNDBCloneDelete(ctx context.Context, d *schema.ResourceData, if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { return diag.Errorf("error waiting for clone Instance (%s) to unregister: %s", res.Entityid, errWaitTask) } + log.Printf("NDB clone with %s id deleted successfully", d.Id()) return nil } diff --git a/nutanix/resource_nutanix_ndb_database.go b/nutanix/resource_nutanix_ndb_database.go index ad26da89e..a94591020 100644 --- a/nutanix/resource_nutanix_ndb_database.go +++ b/nutanix/resource_nutanix_ndb_database.go @@ -473,7 +473,7 @@ func createDatabaseInstance(ctx context.Context, d *schema.ResourceData, meta in if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { return diag.Errorf("error waiting for db Instance (%s) to create: %s", resp.Entityid, errWaitTask) } - + log.Printf("NDB database with %s id created successfully", d.Id()) return readDatabaseInstance(ctx, d, meta) } @@ -690,8 +690,8 @@ func updateDatabaseInstance(ctx context.Context, d *schema.ResourceData, m inter return diag.FromErr(err) } } - - return nil + log.Printf("NDB database with %s id updated successfully", d.Id()) + return readDatabaseInstance(ctx, d, m) } func deleteDatabaseInstance(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { diff --git a/nutanix/resource_nutanix_ndb_database_restore.go b/nutanix/resource_nutanix_ndb_database_restore.go index 29d9156a2..fb792a4e2 100644 --- a/nutanix/resource_nutanix_ndb_database_restore.go +++ b/nutanix/resource_nutanix_ndb_database_restore.go @@ -252,6 +252,7 @@ func resourceNutanixNDBDatabaseRestoreCreate(ctx context.Context, d *schema.Reso } d.SetId(resp.Operationid) + log.Printf("NDB database restore with %s id created successfully", d.Id()) return resourceNutanixNDBDatabaseRestoreRead(ctx, d, meta) } diff --git a/nutanix/resource_nutanix_ndb_database_scale.go b/nutanix/resource_nutanix_ndb_database_scale.go index 484e1c170..bbaedad3d 100644 --- a/nutanix/resource_nutanix_ndb_database_scale.go +++ b/nutanix/resource_nutanix_ndb_database_scale.go @@ -263,6 +263,7 @@ func resourceNutanixNDBScaleDatabaseCreate(ctx context.Context, d *schema.Resour } d.SetId(resp.Operationid) + log.Printf("NDB database scale with %s id 
created successfully", d.Id()) return resourceNutanixNDBScaleDatabaseRead(ctx, d, meta) } diff --git a/nutanix/resource_nutanix_ndb_database_snapshot.go b/nutanix/resource_nutanix_ndb_database_snapshot.go index cdd680433..2aa4c7a78 100644 --- a/nutanix/resource_nutanix_ndb_database_snapshot.go +++ b/nutanix/resource_nutanix_ndb_database_snapshot.go @@ -361,6 +361,7 @@ func resourceNutanixNDBDatabaseSnapshotCreate(ctx context.Context, d *schema.Res } } d.SetId(uniqueID) + log.Printf("NDB database snapshot with %s id created successfully", d.Id()) return resourceNutanixNDBDatabaseSnapshotRead(ctx, d, meta) } @@ -559,6 +560,7 @@ func resourceNutanixNDBDatabaseSnapshotUpdate(ctx context.Context, d *schema.Res } } + log.Printf("NDB database snapshot with %s id updated successfully", d.Id()) return resourceNutanixNDBDatabaseSnapshotRead(ctx, d, meta) } diff --git a/nutanix/resource_nutanix_ndb_log_catchups.go b/nutanix/resource_nutanix_ndb_log_catchups.go index 3008ea1ba..ff815bd60 100644 --- a/nutanix/resource_nutanix_ndb_log_catchups.go +++ b/nutanix/resource_nutanix_ndb_log_catchups.go @@ -121,6 +121,7 @@ func resourceNutanixNDBLogCatchUpsCreate(ctx context.Context, d *schema.Resource return diag.Errorf("error waiting to perform log-catchups (%s) to create: %s", resp.Entityid, errWaitTask) } d.SetId(resp.Operationid) + log.Printf("NDB log catchup with %s id created successfully", d.Id()) return nil } diff --git a/nutanix/resource_nutanix_ndb_profiles.go b/nutanix/resource_nutanix_ndb_profiles.go index d36324bab..54deb73c5 100644 --- a/nutanix/resource_nutanix_ndb_profiles.go +++ b/nutanix/resource_nutanix_ndb_profiles.go @@ -720,6 +720,7 @@ func resourceNutanixNDBProfileCreate(ctx context.Context, d *schema.ResourceData return diag.FromErr(er) } } + log.Printf("NDB Profile with %s id created successfully", d.Id()) return resourceNutanixNDBProfileRead(ctx, d, meta) } @@ -894,7 +895,7 @@ func resourceNutanixNDBProfileUpdate(ctx context.Context, d *schema.ResourceData if er != nil { return diag.FromErr(er) } - + log.Printf("NDB Profile with %s id updated successfully", d.Id()) return resourceNutanixNDBProfileRead(ctx, d, meta) } @@ -907,6 +908,7 @@ func resourceNutanixNDBProfileDelete(ctx context.Context, d *schema.ResourceData } if resp == utils.StringPtr("Profile Successfully Deleted.") { + log.Printf("NDB Profile with %s id deleted successfully", d.Id()) d.SetId("") } return nil diff --git a/nutanix/resource_nutanix_ndb_register_database.go b/nutanix/resource_nutanix_ndb_register_database.go index 9b5557b4d..a2a17bf4d 100644 --- a/nutanix/resource_nutanix_ndb_register_database.go +++ b/nutanix/resource_nutanix_ndb_register_database.go @@ -305,7 +305,8 @@ func resourceNutanixNDBRegisterDatabaseCreate(ctx context.Context, d *schema.Res if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { return diag.Errorf("error waiting for db register (%s) to create: %s", resp.Entityid, errWaitTask) } - return nil + log.Printf("NDB register database with %s id created successfully", d.Id()) + return resourceNutanixNDBRegisterDatabaseRead(ctx, d, meta) } func resourceNutanixNDBRegisterDatabaseRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { @@ -362,6 +363,7 @@ func resourceNutanixNDBRegisterDatabaseDelete(ctx context.Context, d *schema.Res if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { return diag.Errorf("error waiting for unregister db Instance (%s) to delete: %s", res.Entityid, errWaitTask) } + log.Printf("NDB 
register database with %s id deleted successfully", d.Id()) return nil } diff --git a/nutanix/resource_nutanix_ndb_sla.go b/nutanix/resource_nutanix_ndb_sla.go index 826ac5736..0edec7930 100644 --- a/nutanix/resource_nutanix_ndb_sla.go +++ b/nutanix/resource_nutanix_ndb_sla.go @@ -2,6 +2,7 @@ package nutanix import ( "context" + "log" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -128,6 +129,7 @@ func resourceNutanixNDBSlaCreate(ctx context.Context, d *schema.ResourceData, me } d.SetId(*resp.ID) + log.Printf("NDB SLA with %s id created successfully", d.Id()) return resourceNutanixNDBSlaRead(ctx, d, meta) } @@ -257,7 +259,7 @@ func resourceNutanixNDBSlaUpdate(ctx context.Context, d *schema.ResourceData, me if err != nil { return diag.FromErr(err) } - + log.Printf("NDB SLA with %s id updated successfully", d.Id()) return resourceNutanixNDBSlaRead(ctx, d, meta) } @@ -270,6 +272,7 @@ func resourceNutanixNDBSlaDelete(ctx context.Context, d *schema.ResourceData, me } if resp.Status == utils.StringPtr("success") { + log.Printf("NDB SLA with %s id deleted successfully", d.Id()) d.SetId("") } return nil diff --git a/nutanix/resource_nutanix_ndb_software_version_profile.go b/nutanix/resource_nutanix_ndb_software_version_profile.go index 78ccc8005..2f2e9584f 100644 --- a/nutanix/resource_nutanix_ndb_software_version_profile.go +++ b/nutanix/resource_nutanix_ndb_software_version_profile.go @@ -287,7 +287,7 @@ func resourceNutanixNDBSoftwareVersionProfileCreate(ctx context.Context, d *sche if er != nil { return diag.FromErr(er) } - + log.Printf("NDB Software Version Profile with %s id created successfully", d.Id()) return resourceNutanixNDBSoftwareVersionProfileRead(ctx, d, meta) } @@ -392,7 +392,7 @@ func resourceNutanixNDBSoftwareVersionProfileUpdate(ctx context.Context, d *sche if er != nil { return diag.FromErr(er) } - + log.Printf("NDB Software Version Profile with %s id updated successfully", d.Id()) return resourceNutanixNDBSoftwareVersionProfileRead(ctx, d, meta) } @@ -405,6 +405,7 @@ func resourceNutanixNDBSoftwareVersionProfileDelete(ctx context.Context, d *sche } if resp == utils.StringPtr("Profile Successfully Deleted.") { + log.Printf("NDB Software Version Profile with %s id deleted successfully", d.Id()) d.SetId("") } return nil diff --git a/website/docs/d/ndb_clone.html.markdown b/website/docs/d/ndb_clone.html.markdown new file mode 100644 index 000000000..4a160a92f --- /dev/null +++ b/website/docs/d/ndb_clone.html.markdown @@ -0,0 +1,80 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_clone" +sidebar_current: "docs-nutanix-datasource-ndb-clone" +description: |- + Describes a clone in Nutanix Database Service +--- + +# nutanix_ndb_clone + +Describes the clone present in Nutanix Database Service + +## Example Usage + +```hcl + + data "nutanix_ndb_clone" "name" { + clone_name = "test-inst-tf-check" + } + + data "nutanix_ndb_clone" "name" { + clone_name = "test-inst-tf-check" + filters{ + detailed= true + } + } + +``` + +## Argument Reference + +* `clone_id`: (Optional) Clone id +* `clone_name`: (Optional) Clone Name +* `filters`: (Optional) Fetchs info based on filter + +### filters +* `detailed`: (Optional) Default is false +* `any_status`: (Optional) Default is false +* `load_dbserver_cluster`:(Optional) Default is false +* `timezone`:(Optional) Default is UTC + + +## Attribute Reference + +* `id`: cloned id +* `name`: cloned name +* `description`: cloned description +* `owner_id`: owner id +* 
`date_created`: date created for clone +* `date_modified`: last modified date for clone +* `tags`: allows you to assign metadata to entities (clones, time machines, databases, and database servers) by using tags. +* `properties`: properties of clone +* `clustered`: clustered or not +* `clone`: clone or not +* `era_created`: era created +* `internal`: internal or not +* `placeholder`: placeholder of clone +* `database_name`: database name +* `type`: type +* `database_cluster_type`: database cluster type +* `status`: status of clone +* `database_status`: database status +* `dbserver_logical_cluster_id`: dbserver logical cluster id +* `time_machine_id`: time machine id +* `parent_time_machine_id`: parent time machine id +* `time_zone`: time zone +* `info`: cloned info +* `group_info`: cloned group info +* `metadata`: metadata of clone +* `metric`: Metric of clone +* `category`: category +* `parent_database_id`: parent database id +* `parent_source_database_id`: parent source database id +* `lcm_config`: LCM Config +* `time_machine`: Time machine info +* `dbserver_logical_cluster`: dbserver logical cluster +* `database_nodes`: database nodes associated with database instance +* `linked_databases`: linked databases within database instance +* `databases`: database for a cloned instance +* `database_group_state_info`: database group state info diff --git a/website/docs/d/ndb_clones.html.markdown b/website/docs/d/ndb_clones.html.markdown new file mode 100644 index 000000000..3bccc5f23 --- /dev/null +++ b/website/docs/d/ndb_clones.html.markdown @@ -0,0 +1,82 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_clones" +sidebar_current: "docs-nutanix-datasource-ndb-clones" +description: |- + List all the clone in Nutanix Database Service +--- + +# nutanix_ndb_clones + +List all the clone present in Nutanix Database Service + +## Example Usage + +```hcl + + data "nutanix_ndb_clones" "clones"{ } + + data "nutanix_ndb_clones" "clones"{ + filters{ + detailed= true + } + } + +``` + +## Argument Reference + +* `filters`: (Optional) Fetches the clone info based on given params + +### filters + +* `detailed`: (Optional) Default is false +* `any_status`: (Optional) Default is false +* `load_dbserver_cluster`: (Optional) Default is false +* `timezone`: (Optional) Default is UTC +* `order_by_dbserver_cluster`: (Optional) Default is false +* `order_by_dbserver_logical_cluster`: (Optional) Default is false + + +## Attribute Reference + +* `clones`: List of clones based on filters + +### clones + +* `id`: cloned id +* `name`: cloned name +* `description`: cloned description +* `owner_id`: owner id +* `date_created`: date created for clone +* `date_modified`: last modified date for clone +* `tags`: allows you to assign metadata to entities (clones, time machines, databases, and database servers) by using tags. 
+* `properties`: properties of clone
+* `clustered`: clustered or not
+* `clone`: clone or not
+* `era_created`: era created
+* `internal`: internal or not
+* `placeholder`: placeholder of clone
+* `database_name`: database name
+* `type`: type
+* `database_cluster_type`: database cluster type
+* `status`: status of clone
+* `database_status`: database status
+* `dbserver_logical_cluster_id`: dbserver logical cluster id
+* `time_machine_id`: time machine id
+* `parent_time_machine_id`: parent time machine id
+* `time_zone`: time zone
+* `info`: cloned info
+* `group_info`: cloned group info
+* `metadata`: metadata of clone
+* `metric`: Metric of clone
+* `category`: category
+* `parent_database_id`: parent database id
+* `parent_source_database_id`: parent source database id
+* `lcm_config`: LCM Config
+* `time_machine`: Time machine info
+* `dbserver_logical_cluster`: dbserver logical cluster
+* `database_nodes`: database nodes associated with database instance
+* `linked_databases`: linked databases within database instance
+* `databases`: database for a cloned instance
+* `database_group_state_info`: database group state info
\ No newline at end of file
diff --git a/website/docs/d/ndb_snapshot.html.markdown b/website/docs/d/ndb_snapshot.html.markdown
new file mode 100644
index 000000000..5048895da
--- /dev/null
+++ b/website/docs/d/ndb_snapshot.html.markdown
@@ -0,0 +1,75 @@
+---
+layout: "nutanix"
+page_title: "NUTANIX: nutanix_ndb_snapshot"
+sidebar_current: "docs-nutanix-datasource-ndb-snapshot"
+description: |-
+  Describes a snapshot in Nutanix Database Service
+---
+
+# nutanix_ndb_snapshot
+
+Describes the snapshot present in Nutanix Database Service
+
+## Example Usage
+
+```hcl
+
+  data "nutanix_ndb_snapshot" "snaps"{
+    snapshot_id = "{{ snapshot_id }}"
+    filters {
+      load_replicated_child_snapshots = true
+    }
+  }
+```
+
+## Argument Reference
+
+* `snapshot_id`: (Required) Snapshot ID to be given
+* `filters`: (Optional) Fetches the snapshot details based on the given input
+
+### filters
+* `timezone`: (Optional) Default is UTC
+* `load_replicated_child_snapshots`: (Optional) load child snapshots. Default is false
+
+## Attribute Reference
+
+* `id`: id of snapshot
+* `description`: description of snapshot
+* `properties`: properties
+* `owner_id`: owner id
+* `date_created`: created date
+* `date_modified`: modified date
+* `properties`: properties
+* `tags`: tags
+* `snapshot_uuid`: snapshot uuid
+* `nx_cluster_id`: nx cluster id
+* `protection_domain_id`: protection domain
+* `parent_snapshot_id`: parent snapshot id
+* `database_node_id`: database node id
+* `app_info_version`: App info version
+* `status`: status
+* `type`: type
+* `applicable_types`: Applicable types
+* `snapshot_timestamp`: snapshot timestamp
+* `metadata`: metadata of snapshot
+* `software_snapshot_id`: software snapshot id
+* `software_database_snapshot`: software database snapshot
+* `dbserver_storage_metadata_version`: dbserver storage metadata version
+* `santised_from_snapshot_id`: sanitized snapshot id
+* `timezone`: timezone
+* `processed`: processed
+* `database_snapshot`: database snapshot
+* `from_timestamp`: from timestamp
+* `to_timestamp`: to timestamp
+* `dbserver_id`: dbserver id
+* `dbserver_name`: dbserver name
+* `dbserver_ip`: dbserver ip
+* `replicated_snapshots`: replicated snapshots
+* `software_snapshot`: software snapshot
+* `santised_snapshots`: santised snapshots
+* `snapshot_family`: snapshot family
+* `snapshot_timestamp_date`: snapshot timestamp date
+* `lcm_config`: LCM config
+* `parent_snapshot`: parent snapshot
+* `snapshot_size`: snapshot size
+
diff --git a/website/docs/d/ndb_snapshots.html.markdown b/website/docs/d/ndb_snapshots.html.markdown
new file mode 100644
index 000000000..0f2f94f70
--- /dev/null
+++ b/website/docs/d/ndb_snapshots.html.markdown
@@ -0,0 +1,77 @@
+---
+layout: "nutanix"
+page_title: "NUTANIX: nutanix_ndb_snapshots"
+sidebar_current: "docs-nutanix-datasource-ndb-snapshots"
+description: |-
+  List all snapshots in Nutanix Database Service
+---
+
+# nutanix_ndb_snapshots
+
+List all snapshots present in Nutanix Database Service
+
+## Example Usage
+
+```hcl
+
+  data "nutanix_ndb_snapshots" "snaps"{ }
+
+  data "nutanix_ndb_snapshots" "snaps"{
+    filters{
+      time_machine_id = "{{ time_machine_id }}"
+    }
+  }
+```
+
+## Argument Reference
+
+* `filters`: (Optional) filters help to fetch the snapshots based on the given input
+
+### filters
+* `time_machine_id`: (Optional) Fetches all the snapshots for a given time machine
+
+## Attribute Reference
+
+* `snapshots`: List of snapshots
+
+### snapshots
+
+* `id`: id of snapshot
+* `description`: description of snapshot
+* `properties`: properties
+* `owner_id`: owner id
+* `date_created`: created date
+* `date_modified`: modified date
+* `properties`: properties
+* `tags`: tags
+* `snapshot_uuid`: snapshot uuid
+* `nx_cluster_id`: nx cluster id
+* `protection_domain_id`: protection domain
+* `parent_snapshot_id`: parent snapshot id
+* `database_node_id`: database node id
+* `app_info_version`: App info version
+* `status`: status
+* `type`: type
+* `applicable_types`: Applicable types
+* `snapshot_timestamp`: snapshot timestamp
+* `metadata`: metadata of snapshot
+* `software_snapshot_id`: software snapshot id
+* `software_database_snapshot`: software database snapshot
+* `dbserver_storage_metadata_version`: dbserver storage metadata version
+* `santised_from_snapshot_id`: sanitized snapshot id
+* `timezone`: timezone
+* `processed`: processed
+* `database_snapshot`: database snapshot
+* `from_timestamp`: from timestamp
+* `to_timestamp`: to timestamp
+* `dbserver_id`: dbserver id
+* `dbserver_name`: dbserver name
+* `dbserver_ip`: dbserver ip
+* `replicated_snapshots`: replicated snapshots
+* `software_snapshot`: software snapshot
+* `santised_snapshots`: santised snapshots
+* `snapshot_family`: snapshot family
+* `snapshot_timestamp_date`: snapshot timestamp date
+* `lcm_config`: LCM config
+* `parent_snapshot`: parent snapshot
+* `snapshot_size`: snapshot size
diff --git a/website/docs/d/ndb_time_machine.html.markdown b/website/docs/d/ndb_time_machine.html.markdown
new file mode 100644
index 000000000..f3d92de6e
--- /dev/null
+++ b/website/docs/d/ndb_time_machine.html.markdown
@@ -0,0 +1,59 @@
+---
+layout: "nutanix"
+page_title: "NUTANIX: nutanix_ndb_time_machine"
+sidebar_current: "docs-nutanix-datasource-ndb-time_machine"
+description: |-
+  Describes a time machine in Nutanix Database Service
+---
+
+# nutanix_ndb_time_machine
+
+Describes a time machine present in Nutanix Database Service
+
+## Example Usage
+
+```hcl
+
+  data "nutanix_ndb_time_machine" "tm"{
+    time_machine_id = "{{ time_machine_id }}"
+  }
+
+```
+
+## Argument Reference
+
+* `time_machine_id`: (Optional) time machine id
+* `time_machine_name`: (Optional) time machine name
+
+## Attribute Reference
+
+* `id`: time machine id
+* `name`: time machine name
+* `description`: time machine description
+* `owner_id`: owner id
+* `date_created`: date created
+* `date_modified`: date modified
+* `access_level`: access level to time machines
+* `properties`: properties of time machines
+* `tags`: tags
+* `clustered`: clustered or not
+* `clone`: clone time machine or not
+* `internal`: internal
+* `database_id`: database id
+* `type`: type of time machine
+* `category`: category of time machine
+* `status`: status of time machine
+* `ea_status`: ea status of time machine
+* `scope`: scope
+* `sla_id`: sla id
+* `schedule_id`: schedule id
+* `database`: database info
+* `clones`: clone info
+* `source_nx_clusters`: source clusters
+* `sla_update_in_progress`: sla update in progress
+* `metric`: Metric info
+* `sla_update_metadata`: sla update metadata
+* `sla`: sla info
+* `schedule`: schedule info
+* `metadata`: metadata info
+
diff --git a/website/docs/d/ndb_time_machine_capability.html.markdown b/website/docs/d/ndb_time_machine_capability.html.markdown
new file mode 100644
index 000000000..40cac24c9
--- /dev/null
+++ b/website/docs/d/ndb_time_machine_capability.html.markdown
@@ -0,0 +1,44 @@
+---
+layout: "nutanix"
+page_title: "NUTANIX: nutanix_ndb_tms_capability"
+sidebar_current: "docs-nutanix-datasource-ndb-tms-capability"
+description: |-
+  Describes the capability of a time machine in Nutanix Database Service
+---
+
+# nutanix_ndb_tms_capability
+
+Describes the capability of a time machine present in Nutanix Database Service
+
+## Example Usage
+
+```hcl
+
+  data "nutanix_ndb_tms_capability" "tms_cap"{
+    time_machine_id = "{{ timeMachine_ID }}"
+  }
+
+```
+
+## Argument Reference
+
+* `time_machine_id`: (Required) Time machine Id
+
+## Attribute Reference
+
+* `output_time_zone`: output time zone
+* `type`: type of tms
+* `nx_cluster_id`: cluster id where time machine is present
+* `source`: source of time machine
+* `nx_cluster_association_type`: cluster association
+* `sla_id`: SLA id
+* `overall_continuous_range_end_time`: continuous range end time info
+* `last_continuous_snapshot_time`: last continuous snapshot time
+* `log_catchup_start_time`: log catchup start time
+* `heal_with_reset_capability`: heal with reset capability
+* `database_ids`: database ids
+* `log_time_info`: log time info
+* `capability`: capability info
+* `capability_reset_time`: capability reset time
+* `last_db_log`: last db log info
+* `last_continuous_snapshot`: last continuous snapshot info
\ No newline at end of file
diff --git a/website/docs/d/ndb_time_machines.html.markdown b/website/docs/d/ndb_time_machines.html.markdown
new file mode 100644
index 000000000..c6ddc0151
--- /dev/null
+++ b/website/docs/d/ndb_time_machines.html.markdown
@@ -0,0 +1,55 @@
+---
+layout: "nutanix"
+page_title: "NUTANIX: nutanix_ndb_time_machines"
+sidebar_current: "docs-nutanix-datasource-ndb-time-machines"
+description: |-
+  List all time machines in Nutanix Database Service
+---
+
+# nutanix_ndb_time_machines
+
+List all time machines present in Nutanix Database Service
+
+## Example Usage
+
+```hcl
+
+  data "nutanix_ndb_time_machines" "tms" {}
+
+```
+
+## Attribute Reference
+
+* `time_machines`: List of all time machines in NDB
+
+### time_machines
+
+* `id`: time machine id
+* `name`: time machine name
+* `description`: time machine description
+* `owner_id`: owner id
+* `date_created`: date created
+* `date_modified`: date modified
+* `access_level`: access level to time machines
+* `properties`: properties of time machines
+* `tags`: tags
+* `clustered`: clustered or not
+* `clone`: clone time machine or not
+* `internal`: internal
+* `database_id`: database id
+* `type`: type of time machine
+* `category`: category of time machine
+* `status`: status of time machine
+* `ea_status`: ea status of time machine
+* `scope`: scope
+* `sla_id`: sla id
+* `schedule_id`: schedule id
+* `database`: database info
+* `clones`: clone info
+* `source_nx_clusters`: source clusters
+* `sla_update_in_progress`: sla update in progress
+* `metric`: Metric info
+* `sla_update_metadata`: sla update metadata
+* `sla`: sla info
+* `schedule`: schedule info
+* `metadata`: metadata info
diff --git a/website/docs/r/ndb_authorize_dbserver.html.markdown b/website/docs/r/ndb_authorize_dbserver.html.markdown
new file mode 100644
index 000000000..55322dfa7
--- /dev/null
+++ b/website/docs/r/ndb_authorize_dbserver.html.markdown
@@ -0,0 +1,28 @@
+---
+layout: "nutanix"
+page_title: "NUTANIX: nutanix_ndb_authorize_dbserver"
+sidebar_current: "docs-nutanix-resource-ndb-authorize-dbserver"
+description: |-
+  This operation submits a request to authorize db server VMs for cloning of the database instance in Nutanix database service (NDB).
+---
+
+# nutanix_ndb_authorize_dbserver
+
+Provides a resource to authorize db server VMs for cloning of a database instance based on the input parameters.
+
+## Example Usage
+
+```hcl
+
+  resource "nutanix_ndb_authorize_dbserver" "name" {
+    time_machine_name = "test-pg-inst"
+    dbservers_id = [
+      "{{ dbServer_IDs}}"
+    ]
+  }
+```
+
+## Argument Reference
+
+* `time_machine_id`: (Optional) time machine id
+* `time_machine_name`: (Optional) time machine name
+* `dbservers_id`: (Required) list of dbserver vm ids to be authorized
diff --git a/website/docs/r/ndb_clone.html.markdown b/website/docs/r/ndb_clone.html.markdown
new file mode 100644
index 000000000..3409f2c50
--- /dev/null
+++ b/website/docs/r/ndb_clone.html.markdown
@@ -0,0 +1,122 @@
+---
+layout: "nutanix"
+page_title: "NUTANIX: nutanix_ndb_clone"
+sidebar_current: "docs-nutanix-resource-ndb-clone"
+description: |-
+  This operation submits a request to perform a clone of the database instance in Nutanix database service (NDB).
+---
+
+# nutanix_ndb_clone
+
+Provides a resource to perform a clone of a database instance based on the input parameters.
+
+## Example Usage
+
+```hcl
+## resource for ndb_clone with Point in time given time machine name
+
+  resource "nutanix_ndb_clone" "name" {
+    time_machine_name = "test-pg-inst"
+    name = "test-inst-tf-check"
+    nx_cluster_id = "{{ nx_Cluster_id }}"
+    ssh_public_key = "{{ sshkey }}"
+    user_pitr_timestamp= "{{ point_in_time }}"
+    time_zone = "Asia/Calcutta"
+    create_dbserver = true
+    compute_profile_id = "{{ compute_profile_id }}"
+    network_profile_id ="{{ network_profile_id }}"
+    database_parameter_profile_id = "{{ database_profile_id }}"
+    nodes{
+      vm_name= "test_vm_clone"
+      compute_profile_id = "{{ compute_profile_id }}"
+      network_profile_id ="{{ network_profile_id }}"
+      nx_cluster_id = "{{ nx_Cluster_id }}"
+    }
+    postgresql_info{
+      vm_name="test_vm_clone"
+      db_password= "pass"
+    }
+  }
+```
+
+## Argument Reference
+
+* `time_machine_id`: (Optional) time machine id
+* `time_machine_name`: (Optional) time machine name
+* `snapshot_id`: (Optional) snapshot id from where clone is created
+* `user_pitr_timestamp`: (Optional) point in time for clone to be created
+* `time_zone`: (Optional) timezone
+* `node_count`: Node count. Default is 1 for single instance
+* `nodes`: Nodes contain info about dbservers vm
+* `lcm_config`: LCM Config contains the expiry details and refresh details
+* `name`: Clone name
+* `description`: Clone description
+* `nx_cluster_id`: cluster id where the clone will be present
+* `ssh_public_key`: ssh public key
+* `compute_profile_id`: specify the compute profile id
+* `network_profile_id`: specify the network profile id
+* `database_parameter_profile_id`: specify the database parameter profile id
+* `vm_password`: vm password
+* `create_dbserver`: Specify whether to create a new database server. This value can be set to true or false as required.
+* `clustered`: clone will be clustered or not
+* `dbserver_id`: dbserver id
+* `dbserver_cluster_id`: dbserver cluster id
+* `dbserver_logical_cluster_id`: dbserver logical cluster id
+* `latest_snapshot`: latest snapshot
+* `postgresql_info`: postgresql info for the clone
+* `actionarguments`: (Optional) action arguments, if required
+
+### nodes
+
+* `vm_name`: name for the database server VM.
+* `compute_profile_id`: specify compute profile id
+* `network_profile_id`: specify network profile id
+* `new_db_server_time_zone`: dbserver time zone
+* `nx_cluster_id`: cluster id
+* `properties`: properties of vm
+* `dbserver_id`: dbserver id
+
+### postgresql_info
+
+* `vm_name`: name for the database server VM.
+* `dbserver_description`: description for the dbserver.
+* `db_password`: password of the postgres superuser.
+* `pre_clone_cmd`: OS command that you want to run before the instance is created.
+* `post_clone_cmd`: OS command that you want to run after the instance is created.
+
+### actionarguments
+
+Structure for each action argument in actionarguments list:
+
+* `name`: - (Required) name of argument
+* `value`: - (Required) value for argument
+
+
+## Attributes Reference
+
+* `owner_id`: owner id
+* `date_created`: date created for clone
+* `date_modified`: last modified date for clone
+* `tags`: allows you to assign metadata to entities (clones, time machines, databases, and database servers) by using tags.
+* `clone`: cloned or not +* `era_created`: era created or not +* `internal`: internal +* `placeholder`: placeholder +* `database_name`: database name +* `type`: type of clone +* `database_cluster_type`: database cluster type +* `status`: status of clone +* `database_status`: database status +* `info`: info of clone +* `group_info`: group info of clone +* `metadata`: metadata about clone +* `metric`: Stores storage info regarding size, allocatedSize, usedSize and unit of calculation that seems to have been fetched from PRISM. +* `category`:category of clone +* `parent_database_id`: parent database id +* `parent_source_database_id`: parent source database id +* `dbserver_logical_cluster`: dbserver logical cluster +* `database_nodes`: database nodes associated with database instance +* `linked_databases`: linked databases within database instance + + +See detailed information in [NDB Database Instance](https://www.nutanix.dev/api_references/ndb/#/9a50106e42347-create-clone-using-given-time-machine) . + diff --git a/website/docs/r/ndb_database.html.markdown b/website/docs/r/ndb_database.html.markdown index a266905d1..b134bfd0a 100644 --- a/website/docs/r/ndb_database.html.markdown +++ b/website/docs/r/ndb_database.html.markdown @@ -95,6 +95,173 @@ resource "nutanix_ndb_database" "dbp" { } } } + + +// resource to provision HA instance + +resource "nutanix_ndb_database" "dbp" { + databasetype = "postgres_database" + name = "test-pg-inst-HA-tf" + description = "adding description" + + // adding the profiles details + softwareprofileid = "{{ software_profile_id }}" + softwareprofileversionid = "{{ software_profile_version_id }}" + computeprofileid = "{{ compute_profile_id }}" + networkprofileid = "{{ network_profile_id }}" + dbparameterprofileid = "{{ db_parameter_profile_id }}" + + // required for HA instance + createdbserver = true + clustered = true + + // node count (with haproxy server node) + nodecount= 4 + + // min required details for provisioning HA instance + postgresql_info{ + listener_port = "5432" + + database_size= "200" + + db_password = "{{ database password}}" + + database_names= "testdb1" + + ha_instance{ + proxy_read_port= "5001" + + proxy_write_port = "5000" + + cluster_name= "{{ cluster_name }}" + + patroni_cluster_name = " {{ patroni_cluster_name }}" + } + } + + nxclusterid= "1c42ca25-32f4-42d9-a2bd-6a21f925b725" + sshpublickey= "{{ ssh_public_key }}" + + // nodes are required. 
+ + // HA proxy node + nodes{ + properties{ + name = "node_type" + value = "haproxy" + } + vmname = "{{ vm name }}" + nx_cluster_id = "{{ nx_cluster_id }}" + } + + // Primary node for read/write ops + nodes{ + properties{ + name= "role" + value= "Primary" + } + properties{ + name= "failover_mode" + value= "Automatic" + } + properties{ + name= "node_type" + value= "database" + } + + vmname = "{{ name of vm }}" + networkprofileid="{{ network_profile_id }}" + computeprofileid= "{{ compute_profile_id }}" + nx_cluster_id= "{{ nx_cluster_id }}" + } + + // secondary nodes for read ops + nodes{ + properties{ + name= "role" + value= "Secondary" + } + properties{ + name= "failover_mode" + value= "Automatic" + } + properties{ + name= "node_type" + value= "database" + } + vmname = "{{ name of vm }}" + networkprofileid="{{ network_profile_id }}" + computeprofileid= "{{ compute_profile_id }}" + nx_cluster_id= "{{ nx_cluster_id }}" + } + nodes{ + properties{ + name= "role" + value= "Secondary" + } + properties{ + name= "failover_mode" + value= "Automatic" + } + properties{ + name= "node_type" + value= "database" + } + + vmname = "{{ name of vm }}" + networkprofileid="{{ network_profile_id }}" + computeprofileid= "{{ compute_profile_id }}" + nx_cluster_id= "{{ nx_cluster_id }}" + } + + // time machine required + timemachineinfo { + name= "test-pg-inst-HA" + description="" + sla_details{ + primary_sla{ + sla_id= "{{ required SLA}}0" + nx_cluster_ids= [ + "{{ nx_cluster_id}}" + ] + } + } + // schedule fields are optional + schedule { + snapshottimeofday{ + hours= 16 + minutes= 0 + seconds= 0 + } + continuousschedule{ + enabled=true + logbackupinterval= 30 + snapshotsperday=1 + } + weeklyschedule{ + enabled=true + dayofweek= "WEDNESDAY" + } + monthlyschedule{ + enabled = true + dayofmonth= "27" + } + quartelyschedule{ + enabled=true + startmonth="JANUARY" + dayofmonth= 27 + } + yearlyschedule{ + enabled= false + dayofmonth= 31 + month="DECEMBER" + } + } + } + + vm_password= "{{ vm_password}}" + autotunestagingdrive= true +} ``` ## Argument Reference @@ -138,6 +305,9 @@ Each node in nodes supports the following: * `vmname`: - (Required) name of vm * `networkprofileid`: - (Required) network profile ID * `dbserverid`: - (Optional) Database server ID required for existing VM +* `ip_infos` :- (Optional) IP infos for custom network profile. +* `computeprofileid` :- (Optional) compute profile id +* `nx_cluster_id` :- (Optional) cluster id. ### timemachineinfo @@ -145,11 +315,19 @@ The timemachineinfo attribute supports the following: * `name`: - (Required) name of time machine * `description`: - (Optional) description of time machine -* `slaid`: - (Required) SLA ID +* `slaid`: - (Optional) SLA ID for single instance +* `sla_details`:- (optional) SLA details for HA instance * `autotunelogdrive`: - (Optional) enable auto tune log drive. 
Default: true
+* `schedule`: - (Optional) schedule for snapshots
+* `tags`: - (Optional) tags
+
+### sla_details
+
+* `primary_sla`: - (Required) primary sla details
+* `primary_sla.sla_id`: - (Required) sla id
+* `primary_sla.nx_cluster_ids`: - (Optional) cluster ids
+
+
 ### schedule
 
 The schedule attribute supports the following:
@@ -221,6 +399,22 @@ The postgresql_info attribute supports the following:
 * `db_password`: - (Required) database instance password
 * `pre_create_script`: - (Optional) pre instance create script
 * `post_create_script`: - (Optional) post instance create script
+* `ha_instance`: - (Optional) High Availability instance
+
+### ha_instance
+
+* `cluster_name`: - (Required) cluster name
+* `patroni_cluster_name`: - (Required) patroni cluster name
+* `proxy_read_port`: - (Required) proxy read port
+* `proxy_write_port`: - (Required) proxy write port
+* `provision_virtual_ip`: - (Optional) provision a virtual ip. Default is set to true
+* `deploy_haproxy`: - (Optional) HA proxy node. Default is set to false
+* `enable_synchronous_mode`: - (Optional) enable synchronous mode. Default is set to true
+* `failover_mode`: - (Optional) failover mode of nodes.
+* `node_type`: - (Optional) node type of instance. Default is set to database
+* `archive_wal_expire_days`: - (Optional) archive wal expire days. Default is set to -1
+* `backup_policy`: - (Optional) backup policy for instance. Default is "primary_only"
+* `enable_peer_auth`: - (Optional) enable peer auth. Default is set to false.
 
 ## lifecycle
diff --git a/website/docs/r/ndb_database_restore.html.markdown b/website/docs/r/ndb_database_restore.html.markdown
new file mode 100644
index 000000000..76ebc1ec3
--- /dev/null
+++ b/website/docs/r/ndb_database_restore.html.markdown
@@ -0,0 +1,78 @@
+---
+layout: "nutanix"
+page_title: "NUTANIX: nutanix_ndb_database_restore"
+sidebar_current: "docs-nutanix-resource-ndb-database-restore"
+description: |-
+  Restoring allows you to restore a source instance registered with NDB to a snapshot or point in time supported by the source instance time machine. You can restore an instance by using a snapshot ID, the point-in-time recovery (PITR) timestamp, or the latest snapshot.
+  This operation submits a request to restore the database instance in Nutanix database service (NDB).
+---
+
+# nutanix_ndb_database_restore
+
+Provides a resource to restore the database instance based on the input parameters.
+
+## Example Usage
+
+```hcl
+  // resource to database restore with Point in Time
+
+  resource "nutanix_ndb_database_restore" "name" {
+    database_id= "{{ database_id }}"
+    user_pitr_timestamp = "2022-12-28 00:54:30"
+    time_zone_pitr = "Asia/Calcutta"
+  }
+
+  // resource to database restore with snapshot uuid
+
+  resource "nutanix_ndb_database_restore" "name" {
+    database_id= "{{ database_id }}"
+    snapshot_id= "{{ snapshot id }}"
+  }
+```
+
+## Argument Reference
+
+* `database_id`: (Required) database id
+* `snapshot_id`: (Optional) snapshot id you want to use for restoring the instance
+* `latest_snapshot`: (Optional) restore from the latest snapshot
+* `user_pitr_timestamp`: (Optional) the time to which you want to restore your instance.
+* `time_zone_pitr`: (Optional) timezone. Should be used with `user_pitr_timestamp`
+* `restore_version`: (Optional) helps to restore the database with the same configuration.
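+
+As a usage sketch (hedged: only the arguments documented above are used, and the `"true"` literal shown for `latest_snapshot` is an assumption, not confirmed by this document), a restore to the most recent snapshot could look like:
+
+```hcl
+  // hypothetical sketch: restore the instance to its latest snapshot
+  resource "nutanix_ndb_database_restore" "latest" {
+    database_id     = "{{ database_id }}"
+    latest_snapshot = "true"
+  }
+```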
+
+## Attributes Reference
+
+* `name`: Name of database instance
+* `description`: description of database instance
+* `databasetype`: type of database
+* `properties`: properties of database created
+* `owner_id`: owner id
+* `date_created`: date created for db instance
+* `date_modified`: date modified for instance
+* `tags`: allows you to assign metadata to entities (clones, time machines, databases, and database servers) by using tags.
+* `clone`: whether instance is cloned or not
+* `era_created`: whether era created or not
+* `internal`: is internal
+* `placeholder`: placeholder
+* `database_name`: name of database
+* `type`: type of database
+* `database_cluster_type`: database cluster type
+* `status`: status of instance
+* `database_status`: status of database
+* `dbserver_logical_cluster_id`: dbserver logical cluster id
+* `time_machine_id`: time machine id of instance
+* `parent_time_machine_id`: parent time machine id
+* `time_zone`: timezone in which the instance is created
+* `info`: info of instance
+* `group_info`: group info of instance
+* `metadata`: metadata of instance
+* `metric`: Stores storage info regarding size, allocatedSize, usedSize and unit of calculation that seems to have been fetched from PRISM.
+* `category`: category of instance
+* `parent_database_id`: parent database id
+* `parent_source_database_id`: parent source database id
+* `lcm_config`: LCM config of instance
+* `time_machine`: Time Machine details of instance
+* `dbserver_logical_cluster`: dbserver logical cluster
+* `database_nodes`: database nodes associated with database instance
+* `linked_databases`: linked databases within database instance
+
+
+See detailed information in [NDB Database Restore](https://www.nutanix.dev/api_references/ndb/#/90eb0c2f2da21-restore-database).
\ No newline at end of file
diff --git a/website/docs/r/ndb_database_scale.html.markdown b/website/docs/r/ndb_database_scale.html.markdown
new file mode 100644
index 000000000..6c751ccea
--- /dev/null
+++ b/website/docs/r/ndb_database_scale.html.markdown
@@ -0,0 +1,70 @@
+---
+layout: "nutanix"
+page_title: "NUTANIX: nutanix_ndb_database_scale"
+sidebar_current: "docs-nutanix-resource-ndb-database-scale"
+description: |-
+  Scaling the database extends the storage size proportionally across the attached virtual disks or volume groups. Scaling is supported for both single and HA instances.
+  This operation submits a request to scale out the database instance in Nutanix database service (NDB).
+---
+
+# nutanix_ndb_database_scale
+
+Provides a resource to scale the database instance based on the input parameters.
+
+## Example Usage
+
+```hcl
+
+  // resource to scale the database
+
+  resource "nutanix_ndb_database_scale" "scale" {
+    application_type = "{{ Application Type }}"
+    database_uuid = "{{ database_id }}"
+    data_storage_size = 1
+  }
+```
+
+## Argument Reference
+
+* `database_uuid`: (Required) Database id
+* `application_type`: (Required) type of instance. eg: postgres_database
+* `data_storage_size`: (Required) data area (in GiB) to be added to the existing database.
+
+* `pre_script_cmd`: (Optional) pre script command
+* `post_script_cmd`: (Optional) post script command
+* `scale_count`: (Optional) increment to scale the same instance again with the same config
+
+
+## Attributes Reference
+
+* `name`: Name of database instance
+* `description`: description of database instance
+* `databasetype`: type of database
+* `properties`: properties of database created
+* `owner_id`: owner id
+* `date_created`: date created for db instance
+* `date_modified`: date modified for instance
+* `tags`: allows you to assign metadata to entities (clones, time machines, databases, and database servers) by using tags.
+* `clone`: whether instance is cloned or not
+* `era_created`: whether era created or not
+* `internal`: is internal
+* `placeholder`: placeholder
+* `database_name`: name of database
+* `type`: type of database
+* `database_cluster_type`: database cluster type
+* `status`: status of instance
+* `database_status`: status of database
+* `dbserver_logical_cluster_id`: dbserver logical cluster id
+* `time_machine_id`: time machine id of instance
+* `parent_time_machine_id`: parent time machine id
+* `time_zone`: timezone on which instance is created
+* `info`: info of instance
+* `group_info`: group info of instance
+* `metadata`: metadata of instance
+* `metric`: stores storage info (size, allocatedSize, usedSize and unit of calculation) fetched from Prism.
+* `category`: category of instance
+* `parent_database_id`: parent database id
+* `parent_source_database_id`: parent source database id
+* `lcm_config`: LCM config of instance
+* `time_machine`: Time Machine details of instance
+* `dbserver_logical_cluster`: dbserver logical cluster
+* `database_nodes`: database nodes associated with database instance
+* `linked_databases`: linked databases within database instance
diff --git a/website/docs/r/ndb_database_snapshot.html.markdown b/website/docs/r/ndb_database_snapshot.html.markdown
new file mode 100644
index 000000000..11f81d3ad
--- /dev/null
+++ b/website/docs/r/ndb_database_snapshot.html.markdown
@@ -0,0 +1,87 @@
+layout: "nutanix"
+page_title: "NUTANIX: nutanix_ndb_database_snapshot"
+sidebar_current: "docs-nutanix-resource-ndb-database-snapshot"
+description: |-
+  NDB time machine allows you to capture and replicate snapshots of the source database across multiple clusters (as defined in the DAM policy) at the time and frequency specified in the schedule.
+  This operation submits a request to take a snapshot of the database instance in Nutanix database service (NDB).
+---
+
+# nutanix_ndb_database_snapshot
+
+Provides a resource to take a snapshot of the database instance based on the input parameters.
+
+## Example Usage
+
+```hcl
+  // resource to create snapshot with time machine id
+
+  resource "nutanix_ndb_database_snapshot" "name" {
+    time_machine_id = "{{ tms_ID }}"
+    name = "test-snap"
+    remove_schedule_in_days = 1
+  }
+
+  // resource to create snapshot with time machine name
+
+  resource "nutanix_ndb_database_snapshot" "name" {
+    time_machine_name = "{{ tms_name }}"
+    name = "test-snap"
+    remove_schedule_in_days = 1
+  }
+
+```
+
+## Argument Reference
+
+* `time_machine_id`: (Optional) Time Machine Id
+* `time_machine_name`: (Optional) Time Machine Name
+* `name`: (Required) Snapshot name
+* `remove_schedule_in_days`: (Optional) removal schedule, in days, after which the snapshot should be removed.
+
+* `expiry_date_timezone`: (Optional) timezone for the expiry date. Default is set to Asia/Calcutta
+* `replicate_to_clusters`: (Optional) clusters to which the snapshot should be replicated.
+
+
+## Attributes Reference
+
+* `id`: name of snapshot
+* `description`: description of snapshot
+* `properties`: properties
+* `owner_id`: owner id
+* `date_created`: created date
+* `date_modified`: modified date
+* `properties`: properties
+* `tags`: tags
+* `snapshot_uuid`: snapshot uuid
+* `nx_cluster_id`: nx cluster id
+* `protection_domain_id`: protection domain
+* `parent_snapshot_id`: parent snapshot id
+* `database_node_id`: database node id
+* `app_info_version`: App info version
+* `status`: status
+* `type`: type
+* `applicable_types`: Applicable types
+* `snapshot_timestamp`: snapshot timestamp
+* `metadata`: metadata of snapshot
+* `software_snapshot_id`: software snapshot id
+* `software_database_snapshot`: software database snapshot
+* `dbserver_storage_metadata_version`: dbserver storage metadata version
+* `santised_from_snapshot_id`: sanitised from snapshot id
+* `timezone`: timezone
+* `processed`: processed
+* `database_snapshot`: database snapshot
+* `from_timestamp`: from timestamp
+* `to_timestamp`: to timestamp
+* `dbserver_id`: dbserver id
+* `dbserver_name`: dbserver name
+* `dbserver_ip`: dbserver ip
+* `replicated_snapshots`: replicated snapshots
+* `software_snapshot`: software snapshot
+* `santised_snapshots`: sanitised snapshots
+* `snapshot_family`: snapshot family
+* `snapshot_timestamp_date`: snapshot timestamp date
+* `lcm_config`: LCM config
+* `parent_snapshot`: parent snapshot
+* `snapshot_size`: snapshot size
+
+
+See detailed information in [NDB Database Snapshot](https://www.nutanix.dev/api_references/ndb/#/7f53689342db9-take-snapshot).
\ No newline at end of file
diff --git a/website/docs/r/ndb_log_catchups.html.markdown b/website/docs/r/ndb_log_catchups.html.markdown
new file mode 100644
index 000000000..acae3974e
--- /dev/null
+++ b/website/docs/r/ndb_log_catchups.html.markdown
@@ -0,0 +1,33 @@
+layout: "nutanix"
+page_title: "NUTANIX: nutanix_ndb_log_catchups"
+sidebar_current: "docs-nutanix-resource-ndb-log-catchups"
+description: |-
+  A log catch-up operation copies transaction logs from the source database based on a specified schedule. The schedule can be provided during database registration or provisioning or can be modified later.
+  This operation submits a request to perform log catchups of the database instance in Nutanix database service (NDB).
+---
+
+# nutanix_ndb_log_catchups
+
+Provides a resource to perform the log catchup for a database instance based on the input parameters.
+
+## Example Usage
+
+```hcl
+  resource "nutanix_ndb_log_catchups" "name" {
+    time_machine_id = "{{ timeMachineID }}"
+  }
+
+  resource "nutanix_ndb_log_catchups" "name" {
+    database_id = "{{ DatabaseID }}"
+  }
+```
+
+## Argument Reference
+
+* `time_machine_id`: (Optional) time machine id of the database instance
+* `database_id`: (Optional) database id
+* `for_restore`: (Optional) logs to back up for a restore. The database may contain additional logs; back up any remaining logs before restore or they will be lost.
+* `log_catchup_version`: (Optional) increment to perform the same operation again with the same config.
+
+
+See detailed information in [NDB Log Catchups](https://www.nutanix.dev/api_references/ndb/#/6f54fedbb2e07-start-log-catchup-for-given-time-machine).
\ No newline at end of file diff --git a/website/docs/r/ndb_profiles.html.markdown b/website/docs/r/ndb_profiles.html.markdown new file mode 100644 index 000000000..00055d47b --- /dev/null +++ b/website/docs/r/ndb_profiles.html.markdown @@ -0,0 +1,127 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_profile" +sidebar_current: "docs-nutanix-resource-ndb-profile" +description: |- + This operation submits a request to create, update and delete profiles in Nutanix database service (NDB). + Note: For 1.8.0-beta.2 release, only postgress database type is qualified and officially supported. +--- + +# nutanix_ndb_profile + +Provides a resource to create profiles (Software, Network, Database Paramter, Compute) based on the input parameters. + +## Example Usage + +```hcl + + // resource to create compute profile + + resource "nutanix_ndb_profile" "computeProfile" { + name = "compute-tf" + description = "tf added compute" + compute_profile{ + cpus = 1 + core_per_cpu = 2 + memory_size = 2 + } + published= true + } + + + // resource to create database parameter profile + + resource "nutanix_ndb_database_parameter_profile" "dbProfile" { + name= "dbParams-tf" + description = "database description" + engine_type = "postgres_database" + + // optional args for engine type else will set to default values + postgres_database { + max_connections = "100" + max_replication_slots = "10" + } + } + + + // resource to create Postgres Database Single Instance Network profile + + resource "nutanix_ndb_profile" "networkProfile" { + name = "tf-net" + description = "terraform created" + engine_type = "postgres_database" + network_profile{ + topology = "single" + postgres_database{ + single_instance{ + vlan_name = "vlan.154" + } + } + } + published = true + } + + + // resource to create Postgres Database HA Instance Network profile + + resource "nutanix_ndb_profile" "networkProfile" { + name = "tf-net" + description = "terraform created" + engine_type = "postgres_database" + network_profile{ + topology = "cluster" + postgres_database{ + ha_instance{ + num_of_clusters= "1" + vlan_name = ["{{ vlanName }}"] + cluster_name = ["{{ ClusterName }}"] + } + } + } + published = true + } + + + // resource to create Software Profile + + resource "nutanix_ndb_profile" "softwareProfile" { + name= "test-software" + description = "description" + engine_type = "postgres_database" + software_profile { + topology = "single" + postgres_database{ + source_dbserver_id = "{{ source_dbserver_id }}" + base_profile_version_name = "test1" + base_profile_version_description= "test1 desc" + } + available_cluster_ids= ["{{ cluster_ids }}"] + } + published = true + } +``` + +## Argument Reference +* `name` : (Required) Name of profile +* `description` : (Optional) Description of profile +* `engine_type` : Engine Type of database +* `published` : (Optional) Publish for all users +* `compute_profile`: (Optional) Compute Profile +* `software_profile` : (Optional) Software Profile +* `network_profile` : (Optional) Network Profile +* `database_parameter_profile`: (Optional) Database Parameter Profile + +## Attributes Reference + +* `status`: status of profile +* `owner`: owner of profile +* `latest_version`: latest version of profile +* `latest_version_id`: latest version id of profile +* `versions`: versions of associated profiles +* `nx_cluster_id`: cluster on which profile created +* `assoc_databases`: associated databases of profiles +* `assoc_db_servers`: associated database servers for associated profiles +* `cluster_availability`: 
cluster availability of profile + + +See detailed information in [NDB Profiles](https://www.nutanix.dev/api_references/ndb/#/a626231269b79-create-a-profile) . diff --git a/website/docs/r/ndb_register_database.html.markdown b/website/docs/r/ndb_register_database.html.markdown new file mode 100644 index 000000000..d10b8d2ff --- /dev/null +++ b/website/docs/r/ndb_register_database.html.markdown @@ -0,0 +1,268 @@ +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_register_database" +sidebar_current: "docs-nutanix-resource-ndb-database-register" +description: |- + It helps to register a source (production) database running on a Nutanix cluster with NDB. When you register a database with NDB, the database server VM (VM that hosts the source database) is also registered with NDB. After you have registered a database with NDB, a time machine is created for that database. + This operation submits a request to register the database in Nutanix database service (NDB). +--- + +# nutanix_ndb_database_scale + +Provides a resource to register the database based on the input parameters. + +## Example Usage + +```hcl + + // register PostgreSQL database with registered DBServer VM + + resource "nutanix_ndb_register_database" "name" { + database_type = "postgres_database" + database_name= "test-inst" + description = "added by terraform" + category = "DEFAULT" + + // registered vm IP + vm_ip = "{{ vm_ip }}" + + // optional + working_directory= "/tmp" + + reset_description_in_nx_cluster= false + + // time Machine Info + time_machine_info { + name= "test-pg-inst-regis" + description= "description of tms" + slaid=" {{ SLA ID}}" + schedule { + snapshottimeofday{ + hours= 16 + minutes= 0 + seconds= 0 + } + continuousschedule{ + enabled=true + logbackupinterval= 30 + snapshotsperday=1 + } + weeklyschedule{ + enabled=true + dayofweek= "WEDNESDAY" + } + monthlyschedule{ + enabled = true + dayofmonth= "27" + } + quartelyschedule{ + enabled=true + startmonth="JANUARY" + dayofmonth= 27 + } + yearlyschedule{ + enabled= false + dayofmonth= 31 + month="DECEMBER" + } + } + } + postgress_info{ + + // required args + listener_port= "5432" + db_password ="pass" + db_name= "testdb1" + + // Optional with default values + db_user= "postgres" + backup_policy= "prefer_secondary" + postgres_software_home= "{{ directory where the PostgreSQL database software is installed.}}" + software_home= "{{ directory where the PostgreSQL database software is installed. 
}}"
+
+    }
+  }
+
+
+  // register PostgreSQL database whose dbserver VM is not registered with NDB
+  resource "nutanix_ndb_register_database" "name" {
+    database_type = "postgres_database"
+    database_name = "test-inst"
+    description = "added by terraform"
+    category = "DEFAULT"
+    nx_cluster_id = "{{ cluster_ID }}"
+
+    // registered vm info
+    vm_ip = "{{ vm_ip }}"
+    vm_username = "{{ vm_username }}"
+    vm_password = "{{ vm_password }}"
+
+    // optional
+    working_directory = "/tmp"
+
+    reset_description_in_nx_cluster = false
+
+    // time Machine Info
+    time_machine_info {
+      name = "test-pg-inst-regis"
+      description = "description of tms"
+      slaid = "{{ SLA ID }}"
+      schedule {
+        snapshottimeofday{
+          hours = 16
+          minutes = 0
+          seconds = 0
+        }
+        continuousschedule{
+          enabled = true
+          logbackupinterval = 30
+          snapshotsperday = 1
+        }
+        weeklyschedule{
+          enabled = true
+          dayofweek = "WEDNESDAY"
+        }
+        monthlyschedule{
+          enabled = true
+          dayofmonth = "27"
+        }
+        quartelyschedule{
+          enabled = true
+          startmonth = "JANUARY"
+          dayofmonth = 27
+        }
+        yearlyschedule{
+          enabled = false
+          dayofmonth = 31
+          month = "DECEMBER"
+        }
+      }
+    }
+    postgress_info{
+
+      // required args
+      listener_port = "5432"
+      db_password = "pass"
+      db_name = "testdb1"
+
+      // Optional with default values
+      db_user = "postgres"
+      backup_policy = "prefer_secondary"
+      postgres_software_home = "{{ directory where the PostgreSQL database software is installed }}"
+    }
+  }
+
+```
+
+
+## Argument Reference
+
+* `database_type`: (Required) type of database
+* `database_name`: (Required) name of database
+* `description`: (Optional) description
+* `clustered`: (Optional) clustered or not. Default is false
+* `forced_install`: (Optional) forced install. Default: true
+* `category`: (Optional) category of database. Default is "DEFAULT"
+* `vm_ip`: (Required) IP address of dbserver VM
+* `vm_username`: (Optional) username of the NDB drive user account that has sudo access.
+* `vm_password`: (Optional) password of the NDB drive user account.
+* `vm_sshkey`: (Optional) ssh key for vm
+* `vm_description`: (Optional) description for VM
+* `nx_cluster_id`: (Optional) cluster on which NDB is present
+* `reset_description_in_nx_cluster`: (Optional) reset description in cluster
+* `auto_tune_staging_drive`: (Optional) auto tune staging drive. Default is true
+* `working_directory`: (Optional) working directory. Default is /tmp
+* `time_machine_info`: (Required) Time Machine info
+* `tags`: (Optional) tags
+* `actionarguments`: (Optional) action arguments
+* `postgress_info`: (Optional) postgres info for registering.
+
+### postgress_info
+
+* `listener_port`: (Required) listener port of database
+* `db_password`: (Required) database password
+* `db_name`: (Required) name of the database instance you want to register.
+* `db_user`: (Optional) username of the NDB drive user account that has sudo access.
+* `switch_log`: (Optional) switch log of database. Default is true
+* `allow_multiple_databases`: (Optional) allow multiple databases. Default is true
+* `backup_policy`: (Optional) backup policy of database. Default is prefer_secondary.
+* `vm_ip`: (Optional) VM IP of the database server VM on which the instance you want to register is running.
+* `postgres_software_home`: (Required) path to the PostgreSQL home directory in which the PostgreSQL software is installed.
+* `software_home`: (Optional) path to the directory in which the PostgreSQL software is installed.
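+
+As a quick reference, a minimal sketch of the `postgress_info` block with only its required arguments (all values are placeholders; `postgress_info` is the attribute name exactly as exposed by the provider):
+
+```hcl
+  postgress_info {
+    listener_port          = "5432"
+    db_password            = "pass"
+    db_name                = "testdb1"
+    postgres_software_home = "{{ postgres software home directory }}"
+  }
+```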
+
+### time_machine_info
+
+The timemachineinfo attribute supports the following:
+
+* `name`: - (Required) name of time machine
+* `description`: - (Optional) description of time machine
+* `slaid`: - (Optional) SLA ID for single instance
+* `sla_details`: - (Optional) SLA details for HA instance
+* `autotunelogdrive`: - (Optional) enable auto tune log drive. Default: true
+* `schedule`: - (Optional) schedule for snapshots
+* `tags`: - (Optional) tags
+
+### sla_details
+
+* `primary_sla`:- (Required) primary sla details
+* `primary_sla.sla_id` :- (Required) sla id
+* `primary_sla.nx_cluster_ids` :- (Optional) cluster ids
+
+
+### schedule
+
+The schedule attribute supports the following:
+
+* `snapshottimeofday`: - (Optional) daily snapshot config
+* `continuousschedule`: - (Optional) snapshot freq and log config
+* `weeklyschedule`: - (Optional) weekly snapshot config
+* `monthlyschedule`: - (Optional) monthly snapshot config
+* `quartelyschedule`: - (Optional) quarterly snapshot config
+* `yearlyschedule`: - (Optional) yearly snapshot config
+
+
+### actionarguments
+
+Structure for each action argument in actionarguments list:
+
+* `name`: - (Required) name of argument
+* `value`: - (Required) value for argument
+
+
+## Attributes Reference
+
+* `name`: Name of database instance
+* `description`: description of database instance
+* `databasetype`: type of database
+* `properties`: properties of database created
+* `owner_id`: owner id
+* `date_created`: date created for db instance
+* `date_modified`: date modified for instance
+* `tags`: allows you to assign metadata to entities (clones, time machines, databases, and database servers) by using tags.
+* `clone`: whether instance is cloned or not
+* `era_created`: whether era created or not
+* `internal`: is internal
+* `placeholder`: placeholder
+* `database_name`: name of database
+* `type`: type of database
+* `database_cluster_type`: database cluster type
+* `status`: status of instance
+* `database_status`: status of database
+* `dbserver_logical_cluster_id`: dbserver logical cluster id
+* `time_machine_id`: time machine id of instance
+* `parent_time_machine_id`: parent time machine id
+* `time_zone`: timezone on which instance is created
+* `info`: info of instance
+* `group_info`: group info of instance
+* `metadata`: metadata of instance
+* `metric`: stores storage info (size, allocatedSize, usedSize and unit of calculation) fetched from Prism.
+* `category`: category of instance
+* `parent_database_id`: parent database id
+* `parent_source_database_id`: parent source database id
+* `lcm_config`: LCM config of instance
+* `time_machine`: Time Machine details of instance
+* `dbserver_logical_cluster`: dbserver logical cluster
+* `database_nodes`: database nodes associated with database instance
+* `linked_databases`: linked databases within database instance
+
+
+See detailed information in [NDB Register Database](https://www.nutanix.dev/api_references/ndb/#/00afd58e263e8-register-an-existing-database).
\ No newline at end of file
diff --git a/website/docs/r/ndb_sla.html.markdown b/website/docs/r/ndb_sla.html.markdown
new file mode 100644
index 000000000..4e2417d14
--- /dev/null
+++ b/website/docs/r/ndb_sla.html.markdown
@@ -0,0 +1,52 @@
+layout: "nutanix"
+page_title: "NUTANIX: nutanix_ndb_sla"
+sidebar_current: "docs-nutanix-resource-ndb-sla"
+description: |-
+  SLAs are data retention policies that allow you to specify how long the daily, weekly, monthly, and quarterly snapshots are retained in NDB. This operation submits a request to create, update and delete slas in Nutanix database service (NDB).
+---
+
+# nutanix_ndb_sla
+
+Provides a resource to create SLAs based on the input parameters.
+
+## Example Usage
+
+```hcl
+
+  resource "nutanix_ndb_sla" "sla" {
+    name = "test-sla"
+    description = "here goes description"
+
+    // Retention args are optional with default values
+    continuous_retention = 30
+    daily_retention = 3
+    weekly_retention = 2
+    monthly_retention = 1
+    quarterly_retention = 1
+  }
+```
+
+
+## Argument Reference
+* `name` : (Required) Name of the SLA
+* `description` : (Optional) Description of the SLA
+* `continuous_retention`: (Optional) Duration in days for which transaction logs are retained in NDB.
+* `daily_retention`: (Optional) Duration in days for which a daily snapshot must be retained in NDB.
+* `weekly_retention`: (Optional) Duration in weeks for which a weekly snapshot must be retained in NDB.
+* `monthly_retention`: (Optional) Duration in months for which a monthly snapshot must be retained in NDB.
+* `quarterly_retention`: (Optional) Duration in number of quarters for which a quarterly snapshot must be retained in NDB.
+* `yearly_retention`: (Optional) Not supported as of now.
+
+## Attributes Reference
+
+* `unique_name`: name of sla
+* `owner_id`: owner id
+* `system_sla`: indicates whether the sla is custom or built-in
+* `date_created`: sla created date
+* `date_modified`: sla last modified date
+* `reference_count`: reference count
+* `pitr_enabled`: pitr enabled
+* `current_active_frequency`: current active frequency of the sla
+
+
+See detailed information in [NDB SLA](https://www.nutanix.dev/api_references/ndb/#/a0c17eca8b34f-create-sla-from-ndb-service).
diff --git a/website/docs/r/ndb_software_profile_version.html.markdown b/website/docs/r/ndb_software_profile_version.html.markdown
new file mode 100644
index 000000000..5572c1f43
--- /dev/null
+++ b/website/docs/r/ndb_software_profile_version.html.markdown
@@ -0,0 +1,77 @@
+layout: "nutanix"
+page_title: "NUTANIX: nutanix_ndb_software_version_profile"
+sidebar_current: "docs-nutanix-resource-ndb-software-version-profile"
+description: |-
+  This operation submits a request to create, update and delete software profile versions in Nutanix database service (NDB).
+---
+
+# nutanix_ndb_software_version_profile
+
+Provides a resource to create software profile versions based on the input parameters.
+
+## Example Usage
+
+```hcl
+
+  resource "nutanix_ndb_software_version_profile" "name" {
+    engine_type = "postgres_database"
+    profile_id = resource.nutanix_ndb_profile.name12.id
+    name = "test-tf"
+    description = "made by tf"
+    postgres_database{
+      source_dbserver_id = "{{ DB_Server_ID }}"
+    }
+    available_cluster_ids = ["{{ cluster_ids }}"]
+    status = "published"
+  }
+```
+
+## Argument Reference
+
+* `profile_id`: (Required) profile id
+* `name`: Name of profile
+* `description`: description of profile
+* `engine_type`: engine type of profile
+* `status`: status of profile.
Allowed Values are "deprecated", "published", "unpublished" +* `postgres_database`: postgres database info +* `available_cluster_ids`: available cluster ids + +### postgres_database + +* `source_dbserver_id`: (Optional) source dbserver id +* `os_notes`: (Optional) os notes for software profile +* `db_software_notes`: (Optional) db software notes + +## Attributes Reference + +* `status`: status of profile +* `owner`: owner of profile +* `db_version`: Db version of software profile +* `topology`: topology of software profile +* `system_profile`: system profile or not. +* `version`: Version of software profile +* `published`: Published or not +* `deprecated`: deprecated or not +* `properties`: properties of profile +* `properties_map`: properties map of profile +* `version_cluster_association`: version cluster association + +### version_cluster_association + +* `nx_cluster_id`: nutanix cluster id +* `date_created`: date created of profile +* `date_modified`: date modified of profile +* `owner_id`: owner id +* `status`: status of version +* `profile_version_id`: profile version id +* `properties`: properties of software profile +* `optimized_for_provisioning`: version optimized for provisioning + + +### properties +* `name`: name of property +* `value`: value of property +* `secure`: secure or not + +See detailed information in [NDB Profile version](https://www.nutanix.dev/api_references/ndb/#/424c8db255367-create-profile-version). + From 83b8cd653587440860a7240ef7c197fdc364b977 Mon Sep 17 00:00:00 2001 From: Abhishek Date: Fri, 20 Jan 2023 02:52:21 +0530 Subject: [PATCH 13/18] lint fix --- website/docs/d/ndb_snapshot.html.markdown | 2 +- website/docs/r/ndb_profiles.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/d/ndb_snapshot.html.markdown b/website/docs/d/ndb_snapshot.html.markdown index 5048895da..c6bdeea2c 100644 --- a/website/docs/d/ndb_snapshot.html.markdown +++ b/website/docs/d/ndb_snapshot.html.markdown @@ -31,7 +31,7 @@ Describes the snapshot present in Nutanix Database Service * `timezone`: (Optional) Default is UTC * `load_replicated_child_snapshots`: (Optional) load child snapshots. Default is false -## Attribute Refrence +## Attribute Reference * `id`: name of snapshot * `description`: description of snapshot diff --git a/website/docs/r/ndb_profiles.html.markdown b/website/docs/r/ndb_profiles.html.markdown index 00055d47b..23daef578 100644 --- a/website/docs/r/ndb_profiles.html.markdown +++ b/website/docs/r/ndb_profiles.html.markdown @@ -9,7 +9,7 @@ description: |- # nutanix_ndb_profile -Provides a resource to create profiles (Software, Network, Database Paramter, Compute) based on the input parameters. +Provides a resource to create profiles (Software, Network, Database Parameter, Compute) based on the input parameters. 
## Example Usage From 85018e471765b1f3ee5c292723fef1fd5396a376 Mon Sep 17 00:00:00 2001 From: Abhishek Date: Fri, 20 Jan 2023 11:51:48 +0530 Subject: [PATCH 14/18] added detailed info in docs --- website/docs/d/ndb_clone.html.markdown | 2 + website/docs/d/ndb_clones.html.markdown | 5 +- website/docs/d/ndb_snapshot.html.markdown | 1 + website/docs/d/ndb_snapshots.html.markdown | 3 ++ website/docs/d/ndb_time_machine.html.markdown | 1 + .../ndb_time_machine_capability.html.markdown | 4 +- .../docs/d/ndb_time_machines.html.markdown | 3 ++ .../r/ndb_register_database.html.markdown | 2 +- website/nutanix.erb | 51 +++++++++++++++++++ 9 files changed, 69 insertions(+), 3 deletions(-) diff --git a/website/docs/d/ndb_clone.html.markdown b/website/docs/d/ndb_clone.html.markdown index 4a160a92f..0ab0d8db3 100644 --- a/website/docs/d/ndb_clone.html.markdown +++ b/website/docs/d/ndb_clone.html.markdown @@ -78,3 +78,5 @@ Describes the clone present in Nutanix Database Service * `linked_databases`: linked databases within database instance * `databases`: database for a cloned instance * `database_group_state_info`: database group state info + +See detailed information in [NDB Clone](https://www.nutanix.dev/api_references/ndb/#/2f225874df95a-get-clone-by-value-type). \ No newline at end of file diff --git a/website/docs/d/ndb_clones.html.markdown b/website/docs/d/ndb_clones.html.markdown index 3bccc5f23..8c49cb4b9 100644 --- a/website/docs/d/ndb_clones.html.markdown +++ b/website/docs/d/ndb_clones.html.markdown @@ -79,4 +79,7 @@ List all the clone present in Nutanix Database Service * `database_nodes`: database nodes associated with database instance * `linked_databases`: linked databases within database instance * `databases`: database for a cloned instance -* `database_group_state_info`: database group state info \ No newline at end of file +* `database_group_state_info`: database group state info + + +See detailed information in [NDB Clones](https://www.nutanix.dev/api_references/ndb/#/fc568988b42e5-get-a-list-of-all-clones). \ No newline at end of file diff --git a/website/docs/d/ndb_snapshot.html.markdown b/website/docs/d/ndb_snapshot.html.markdown index c6bdeea2c..8c3d44b15 100644 --- a/website/docs/d/ndb_snapshot.html.markdown +++ b/website/docs/d/ndb_snapshot.html.markdown @@ -73,3 +73,4 @@ Describes the snapshot present in Nutanix Database Service * `parent_snapshot`: parent snapshot * `snapshot_size`: snapshot size +See detailed information in [NDB Snapshot](https://www.nutanix.dev/api_references/ndb/#/f0844bb9b0dd4-get-snapshot-by-value-type). \ No newline at end of file diff --git a/website/docs/d/ndb_snapshots.html.markdown b/website/docs/d/ndb_snapshots.html.markdown index 0f2f94f70..e2d6d6a4b 100644 --- a/website/docs/d/ndb_snapshots.html.markdown +++ b/website/docs/d/ndb_snapshots.html.markdown @@ -75,3 +75,6 @@ List all snapshots present in Nutanix Database Service * `lcm_config`: LCM config * `parent_snapshot`: parent snapshot * `snapshot_size`: snapshot size + + +See detailed information in [NDB Snapshots](https://www.nutanix.dev/api_references/ndb/#/e10c0725f8541-get-list-of-all-snapshots). 
\ No newline at end of file diff --git a/website/docs/d/ndb_time_machine.html.markdown b/website/docs/d/ndb_time_machine.html.markdown index f3d92de6e..3bcc159a3 100644 --- a/website/docs/d/ndb_time_machine.html.markdown +++ b/website/docs/d/ndb_time_machine.html.markdown @@ -57,3 +57,4 @@ description: |- * `schedule`: schedule info * `metadata`: metadata info +See detailed information in [NDB Time Machine](https://www.nutanix.dev/api_references/ndb/#/cb7ba8c0c3284-get-time-machine-by-value-type) . \ No newline at end of file diff --git a/website/docs/d/ndb_time_machine_capability.html.markdown b/website/docs/d/ndb_time_machine_capability.html.markdown index 40cac24c9..795bc2e75 100644 --- a/website/docs/d/ndb_time_machine_capability.html.markdown +++ b/website/docs/d/ndb_time_machine_capability.html.markdown @@ -41,4 +41,6 @@ description: |- * `capability`: capability info * `capability_reset_time`: capability reset time * `last_db_log`: last db log info -* `last_continuous_snapshot`: last continuous snapshot info \ No newline at end of file +* `last_continuous_snapshot`: last continuous snapshot info + +See detailed information in [NDB Time Machine Capability](https://www.nutanix.dev/api_references/ndb/#/8f40c26af7837-get-capability-of-given-time-machine) . diff --git a/website/docs/d/ndb_time_machines.html.markdown b/website/docs/d/ndb_time_machines.html.markdown index c6ddc0151..6d0d122f4 100644 --- a/website/docs/d/ndb_time_machines.html.markdown +++ b/website/docs/d/ndb_time_machines.html.markdown @@ -53,3 +53,6 @@ List all time machines present in Nutanix Database Service * `sla`: sla info * `schedule`: schedule info * `metadata`: metadata info + + +See detailed information in [NDB Time Machines](https://www.nutanix.dev/api_references/ndb/#/256497800ee3c-get-list-of-all-time-machines) . \ No newline at end of file diff --git a/website/docs/r/ndb_register_database.html.markdown b/website/docs/r/ndb_register_database.html.markdown index d10b8d2ff..5232c3ab8 100644 --- a/website/docs/r/ndb_register_database.html.markdown +++ b/website/docs/r/ndb_register_database.html.markdown @@ -6,7 +6,7 @@ description: |- This operation submits a request to register the database in Nutanix database service (NDB). --- -# nutanix_ndb_database_scale +# nutanix_ndb_register_database Provides a resource to register the database based on the input parameters. 
diff --git a/website/nutanix.erb b/website/nutanix.erb index 4268643b0..6b0573948 100644 --- a/website/nutanix.erb +++ b/website/nutanix.erb @@ -187,6 +187,27 @@ > nutanix_ndb_slas + > + nutanix_ndb_clone + + > + nutanix_ndb_clones + + > + nutanix_ndb_snapshots + + + nutanix_ndb_snapshots + + > + nutanix_ndb_tms_capability + + > + nutanix_ndb_time_machine + + > + nutanix_ndb_time_machines + @@ -274,6 +295,36 @@ > nutanix_ndb_database + > + nutanix_ndb_clone + + > + nutanix_ndb_database_restore + + > + nutanix_ndb_database_scale + + > + nutanix_ndb_database_snapshot + + > + nutanix_ndb_log_catchups + + > + nutanix_ndb_profile + + > + nutanix_ndb_register_database + + > + nutanix_ndb_sla + + > + nutanix_ndb_software_version_profile + + > + nutanix_ndb_authorize_dbserver + From 44713a2818462cfa7c66a438b1aca953f4e0f428 Mon Sep 17 00:00:00 2001 From: Abhishek Date: Fri, 20 Jan 2023 12:46:06 +0530 Subject: [PATCH 15/18] added readme and changelog --- CHANGELOG.md | 36 +++++++++++++++++++++++++ README.md | 6 +++-- website/docs/d/ndb_clone.html.markdown | 8 +++--- website/docs/d/ndb_clones.html.markdown | 10 +++---- website/docs/r/ndb_clone.html.markdown | 4 +-- 5 files changed, 51 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0bff678e2..631c89d92 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,39 @@ +## 1.8.0-beta-2 (Jan 20, 2023) +[Full Changelog](https://github.com/nutanix/terraform-provider-nutanix/compare/v1.8.0-beta.1...v1.8.0-beta.2) + +**New Feature:** + +- Feat/1.8.0-beta.2 Release with Nutanix Database Service based resource and datasources [\#533] (https://github.com/nutanix/terraform-provider-nutanix/pull/533) + + New Resources: + - nutanix_ndb_profile + - nutanix_ndb_sla + - nutanix_ndb_database_scale + - nutanix_ndb_database_restore + - nutanix_ndb_database_snapshot + - nutanix_ndb_register_database + - nutanix_ndb_clone + - nutanix_ndb_log_catchups + - nutanix_ndb_authorize_dbservers + - nutanix_ndb_software_version_profile + + New Data Sources: + - nutanix_ndb_snapshot + - nutanix_ndb_snapshots + - nutanix_ndb_time_machine + - nutanix_ndb_time_machines + - nutanix_ndb_tms_capability + - nutanix_ndb_clone + - nutanix_ndb_clones + + +**Implemented enhancements:** + - Support for HA instance in nutanix_ndb_database resource. [\#518](https://github.com/nutanix/terraform-provider-nutanix/pull/518) + - Improving the error when server is unreachable. 
[\#530](https://github.com/nutanix/terraform-provider-nutanix/pull/530)
+ - Fetching of database based on database_type filter [\#513](https://github.com/nutanix/terraform-provider-nutanix/pull/513)
+ - Support of Tags and Maintenance Window in provisioning [\#528](https://github.com/nutanix/terraform-provider-nutanix/pull/528)
+
+
 ## 1.8.0-beta.1 (Oct 12, 2022)
 [Full Changelog](https://github.com/nutanix/terraform-provider-nutanix/compare/v1.7.1...v1.8.0-beta.1)
diff --git a/README.md b/README.md
index 40bed23c5..8302cc55d 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 Terraform provider plugin to integrate with Nutanix Enterprise Cloud
 
-NOTE: The latest version of the Nutanix provider is [v1.8.0-beta.1](https://github.com/nutanix/terraform-provider-nutanix/releases/tag/v1.8.0-beta.1)
+NOTE: The latest version of the Nutanix provider is [v1.8.0-beta.2](https://github.com/nutanix/terraform-provider-nutanix/releases/tag/v1.8.0-beta.2)
 
 Modules based on Terraform Nutanix Provider can be found here : [Modules](https://github.com/nutanix/terraform-provider-nutanix/tree/master/modules)
 ## Build, Quality Status
@@ -75,7 +75,9 @@ Foundation Central based modules and examples : Foundation based modules & examp
 ## Nutanix Database Service
 > For the 1.8.0-beta.1 release of the provider, it will have N-1 compatibility with the Nutanix database service. This release was tested with v2.4 and v2.4.1 versions.
 
-Note: For 1.8.0-beta.1 release, only postgress database type is qualified and officially supported.
+> For the 1.8.0-beta.2 release of the provider, it will have N-2 compatibility with the Nutanix Database Service. This release was tested with v2.5.1.1, v2.5.0.2 and v2.4.1
+
+Note: For 1.8.0-beta.2 release, only postgres database type is qualified and officially supported.
 
 Checkout example : https://github.com/nutanix/terraform-provider-nutanix/blob/master/examples/ndb/database_instance
 
diff --git a/website/docs/d/ndb_clone.html.markdown b/website/docs/d/ndb_clone.html.markdown
index 0ab0d8db3..f67feed8a 100644
--- a/website/docs/d/ndb_clone.html.markdown
+++ b/website/docs/d/ndb_clone.html.markdown
@@ -31,12 +31,12 @@ Describes the clone present in Nutanix Database Service
 
 * `clone_id`: (Optional) Clone id
 * `clone_name`: (Optional) Clone Name
-* `filters`: (Optional) Fetchs info based on filter
+* `filters`: (Optional) Fetches info based on filter
 
 ### filters
 
-* `detailed`: (Optional) Default is false
-* `any_status`: (Optional) Default is false
-* `load_dbserver_cluster`:(Optional) Default is false
+* `detailed`: (Optional) Load entities with complete details. Default is false
+* `any_status`: (Optional) Get entity(s) if it satisfies query criteria irrespective of status (retrieve even deleted). Default is false
+* `load_dbserver_cluster`:(Optional) Load cluster info. Default is false
 * `timezone`:(Optional) Default is UTC
 
diff --git a/website/docs/d/ndb_clones.html.markdown b/website/docs/d/ndb_clones.html.markdown
index 8c49cb4b9..fe0418ba8 100644
--- a/website/docs/d/ndb_clones.html.markdown
+++ b/website/docs/d/ndb_clones.html.markdown
@@ -30,12 +30,12 @@ List all the clone present in Nutanix Database Service
 
 ### filters
 
-* `detailed`: (Optional) Default is false
-* `any_status`: (Optional) Default is false
-* `load_dbserver_cluster`: (Optional) Default is false
+* `detailed`: (Optional) Load entities with complete details. Default is false
+* `any_status`: (Optional) Get entity(s) if it satisfies query criteria irrespective of status (retrieve even deleted).
Default is false +* `load_dbserver_cluster`: (Optional) Load cluster info. Default is false * `timezone`: (Optional) Default is UTC -* `order_by_dbserver_cluster`: (Optional) Default is false -* `order_by_dbserver_logical_cluster`: (Optional) Default is false +* `order_by_dbserver_cluster`: (Optional) Sorted by dbserver cluster. Default is false +* `order_by_dbserver_logical_cluster`: (Optional) Sorted by dbserver logical cluster. Default is false ## Attribute Reference diff --git a/website/docs/r/ndb_clone.html.markdown b/website/docs/r/ndb_clone.html.markdown index 3409f2c50..dd668c6f8 100644 --- a/website/docs/r/ndb_clone.html.markdown +++ b/website/docs/r/ndb_clone.html.markdown @@ -7,12 +7,12 @@ description: |- # nutanix_ndb_clone -Provides a resource to perform the clone for database instance based on the input parameters. +Provides a resource to perform the clone of database instance based on the input parameters. ## Example Usage ```hcl -## resource for ndb_clone with Point in time given time machine name +## resource for cloning using Point in time given time machine name resource "nutanix_ndb_clone" "name" { time_machine_name = "test-pg-inst" From dccfcc288073a35b420ecec6e7ce4a49a761b63d Mon Sep 17 00:00:00 2001 From: Abhishek Date: Fri, 20 Jan 2023 15:32:03 +0530 Subject: [PATCH 16/18] added linked db and changed provider version to beta2 for examples --- client/era/era_service.go | 24 +- client/era/era_structs.go | 14 + examples/ndb/clone/main.tf | 61 +++- examples/ndb/database_instance/main.tf | 2 +- examples/ndb/database_register/main.tf | 2 +- examples/ndb/database_restore/main.tf | 2 +- examples/ndb/database_scale/main.tf | 2 +- examples/ndb/linked_databases/main.tf | 23 ++ .../ndb/linked_databases/terraform.tfvars | 4 + examples/ndb/linked_databases/variables.tf | 10 + examples/ndb/log_catchups/main.tf | 2 +- examples/ndb/profiles/main.tf | 2 +- examples/ndb/sla/main.tf | 2 +- nutanix/provider.go | 1 + nutanix/resource_nutanix_ndb_clone_test.go | 99 +++++ .../resource_nutanix_ndb_linked_databases.go | 340 ++++++++++++++++++ ...ource_nutanix_ndb_linked_databases_test.go | 41 +++ website/docs/r/ndb_profiles.html.markdown | 73 ++++ 18 files changed, 695 insertions(+), 9 deletions(-) create mode 100644 examples/ndb/linked_databases/main.tf create mode 100644 examples/ndb/linked_databases/terraform.tfvars create mode 100644 examples/ndb/linked_databases/variables.tf create mode 100644 nutanix/resource_nutanix_ndb_clone_test.go create mode 100644 nutanix/resource_nutanix_ndb_linked_databases.go create mode 100644 nutanix/resource_nutanix_ndb_linked_databases_test.go diff --git a/client/era/era_service.go b/client/era/era_service.go index 66bb1db6b..821c092bb 100644 --- a/client/era/era_service.go +++ b/client/era/era_service.go @@ -54,6 +54,8 @@ type Service interface { AuthorizeDBServer(ctx context.Context, id string, req []*string) (*AuthorizeDBServerResponse, error) DeAuthorizeDBServer(ctx context.Context, id string, req []*string) (*AuthorizeDBServerResponse, error) TimeMachineCapability(ctx context.Context, tmsID string) (*TimeMachineCapability, error) + CreateLinkedDatabase(ctx context.Context, id string, req *CreateLinkedDatabasesRequest) (*ProvisionDatabaseResponse, error) + DeleteLinkedDatabase(ctx context.Context, DBID string, linkedDBID string, req *DeleteLinkedDatabaseRequest) (*ProvisionDatabaseResponse, error) } type ServiceClient struct { @@ -597,7 +599,7 @@ func (sc ServiceClient) DeleteClone(ctx context.Context, cloneID string, req *De } func (sc ServiceClient) 
AuthorizeDBServer(ctx context.Context, tmsID string, req []*string) (*AuthorizeDBServerResponse, error) { - httpReq, err := sc.c.NewRequest(ctx, http.MethodPatch, fmt.Sprintf("/tms/%s/dbservers", tmsID), req) + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/tms/%s/dbservers", tmsID), req) if err != nil { return nil, err } @@ -628,3 +630,23 @@ func (sc ServiceClient) TimeMachineCapability(ctx context.Context, tmsID string) return res, sc.c.Do(ctx, httpReq, res) } + +func (sc ServiceClient) CreateLinkedDatabase(ctx context.Context, id string, req *CreateLinkedDatabasesRequest) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/databases/%s/linked-databases", id), req) + if err != nil { + return nil, err + } + + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteLinkedDatabase(ctx context.Context, id string, linkDBID string, req *DeleteLinkedDatabaseRequest) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/databases/%s/linked-databases/%s", id, linkDBID), req) + + if err != nil { + return nil, err + } + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} diff --git a/client/era/era_structs.go b/client/era/era_structs.go index 5743a16d8..ec5d20753 100644 --- a/client/era/era_structs.go +++ b/client/era/era_structs.go @@ -696,6 +696,7 @@ type BpgConfigs struct { BpgDBParam *BpgDBParam `json:"bpg_db_param"` } type InfoBpgConfig struct { + CreatedBy *string `json:"created_by,omitempty"` BpgConfigs *BpgConfigs `json:"bpg_configs"` } type Info struct { @@ -1398,3 +1399,16 @@ type LastContinuousSnapshot struct { Metadata *LastContinuousSnapshotMetadata `json:"metadata,omitempty"` LcmConfig *LcmConfig `json:"lcmConfig,omitempty"` } + +type LinkedDatabases struct { + DatabaseName *string `json:"databaseName,omitempty"` +} + +type CreateLinkedDatabasesRequest struct { + Databases []*LinkedDatabases `json:"databases,omitempty"` +} + +type DeleteLinkedDatabaseRequest struct { + Delete bool `json:"delete,omitempty"` + Forced bool `json:"forced,omitempty"` +} diff --git a/examples/ndb/clone/main.tf b/examples/ndb/clone/main.tf index 6d6f1f8f5..bab9146c5 100644 --- a/examples/ndb/clone/main.tf +++ b/examples/ndb/clone/main.tf @@ -16,7 +16,7 @@ provider "nutanix"{ } -## resource for ndb_clone with Point in time given time machine name +## resource for cloning using Point in time given time machine name resource "nutanix_ndb_clone" "name" { time_machine_name = "test-pg-inst" @@ -40,3 +40,62 @@ resource "nutanix_ndb_clone" "name" { db_password= "pass" } } + +## resource for cloning using snapshot given time machine name + +resource "nutanix_ndb_clone" "name" { + time_machine_name = "test-pg-inst" + name = "test-inst-tf-check" + nx_cluster_id = "{{ nx_Cluster_id }}" + ssh_public_key = "{{ sshkey }}" + snapshot_id= "{{ snapshot_id }}" + time_zone = "Asia/Calcutta" + create_dbserver = true + compute_profile_id = "{{ compute_profile_id }}" + network_profile_id ="{{ network_profile_id }}" + database_parameter_profile_id = "{{ databse_profile_id }}" + nodes{ + vm_name= "test_vm_clone" + compute_profile_id = "{{ compute_profile_id }}" + network_profile_id ="{{ network_profile_id }}" + nx_cluster_id = "{{ nx_Cluster_id }}" + } + postgresql_info{ + vm_name="test_vm_clone" + db_password= "pass" + } +} + +## resource for cloning with LCM Config with Registered DB Server VM + +resource 
"nutanix_ndb_clone" "name" { + time_machine_name = "test-pg-inst" + name = "test-inst-tf-check" + nx_cluster_id = "{{ nx_cluster_id }}" + ssh_public_key = "{{ public_key }}" + user_pitr_timestamp= "{{ point in time }}" + time_zone = "Asia/Calcutta" + create_dbserver = false + database_parameter_profile_id = "{{ database_paramter_id }}" + dbserver_id="{{ dbserver_id }}" + nodes{ + vm_name="test-era-vm-regis" + nx_cluster_id = "{{ nx_cluster_id }}" + dbserver_id="{{ dbserver_id }}" + } + lcm_config{ + database_lcm_config{ + expiry_details{ + expire_in_days = 3 + expiry_date_timezone = "Asia/Calcutta" + } + refresh_details{ + refresh_in_days = 2 + } + } + } + postgresql_info{ + vm_name="test-era-vm-regis" + db_password= "pass" + } +} \ No newline at end of file diff --git a/examples/ndb/database_instance/main.tf b/examples/ndb/database_instance/main.tf index 826a99923..c301983ba 100644 --- a/examples/ndb/database_instance/main.tf +++ b/examples/ndb/database_instance/main.tf @@ -2,7 +2,7 @@ terraform{ required_providers { nutanix = { source = "nutanix/nutanix" - version = "1.8.0-beta.1" + version = "1.8.0-beta.2" } } } diff --git a/examples/ndb/database_register/main.tf b/examples/ndb/database_register/main.tf index ff2374817..f12891e7f 100644 --- a/examples/ndb/database_register/main.tf +++ b/examples/ndb/database_register/main.tf @@ -2,7 +2,7 @@ terraform{ required_providers { nutanix = { source = "nutanix/nutanix" - version = "1.8.0" + version = "1.8.0-beta.2" } } } diff --git a/examples/ndb/database_restore/main.tf b/examples/ndb/database_restore/main.tf index 31cffdc52..0f47530cc 100644 --- a/examples/ndb/database_restore/main.tf +++ b/examples/ndb/database_restore/main.tf @@ -2,7 +2,7 @@ terraform{ required_providers { nutanix = { source = "nutanix/nutanix" - version = "1.8.0" + version = "1.8.0-beta.2" } } } diff --git a/examples/ndb/database_scale/main.tf b/examples/ndb/database_scale/main.tf index 9a243477a..5eb7e6d29 100644 --- a/examples/ndb/database_scale/main.tf +++ b/examples/ndb/database_scale/main.tf @@ -2,7 +2,7 @@ terraform{ required_providers { nutanix = { source = "nutanix/nutanix" - version = "1.8.0" + version = "1.8.0-beta.2" } } } diff --git a/examples/ndb/linked_databases/main.tf b/examples/ndb/linked_databases/main.tf new file mode 100644 index 000000000..8cdb8e094 --- /dev/null +++ b/examples/ndb/linked_databases/main.tf @@ -0,0 +1,23 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0-beta.2" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + +## resource to add linked databases with an instance + +resource "nutanix_ndb_linked_databases" "name" { + database_id= "{{ database_id }}" + database_name = "check" +} \ No newline at end of file diff --git a/examples/ndb/linked_databases/terraform.tfvars b/examples/ndb/linked_databases/terraform.tfvars new file mode 100644 index 000000000..4f5de990b --- /dev/null +++ b/examples/ndb/linked_databases/terraform.tfvars @@ -0,0 +1,4 @@ +#define values to the variables to be used in terraform file_username = "admin" +ndb_password = "password" +ndb_endpoint = "10.xx.xx.xx" +ndb_username = "username" diff --git a/examples/ndb/linked_databases/variables.tf b/examples/ndb/linked_databases/variables.tf new file mode 100644 index 000000000..1a0cb89bf --- /dev/null +++ b/examples/ndb/linked_databases/variables.tf @@ -0,0 +1,10 @@ +#define the type of 
variables to be used in terraform file +variable "ndb_username" { + type = string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} diff --git a/examples/ndb/log_catchups/main.tf b/examples/ndb/log_catchups/main.tf index 0a4765731..c812289db 100644 --- a/examples/ndb/log_catchups/main.tf +++ b/examples/ndb/log_catchups/main.tf @@ -2,7 +2,7 @@ terraform{ required_providers { nutanix = { source = "nutanix/nutanix" - version = "1.8.0" + version = "1.8.0-beta.2" } } } diff --git a/examples/ndb/profiles/main.tf b/examples/ndb/profiles/main.tf index bd957f96a..d23acf185 100644 --- a/examples/ndb/profiles/main.tf +++ b/examples/ndb/profiles/main.tf @@ -2,7 +2,7 @@ terraform{ required_providers { nutanix = { source = "nutanix/nutanix" - version = "1.8.0" + version = "1.8.0-beta.2" } } } diff --git a/examples/ndb/sla/main.tf b/examples/ndb/sla/main.tf index 9222903d9..76f3525f7 100644 --- a/examples/ndb/sla/main.tf +++ b/examples/ndb/sla/main.tf @@ -2,7 +2,7 @@ terraform{ required_providers { nutanix = { source = "nutanix/nutanix" - version = "1.8.0" + version = "1.8.0-beta.2" } } } diff --git a/nutanix/provider.go b/nutanix/provider.go index ad894a93a..2588770e8 100644 --- a/nutanix/provider.go +++ b/nutanix/provider.go @@ -243,6 +243,7 @@ func Provider() *schema.Provider { "nutanix_ndb_database_snapshot": resourceNutanixNDBDatabaseSnapshot(), "nutanix_ndb_clone": resourceNutanixNDBClone(), "nutanix_ndb_authorize_dbserver": resourceNutanixNDBAuthorizeDBServer(), + "nutanix_ndb_linked_databases": resourceNutanixNDBLinkedDB(), }, ConfigureContextFunc: providerConfigure, } diff --git a/nutanix/resource_nutanix_ndb_clone_test.go b/nutanix/resource_nutanix_ndb_clone_test.go new file mode 100644 index 000000000..8692ae551 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_clone_test.go @@ -0,0 +1,99 @@ +package nutanix + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceClone = "nutanix_ndb_clone.acctest-managed" + +func TestAccEra_Clonebasic(t *testing.T) { + r := randIntBetween(25, 35) + name := fmt.Sprintf("test-pg-inst-tf-clone-%d", r) + desc := "this is desc" + vmName := fmt.Sprintf("testvm-%d", r) + sshKey := testVars.SSHKey + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraCloneConfig(name, desc, vmName, sshKey), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceClone, "name", name), + resource.TestCheckResourceAttr(resourceClone, "description", desc), + resource.TestCheckResourceAttrSet(resourceClone, "date_created"), + resource.TestCheckResourceAttrSet(resourceClone, "database_name"), + resource.TestCheckResourceAttrSet(resourceClone, "database_status"), + resource.TestCheckResourceAttrSet(resourceClone, "metadata.#"), + resource.TestCheckResourceAttrSet(resourceClone, "time_machine.#"), + resource.TestCheckResourceAttrSet(resourceClone, "linked_databases.#"), + ), + }, + }, + }) +} + +func testAccEraCloneConfig(name, desc, vmName, sshKey string) string { + return fmt.Sprintf(` + data "nutanix_ndb_profiles" "p"{ + } + data "nutanix_ndb_clusters" "clusters"{} + + locals { + profiles_by_type = { + for p in data.nutanix_ndb_profiles.p.profiles : p.type => p... 
+ } + compute_profiles = { + for p in local.profiles_by_type.Compute: p.name => p + } + network_profiles = { + for p in local.profiles_by_type.Network: p.name => p + } + database_parameter_profiles = { + for p in local.profiles_by_type.Database_Parameter: p.name => p + } + + clusters = { + for p in data.nutanix_ndb_clusters.clusters.clusters: p.name => p + } + } + + data "nutanix_ndb_time_machines" "test1" {} + + data "nutanix_ndb_time_machine" "test"{ + time_machine_id = data.nutanix_ndb_time_machines.test1.time_machines.0.id + } + + data "nutanix_ndb_tms_capability" "test"{ + time_machine_id = data.nutanix_ndb_time_machines.test1.time_machines.0.id + } + + resource "nutanix_ndb_clone" "acctest-managed" { + time_machine_id = data.nutanix_ndb_time_machine.test.id + name = "%[1]s" + description = "%[2]s" + nx_cluster_id = local.clusters.EraCluster.id + ssh_public_key = "%[4]s" + snapshot_id = data.nutanix_ndb_tms_capability.test.last_continuous_snapshot.0.id + time_zone = "Asia/Calcutta" + create_dbserver = true + compute_profile_id = local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id + network_profile_id = local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + database_parameter_profile_id = local.database_parameter_profiles.DEFAULT_POSTGRES_PARAMS.id + nodes{ + vm_name="%[3]s" + compute_profile_id = local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id + network_profile_id = local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + nx_cluster_id = local.clusters.EraCluster.id + } + postgresql_info{ + vm_name="%[3]s" + db_password= "pass" + # dbserver_description = "des" + } + } + `, name, desc, vmName, sshKey) +} diff --git a/nutanix/resource_nutanix_ndb_linked_databases.go b/nutanix/resource_nutanix_ndb_linked_databases.go new file mode 100644 index 000000000..4c910480c --- /dev/null +++ b/nutanix/resource_nutanix_ndb_linked_databases.go @@ -0,0 +1,340 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBLinkedDB() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBLinkedDBCreate, + ReadContext: resourceNutanixNDBLinkedDBRead, + UpdateContext: resourceNutanixNDBLinkedDBUpdate, + DeleteContext: resourceNutanixNDBLinkedDBDelete, + Schema: map[string]*schema.Schema{ + "database_id": { + Type: schema.TypeString, + Required: true, + }, + + "database_name": { + Type: schema.TypeString, + Optional: true, + }, + + // computed values + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "database_status": { + Type: schema.TypeString, + Computed: true, + }, + "parent_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_linked_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + "info": { + Type: 
schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "created_by": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "metadata": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "metric": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceNutanixNDBLinkedDBCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.CreateLinkedDatabasesRequest{} + + databaseID := "" + databaseName := "" + SetID := "" + if dbID, dok := d.GetOk("database_id"); dok { + databaseID = dbID.(string) + } + + dbNames := []*era.LinkedDatabases{} + + if dbName, ok := d.GetOk("database_name"); ok { + dbNames = append(dbNames, &era.LinkedDatabases{ + DatabaseName: utils.StringPtr(dbName.(string)), + }) + databaseName = dbName.(string) + } + + req.Databases = dbNames + + // call the Linked Databases API + + resp, err := conn.Service.CreateLinkedDatabase(ctx, databaseID, req) + + if err != nil { + return diag.FromErr(err) + } + + // Get Operation ID from response of ProvisionDatabaseResponse and poll for the operation to get completed. + opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for databases (%s) to add: %s", resp.Entityid, errWaitTask) + } + + // call the databases API + + response, er := conn.Service.GetDatabaseInstance(ctx, resp.Entityid) + if er != nil { + return diag.FromErr(er) + } + + linkDbs := response.Linkeddatabases + + for _, v := range linkDbs { + if v.DatabaseName == databaseName { + SetID = v.ID + break + } + } + + d.SetId(SetID) + log.Printf("NDB linked databases with %s id created successfully", d.Id()) + + return resourceNutanixNDBLinkedDBRead(ctx, d, meta) +} +func resourceNutanixNDBLinkedDBRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + databaseID := d.Get("database_id") + + response, er := conn.Service.GetDatabaseInstance(ctx, databaseID.(string)) + if er != nil { + return diag.FromErr(er) + } + + linkDbs := response.Linkeddatabases + currentLinkedDB := &era.Linkeddatabases{} + + for _, v := range linkDbs { + if v.ID == d.Id() { + *currentLinkedDB = v + break + } + } + + if err := d.Set("database_name", currentLinkedDB.DatabaseName); err != nil { + return diag.FromErr(err) + } + if err := d.Set("database_status", currentLinkedDB.Databasestatus); err != nil { + return diag.FromErr(err) + } + if err := d.Set("date_created", currentLinkedDB.Datecreated); err != nil { 
+		return diag.FromErr(err)
+	}
+	if err := d.Set("date_modified", currentLinkedDB.Datemodified); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("description", currentLinkedDB.Description); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("info", flattenLinkedDBInfo(currentLinkedDB.Info)); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("metadata", currentLinkedDB.Metadata); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("metric", currentLinkedDB.Metric); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("name", currentLinkedDB.Name); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("owner_id", currentLinkedDB.Ownerid); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("parent_database_id", currentLinkedDB.ParentDatabaseID); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("parent_linked_database_id", currentLinkedDB.ParentLinkedDatabaseID); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("snapshot_id", currentLinkedDB.SnapshotID); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("status", currentLinkedDB.Status); err != nil {
+		return diag.FromErr(err)
+	}
+	if err := d.Set("timezone", currentLinkedDB.TimeZone); err != nil {
+		return diag.FromErr(err)
+	}
+
+	return nil
+}
+
+func resourceNutanixNDBLinkedDBUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	// no updatable fields are exposed for linked databases, so update is a no-op
+	return nil
+}
+
+func resourceNutanixNDBLinkedDBDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*Client).Era
+
+	dbID := d.Get("database_id")
+
+	req := &era.DeleteLinkedDatabaseRequest{
+		Delete: true,
+		Forced: true,
+	}
+
+	// call the linked databases delete API
+	res, err := conn.Service.DeleteLinkedDatabase(ctx, dbID.(string), d.Id(), req)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	log.Printf("Operation to delete linked database with id %s has started, operation id: %s", d.Id(), res.Operationid)
+	opID := res.Operationid
+	if opID == "" {
+		return diag.Errorf("error: operation ID is an empty string")
+	}
+	opReq := era.GetOperationRequest{
+		OperationID: opID,
+	}
+
+	log.Printf("polling for operation with id: %s\n", opID)
+
+	// Poll for operation here - operation GET call
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"PENDING"},
+		Target:  []string{"COMPLETED", "FAILED"},
+		Refresh: eraRefresh(ctx, conn, opReq),
+		Timeout: d.Timeout(schema.TimeoutDelete),
+		Delay:   eraDelay,
+	}
+
+	if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil {
+		return diag.Errorf("error waiting for linked database (%s) to be deleted: %s", d.Id(), errWaitTask)
+	}
+	log.Printf("NDB linked database with id %s deleted successfully", d.Id())
+	return nil
+}
+
+func flattenLinkedDBInfo(pr era.Info) []interface{} {
+	res := make([]interface{}, 0)
+	info := make(map[string]interface{})
+
+	if pr.Secureinfo != nil {
+		info["secure_info"] = pr.Secureinfo
+	}
+
+	if pr.Info != nil {
+		inf := make([]interface{}, 0)
+		infval := make(map[string]interface{})
+
+		if pr.Info.CreatedBy != nil {
+			infval["created_by"] = pr.Info.CreatedBy
+		}
+
+		inf = append(inf, infval)
+		info["info"] = inf
+	}
+
+	res = append(res, info)
+	return res
+}
diff --git a/nutanix/resource_nutanix_ndb_linked_databases_test.go b/nutanix/resource_nutanix_ndb_linked_databases_test.go
new file mode 100644
index 000000000..c09ad6bca
--- /dev/null
+++ b/nutanix/resource_nutanix_ndb_linked_databases_test.go
@@ -0,0 +1,41 @@
+package nutanix
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+)
+
+const resourceNameLinkedDB = "nutanix_ndb_linked_databases.acctest-managed"
+
+func TestAccEraLinkedDB_basic(t *testing.T) {
+	name := "test-linked-tf"
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccEraPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccEraLinkedDB(name),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(resourceNameLinkedDB, "name", name),
+					resource.TestCheckResourceAttrSet(resourceNameLinkedDB, "id"),
+					resource.TestCheckResourceAttrSet(resourceNameLinkedDB, "status"),
+					resource.TestCheckResourceAttrSet(resourceNameLinkedDB, "owner_id"),
+				),
+			},
+		},
+	})
+}
+
+func testAccEraLinkedDB(name string) string {
+	return fmt.Sprintf(
+		`
+	data "nutanix_ndb_databases" "test1" {}
+
+	resource "nutanix_ndb_linked_databases" "acctest-managed" {
+		database_id   = data.nutanix_ndb_databases.test1.database_instances.0.id
+		database_name = "%[1]s"
+	}
+	`, name)
+}
diff --git a/website/docs/r/ndb_profiles.html.markdown b/website/docs/r/ndb_profiles.html.markdown
index 23daef578..d5ca2ad6a 100644
--- a/website/docs/r/ndb_profiles.html.markdown
+++ b/website/docs/r/ndb_profiles.html.markdown
@@ -111,6 +111,79 @@ Provides a resource to create profiles (Software, Network, Database Parameter, C
 * `network_profile` : (Optional) Network Profile
 * `database_parameter_profile`: (Optional) Database Parameter Profile
+
+### compute_profile
+The following arguments are supported to create a compute profile; an illustrative sketch follows the software profile arguments below.
+
+* `cpus`: (Optional) number of vCPUs for the database server VM.
+* `core_per_cpu`: (Optional) number of cores per vCPU for the database server VM.
+* `memory_size`: (Optional) amount of memory for the database server VM.
+
+### software_profile
+Ensure that you have registered an existing PostgreSQL database server VM with NDB. NDB uses a registered database server VM to create a software profile.
+
+* `topology`: (Required) Topology of the software profile. Allowed values are "cluster" and "single".
+
+* `postgres_database`: (Optional) Software profile info about the postgres database.
+* `postgres_database.source_dbserver_id`: source dbserver id where the postgres software will be installed.
+* `postgres_database.base_profile_version_name`: name for the software profile version.
+* `postgres_database.base_profile_version_description`: description for the software profile version.
+* `postgres_database.os_notes`: a note to provide additional information about the operating system.
+* `postgres_database.db_software_notes`: a note to provide additional information about the database software.
+
+* `available_cluster_ids`: specify the Nutanix clusters where this profile is available.
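+
+For illustration, here is a minimal sketch of a compute profile and a software profile. It is a sketch under stated assumptions, not a definitive example: the resource type `nutanix_ndb_profile`, the top-level `name`, `description`, and `engine_type` arguments, and all IDs and values are assumptions to verify against the provider schema.
+
+```hcl
+# hypothetical compute profile; argument names follow the list above
+resource "nutanix_ndb_profile" "compute" {
+  name        = "compute-profile-tf"
+  description = "compute profile created via terraform"
+
+  compute_profile {
+    cpus         = 1
+    core_per_cpu = 2
+    memory_size  = 2
+  }
+}
+
+# hypothetical software profile based on a registered dbserver VM
+resource "nutanix_ndb_profile" "software" {
+  name        = "software-profile-tf"
+  description = "software profile created via terraform"
+  engine_type = "postgres_database" # assumed top-level argument
+
+  software_profile {
+    topology = "single"
+    postgres_database {
+      source_dbserver_id               = "<registered-dbserver-id>"
+      base_profile_version_name        = "1.0"
+      base_profile_version_description = "initial version"
+    }
+    available_cluster_ids = ["<cluster-uuid>"]
+  }
+}
+```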
+
+### network_profile
+A network profile specifies the VLAN for the new database server VM. You can add one or more NICs to segment the network traffic of the database server VM or server cluster. An illustrative sketch follows the argument list below.
+
+* `topology`: (Required) Topology supported for the network profile. Allowed values are "cluster" and "single".
+
+* `postgres_database`: (Optional) Postgres info to create a network profile.
+
+* `postgres_database.single_instance`: (Optional) Info for the postgres database to create a single-instance network profile.
+* `postgres_database.single_instance.vlan_name`: (Required) specify the VLAN that provides the IP address used to connect to the database from the public network.
+* `postgres_database.single_instance.enable_ip_address_selection`: (Optional) If Advanced Network Segmentation is enabled, this VLAN must be a static VLAN, and this attribute must be set to true.
+
+* `postgres_database.ha_instance`: (Optional) Info for creating a network profile for an HA instance.
+* `postgres_database.ha_instance.vlan_name`: (Required) specify the VLANs for the network.
+* `postgres_database.ha_instance.cluster_name`: (Required) specify the cluster names associated with the given VLANs.
+* `postgres_database.ha_instance.cluster_id`: (Optional) specify the cluster ids associated with the given VLANs.
+* `postgres_database.ha_instance.num_of_clusters`: (Required) number of clusters attached to the network profile.
+
+* `version_cluster_association`: (Optional) cluster associated with the VLAN. This is used with single-instance postgres databases.
+* `version_cluster_association.nx_cluster_id`: (Required) cluster id for the associated VLAN.
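+
+A hedged sketch of a single-instance network profile, under the same assumptions as the compute/software example above (resource type and top-level arguments are unverified; the VLAN name is a placeholder):
+
+```hcl
+resource "nutanix_ndb_profile" "network" {
+  name        = "network-profile-tf"
+  description = "network profile created via terraform"
+  engine_type = "postgres_database" # assumed top-level argument
+
+  network_profile {
+    topology = "single"
+    postgres_database {
+      single_instance {
+        vlan_name = "vlan.154" # placeholder VLAN
+      }
+    }
+  }
+}
+```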
+
+### database_parameter_profile
+A database parameter profile is a template of custom database parameters that you want to apply to your database. An illustrative sketch follows the argument list below.
+
+* `postgres_database`: (Optional) Database parameters supported for postgres.
+* `postgres_database.max_connections`: (Optional) Determines the maximum number of concurrent connections to the database server. The default is 100.
+* `postgres_database.max_replication_slots`: (Optional) Specifies the maximum number of replication slots that the server can support. The default is zero. wal_level must be set to archive or higher to allow replication slots to be used. Setting it to a lower value than the number of currently existing replication slots will prevent the server from starting.
+* `postgres_database.effective_io_concurrency`: (Optional) Sets the number of concurrent disk I/O operations that PostgreSQL expects can be executed simultaneously. Raising this value will increase the number of I/O operations that any individual PostgreSQL session attempts to initiate in parallel.
+* `postgres_database.timezone`: (Optional) Sets the time zone for displaying and interpreting time stamps. Default is UTC.
+* `postgres_database.max_prepared_transactions`: (Optional) Sets the maximum number of transactions that can be in the prepared state simultaneously. Setting this parameter to zero (which is the default) disables the prepared-transaction feature.
+* `postgres_database.max_locks_per_transaction`: (Optional) This parameter controls the average number of object locks allocated for each transaction; individual transactions can lock more objects as long as the locks of all transactions fit in the lock table. Default is 64.
+* `postgres_database.max_wal_senders`: (Optional) Specifies the maximum number of concurrent connections from standby servers or streaming base backup clients (i.e., the maximum number of simultaneously running WAL sender processes). The default is 10.
+* `postgres_database.max_worker_processes`: (Optional) Sets the maximum number of background processes that the system can support. The default is 8.
+* `postgres_database.min_wal_size`: (Optional) As long as WAL disk usage stays below this setting, old WAL files are always recycled for future use at a checkpoint, rather than removed. This can be used to ensure that enough WAL space is reserved to handle spikes in WAL usage, for example when running large batch jobs. The default is 80 MB.
+* `postgres_database.max_wal_size`: (Optional) Maximum size to let the WAL grow to between automatic WAL checkpoints. The default is 1 GB.
+* `postgres_database.checkpoint_timeout`: (Optional) Sets the maximum time between automatic WAL checkpoints. A higher value can reduce the I/O load on your system, especially when using large values for shared_buffers, at the cost of a longer recovery time after a reboot. Default is 5min.
+* `postgres_database.autovacuum`: (Optional) Controls whether the server should run the autovacuum launcher daemon. This is on by default; however, track_counts must also be enabled for autovacuum to work.
+* `postgres_database.checkpoint_completion_target`: (Optional) Specifies the target of checkpoint completion as a fraction of the total time between checkpoints, i.e., the time spent flushing dirty buffers during a checkpoint as a fraction of the checkpoint interval. Formula: (checkpoint_timeout - 2min) / checkpoint_timeout. The default is 0.5.
+* `postgres_database.autovacuum_freeze_max_age`: (Optional) Age at which to autovacuum a table to prevent transaction ID wraparound. Default is 200000000.
+* `postgres_database.autovacuum_vacuum_threshold`: (Optional) Minimum number of tuple updates or deletes prior to vacuum. Default is 50.
+* `postgres_database.autovacuum_vacuum_scale_factor`: (Optional) Number of tuple updates or deletes prior to vacuum as a fraction of reltuples. Default is 0.2.
+* `postgres_database.autovacuum_work_mem`: (Optional) Sets the maximum memory to be used by each autovacuum worker process. Unit is KB. Default is -1.
+* `postgres_database.autovacuum_max_workers`: (Optional) Sets the maximum number of simultaneously running autovacuum worker processes. Default is 3.
+* `postgres_database.autovacuum_vacuum_cost_delay`: (Optional) Specifies the cost delay value, in milliseconds, used in automatic VACUUM operations. Default is 2ms.
+* `postgres_database.wal_buffers`: (Optional) Sets the number of disk-page buffers in shared memory for WAL, i.e., the amount of shared memory used for WAL data that has not yet been written to disk. The default is -1.
+* `postgres_database.synchronous_commit`: (Optional) Sets the current transaction's synchronization level, i.e., whether transaction commit will wait for WAL records to be written to disk before the command returns a success indication to the client. Default is on.
+* `postgres_database.random_page_cost`: (Optional) Sets the planner's estimate of the cost of a non-sequentially-fetched disk page. The default is 4.0.
+* `postgres_database.wal_keep_segments`: (Optional) Specifies the minimum number of past WAL file segments kept in the pg_wal directory for standby servers. Default is 700.
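+
+And a sketch of a database parameter profile overriding a few of the parameters listed above, under the same assumptions as the earlier examples; whether values are passed as strings or numbers is a schema detail to verify:
+
+```hcl
+resource "nutanix_ndb_profile" "db_params" {
+  name        = "db-params-profile-tf"
+  description = "database parameter profile created via terraform"
+  engine_type = "postgres_database" # assumed top-level argument
+
+  database_parameter_profile {
+    postgres_database {
+      max_connections    = "100"  # value types are assumptions
+      checkpoint_timeout = "5min"
+      autovacuum         = "on"
+      max_wal_size       = "1GB"
+    }
+  }
+}
+```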
+
 ## Attributes Reference
 
 * `status`: status of profile

From d095eb8024a7cc0a1a5a4651645f06e286ba6cef Mon Sep 17 00:00:00 2001
From: Abhishek
Date: Fri, 20 Jan 2023 17:17:38 +0530
Subject: [PATCH 17/18] clone tcs

---
 nutanix/resource_nutanix_ndb_clone.go      | 32 +++++++++++++++++++++-
 nutanix/resource_nutanix_ndb_clone_test.go |  3 +-
 2 files changed, 33 insertions(+), 2 deletions(-)

diff --git a/nutanix/resource_nutanix_ndb_clone.go b/nutanix/resource_nutanix_ndb_clone.go
index d28694aea..4a0e250ae 100644
--- a/nutanix/resource_nutanix_ndb_clone.go
+++ b/nutanix/resource_nutanix_ndb_clone.go
@@ -249,6 +249,26 @@ func resourceNutanixNDBClone() *schema.Resource {
 			"actionarguments": actionArgumentsSchema(),
 
 			// Computed values
+			"properties": {
+				Type:        schema.TypeList,
+				Description: "List of all the properties",
+				Computed:    true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"name": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Name of the property",
+						},
+						"value": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: "Value of the property",
+						},
+					},
+				},
+			},
+
 			"owner_id": {
 				Type:     schema.TypeString,
 				Computed: true,
@@ -318,6 +338,10 @@ func resourceNutanixNDBClone() *schema.Resource {
 				Type:     schema.TypeString,
 				Computed: true,
 			},
+			"parent_time_machine_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
 			"parent_database_id": {
 				Type:     schema.TypeString,
 				Computed: true,
@@ -407,7 +431,13 @@ func resourceNutanixNDBCloneCreate(ctx context.Context, d *schema.ResourceData,
 func resourceNutanixNDBCloneRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	conn := meta.(*Client).Era
 
-	resp, err := conn.Service.GetClone(ctx, d.Id(), "", nil)
+	filterParams := &era.FilterParams{}
+	filterParams.Detailed = "false"
+	filterParams.AnyStatus = "false"
+	filterParams.LoadDBServerCluster = "false"
+	filterParams.TimeZone = "UTC"
+
+	resp, err := conn.Service.GetClone(ctx, d.Id(), "", filterParams)
 	if err != nil {
 		return diag.FromErr(err)
 	}
diff --git a/nutanix/resource_nutanix_ndb_clone_test.go b/nutanix/resource_nutanix_ndb_clone_test.go
index 8692ae551..61f35e503 100644
--- a/nutanix/resource_nutanix_ndb_clone_test.go
+++ b/nutanix/resource_nutanix_ndb_clone_test.go
@@ -24,11 +24,12 @@ func TestAccEra_Clonebasic(t *testing.T) {
 				Check: resource.ComposeTestCheckFunc(
 					resource.TestCheckResourceAttr(resourceClone, "name", name),
 					resource.TestCheckResourceAttr(resourceClone, "description", desc),
+					resource.TestCheckResourceAttr(resourceClone, "clone", "true"),
 					resource.TestCheckResourceAttrSet(resourceClone, "date_created"),
 					resource.TestCheckResourceAttrSet(resourceClone, "database_name"),
 					resource.TestCheckResourceAttrSet(resourceClone, "database_status"),
 					resource.TestCheckResourceAttrSet(resourceClone, "metadata.#"),
-					resource.TestCheckResourceAttrSet(resourceClone, "time_machine.#"),
+					resource.TestCheckResourceAttrSet(resourceClone, "database_nodes.#"),
 					resource.TestCheckResourceAttrSet(resourceClone, "linked_databases.#"),
 				),
 			},

From 0a6b73a91aa8506b5502f47c07971815736a2aea Mon Sep 17 00:00:00 2001
From: Abhishek
Date: Fri, 20 Jan 2023 17:23:58 +0530
Subject: [PATCH 18/18] changeLog

---
 CHANGELOG.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 631c89d92..ad6c83114 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@
   - nutanix_ndb_log_catchups
   - nutanix_ndb_authorize_dbservers
   - nutanix_ndb_software_version_profile
+  - nutanix_ndb_linked_databases
 
 New Data Sources:
   - nutanix_ndb_snapshot
@@ -31,7 +32,7 @@
   - Support for HA instance in nutanix_ndb_database resource. [\#518](https://github.com/nutanix/terraform-provider-nutanix/pull/518)
   - Improving the error when server is unreachable. [\#530](https://github.com/nutanix/terraform-provider-nutanix/pull/530)
   - Fetching of database based on database_type filter [\#513](https://github.com/nutanix/terraform-provider-nutanix/pull/513)
-  - Support of Tags and Maintance Window in provisioning [\#528](https://github.com/nutanix/terraform-provider-nutanix/pull/528)
+  - Support for Tags and Maintenance Window in provisioning [\#528](https://github.com/nutanix/terraform-provider-nutanix/pull/528)
 
 ## 1.8.0-beta.1 (Oct 12, 2022)