diff --git a/CHANGELOG.md b/CHANGELOG.md index b4d7522e1..0bff678e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,24 @@ +## 1.8.0-beta.1 (Oct 12, 2022) + +[Full Changelog](https://github.com/nutanix/terraform-provider-nutanix/compare/v1.7.1...v1.8.0-beta.1) + +**New Feature:** + +- Feat/1.8.0-beta.1 Release with Nutanix Database Service based resources and datasources [\#501](https://github.com/nutanix/terraform-provider-nutanix/pull/501) + + New Data Sources : + - nutanix_ndb_cluster + - nutanix_ndb_clusters + - nutanix_ndb_database + - nutanix_ndb_databases + - nutanix_ndb_profile + - nutanix_ndb_profiles + - nutanix_ndb_sla + - nutanix_ndb_slas + + New Resources : + - nutanix_ndb_database + ## 1.7.1 (August 31, 2022) [Full Changelog](https://github.com/nutanix/terraform-provider-nutanix/compare/v1.7.0...v1.7.1) diff --git a/README.md b/README.md index fe7a8983b..40bed23c5 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ Terraform provider plugin to integrate with Nutanix Enterprise Cloud -NOTE: The latest version of the Nutanix provider is [v1.7.1](https://github.com/nutanix/terraform-provider-nutanix/releases/tag/v1.7.1) +NOTE: The latest version of the Nutanix provider is [v1.8.0-beta.1](https://github.com/nutanix/terraform-provider-nutanix/releases/tag/v1.8.0-beta.1) Modules based on Terraform Nutanix Provider can be found here : [Modules](https://github.com/nutanix/terraform-provider-nutanix/tree/master/modules) ## Build, Quality Status @@ -72,6 +72,13 @@ Foundation Central based examples : https://github.com/nutanix/terraform-provide Foundation Central based modules and examples : Foundation based modules & examples : https://github.com/nutanix/terraform-provider-nutanix/blob/master/modules/foundationCentral/ +## Nutanix Database Service +> The 1.8.0-beta.1 release of the provider has N-1 compatibility with Nutanix Database Service. This release was tested against NDB versions 2.4 and 2.4.1. + +Note: For the 1.8.0-beta.1 release, only the postgres database type is qualified and officially supported. + +Check out the example : https://github.com/nutanix/terraform-provider-nutanix/blob/master/examples/ndb/database_instance + ## Example Usage See the Examples folder for a handful of main.tf demos as well as some pre-compiled binaries. @@ -128,11 +135,25 @@ provider "nutanix" { } ``` +## Additional fields for using Nutanix Database Service: + +* **ndb_username** - (Optional) Username of the Nutanix Database Service server +* **ndb_password** - (Optional) Password of the Nutanix Database Service server +* **ndb_endpoint** - (Optional) IP address of the Nutanix Database Service server + +```hcl +provider "nutanix" { + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint +} +``` + ### Provider Configuration Requirements & Warnings Since Foundation support was released in 1.5.0-beta, the provider configuration accommodates Prism Central and Foundation API connection details. **It will show warnings for disabled API connections based on the attributes given in the provider configuration, in the format shown above**.
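For example, a single provider block can carry connection details for more than one service. The sketch below is illustrative only (the endpoint IPs and variable names are placeholders, not values from this PR): it enables the Prism Central and NDB connections, and because foundation_endpoint is left unset, Foundation based resources and data sources stay disabled and a warning is reported for that connection.

```hcl
provider "nutanix" {
  # Prism Central connection
  username = var.user
  password = var.password
  endpoint = "10.0.0.10" # placeholder Prism Central IP

  # Nutanix Database Service (NDB) connection
  ndb_username = var.ndb_username
  ndb_password = var.ndb_password
  ndb_endpoint = "10.0.0.20" # placeholder NDB server IP

  # foundation_endpoint is not set, so Foundation based
  # resources and data sources are disabled with a warning.
}
```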
Below are the required attributes for the corresponding provider components : * endpoint, username and password are required fields for using Prism Central & Karbon based resources and data sources * foundation_endpoint is a required field for using Foundation based resources and data sources - +* ndb_username, ndb_password and ndb_endpoint are required fields for using NDB based resources and data sources ## Resources * nutanix_access_control_policy @@ -161,6 +182,7 @@ From foundation getting released in 1.5.0-beta, provider configuration will acco * nutanix_static_routes * nutanix_floating_ip * nutanix_user_groups +* nutanix_ndb_database ## Data Sources @@ -215,6 +237,14 @@ From foundation getting released in 1.5.0-beta, provider configuration will acco * nutanix_floating_ip * nutanix_floating_ips * nutanix_static_routes +* nutanix_ndb_cluster +* nutanix_ndb_clusters +* nutanix_ndb_database +* nutanix_ndb_databases +* nutanix_ndb_profile +* nutanix_ndb_profiles +* nutanix_ndb_sla +* nutanix_ndb_slas ## Quick Install diff --git a/client/client.go b/client/client.go index 1715b5dc1..088377798 100644 --- a/client/client.go +++ b/client/client.go @@ -72,6 +72,9 @@ type Credentials struct { FoundationEndpoint string // Required field for connecting to foundation VM APIs FoundationPort string // Port for connecting to foundation VM APIs RequiredFields map[string][]string // RequiredFields is client to its required fields mapping for validations and usage in every client + NdbEndpoint string // Required field for connecting to NDB (Era) APIs + NdbUsername string // Username for NDB API authentication + NdbPassword string // Password for NDB API authentication } // AdditionalFilter specification for client side filters diff --git a/client/client_test.go b/client/client_test.go index f15692cc6..dc9ff73c7 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -27,14 +27,14 @@ func setup() (*http.ServeMux, *Client, *httptest.Server) { mux := http.NewServeMux() server := httptest.NewServer(mux) - client, _ := NewClient(&Credentials{"", "username", "password", "", "", true, false, "", "", "", nil}, testUserAgent, testAbsolutePath, false) + client, _ := NewClient(&Credentials{"", "username", "password", "", "", true, false, "", "", "", nil, "", "", ""}, testUserAgent, testAbsolutePath, false) client.BaseURL, _ = url.Parse(server.URL) return mux, client, server } func TestNewClient(t *testing.T) { - c, err := NewClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil}, testUserAgent, testAbsolutePath, false) + c, err := NewClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil, "", "", ""}, testUserAgent, testAbsolutePath, false) if err != nil { t.Errorf("Unexpected Error: %v", err) @@ -52,7 +52,7 @@ func TestNewClient(t *testing.T) { } func TestNewBaseClient(t *testing.T) { - c, err := NewBaseClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil}, testAbsolutePath, true) + c, err := NewBaseClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil, "", "", ""}, testAbsolutePath, true) if err != nil { t.Errorf("Unexpected Error: %v", err) } @@ -69,7 +69,7 @@ func TestNewBaseClient(t *testing.T) { } func TestNewRequest(t *testing.T) { - c, err := NewClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil}, testUserAgent, testAbsolutePath, false) + c, err := NewClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil, "", "", ""}, testUserAgent, 
testAbsolutePath, false) if err != nil { t.Errorf("Unexpected Error: %v", err) @@ -93,7 +93,7 @@ func TestNewRequest(t *testing.T) { } func TestNewUploadRequest(t *testing.T) { - c, err := NewClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil}, testUserAgent, testAbsolutePath, true) + c, err := NewClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil, "", "", ""}, testUserAgent, testAbsolutePath, true) if err != nil { t.Errorf("Unexpected Error: %v", err) @@ -137,7 +137,7 @@ func TestNewUploadRequest(t *testing.T) { } func TestNewUnAuthRequest(t *testing.T) { - c, err := NewClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil}, testUserAgent, testAbsolutePath, true) + c, err := NewClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil, "", "", ""}, testUserAgent, testAbsolutePath, true) if err != nil { t.Errorf("Unexpected Error: %v", err) @@ -176,7 +176,7 @@ func TestNewUnAuthRequest(t *testing.T) { } func TestNewUnAuthFormEncodedRequest(t *testing.T) { - c, err := NewClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil}, testUserAgent, testAbsolutePath, true) + c, err := NewClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil, "", "", ""}, testUserAgent, testAbsolutePath, true) if err != nil { t.Errorf("Unexpected Error: %v", err) @@ -219,7 +219,7 @@ func TestNewUnAuthFormEncodedRequest(t *testing.T) { } func TestNewUnAuthUploadRequest(t *testing.T) { - c, err := NewClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil}, testUserAgent, testAbsolutePath, true) + c, err := NewClient(&Credentials{"foo.com", "username", "password", "", "", true, false, "", "", "", nil, "", "", ""}, testUserAgent, testAbsolutePath, true) if err != nil { t.Errorf("Unexpected Error: %v", err) diff --git a/client/era/era.go b/client/era/era.go new file mode 100644 index 000000000..d4e9988a8 --- /dev/null +++ b/client/era/era.go @@ -0,0 +1,49 @@ +package era + +import ( + "fmt" + "strings" + + "github.com/terraform-providers/terraform-provider-nutanix/client" +) + +const ( + libraryVersion = "v0.9" + absolutePath = "era/" + libraryVersion + clientName = "ndb" +) + +type Client struct { + client *client.Client + Service Service +} + +func NewEraClient(credentials client.Credentials) (*Client, error) { + var baseClient *client.Client + + // check if all required fields are present; otherwise create an empty client + if credentials.NdbUsername != "" && credentials.NdbPassword != "" && credentials.NdbEndpoint != "" { + credentials.URL = credentials.NdbEndpoint + credentials.Password = credentials.NdbPassword + credentials.Username = credentials.NdbUsername + + c, err := client.NewBaseClient(&credentials, absolutePath, false) + if err != nil { + return nil, err + } + baseClient = c + } else { + errorMsg := fmt.Sprintf("NDB Client is missing. 
"+ + "Please provide required details - %s in provider configuration.", strings.Join(credentials.RequiredFields[clientName], ", ")) + + baseClient = &client.Client{ErrorMsg: errorMsg} + } + + era := &Client{ + client: baseClient, + Service: ServiceClient{ + c: baseClient, + }, + } + return era, nil +} diff --git a/client/era/era_service.go b/client/era/era_service.go new file mode 100644 index 000000000..c21b51cfb --- /dev/null +++ b/client/era/era_service.go @@ -0,0 +1,269 @@ +package era + +import ( + "context" + "fmt" + "net/http" + + "github.com/terraform-providers/terraform-provider-nutanix/client" +) + +type Service interface { + ProvisionDatabase(ctx context.Context, req *ProvisionDatabaseRequest) (*ProvisionDatabaseResponse, error) + ListDatabaseTypes() (*ListDatabaseTypesResponse, error) + ListDatabaseParams() (*ListDatabaseParamsResponse, error) + ListDatabaseServerVMs() (*ListDatabaseServerVMResponse, error) + GetOperation(GetOperationRequest) (*GetOperationResponse, error) + GetDatabaseInstance(ctx context.Context, uuid string) (*GetDatabaseResponse, error) + ListDatabaseInstance(ctx context.Context) (*ListDatabaseInstance, error) + UpdateDatabase(ctx context.Context, req *UpdateDatabaseRequest, uuid string) (*UpdateDatabaseResponse, error) + DeleteDatabase(ctx context.Context, req *DeleteDatabaseRequest, uuid string) (*DeleteDatabaseResponse, error) + ListProfiles(ctx context.Context, engine string, profileType string) (*ProfileListResponse, error) + GetProfiles(ctx context.Context, engine string, profileType string, id string, name string) (*ListProfileResponse, error) + GetCluster(ctx context.Context, id string, name string) (*ListClusterResponse, error) + ListClusters(ctx context.Context) (*ClusterListResponse, error) + GetSLA(ctx context.Context, id string, name string) (*ListSLAResponse, error) + ListSLA(ctx context.Context) (*SLAResponse, error) +} + +type ServiceClient struct { + c *client.Client +} + +func (sc ServiceClient) ListProfiles(ctx context.Context, engine string, profileType string) (*ProfileListResponse, error) { + var httpReq *http.Request + var err error + + path := makeListProfilePath(engine, profileType) + httpReq, err = sc.c.NewRequest(ctx, http.MethodGet, path, nil) + + if err != nil { + return nil, err + } + res := new(ProfileListResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetProfiles(ctx context.Context, engine string, profileType string, id string, name string) (*ListProfileResponse, error) { + var httpReq *http.Request + var err error + path := makePathProfiles(engine, profileType, id, name) + + httpReq, err = sc.c.NewRequest(ctx, http.MethodGet, path, nil) + + if err != nil { + return nil, err + } + res := new(ListProfileResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetCluster(ctx context.Context, id string, name string) (*ListClusterResponse, error) { + var path string + if id != "" { + path = fmt.Sprintf("/clusters/%s", id) + } + if name != "" { + path = fmt.Sprintf("/clusters/name/%s", name) + } + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + res := new(ListClusterResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListClusters(ctx context.Context) (*ClusterListResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, "/clusters", nil) + if err != nil { + return nil, err + } + res := new(ClusterListResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc 
ServiceClient) GetSLA(ctx context.Context, id string, name string) (*ListSLAResponse, error) { + var path string + if id != "" { + path = fmt.Sprintf("/slas/%s", id) + } + if name != "" { + path = fmt.Sprintf("/slas/name/%s", name) + } + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + res := new(ListSLAResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListSLA(ctx context.Context) (*SLAResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, "/slas", nil) + if err != nil { + return nil, err + } + res := new(SLAResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func makeListProfilePath(engine string, profileType string) string { + if engine != "" && profileType != "" { + return fmt.Sprintf("/profiles?engine=%s&type=%s", engine, profileType) + } + if engine != "" { + return fmt.Sprintf("/profiles?engine=%s", engine) + } else if profileType != "" { + return fmt.Sprintf("/profiles?type=%s", profileType) + } + return "/profiles" +} + +func makePathProfiles(engine string, ptype string, id string, name string) string { + if engine != "" { + path := "/profiles?engine=" + engine + if ptype != "" { + path = path + "&type=" + ptype + } + if id != "" { + path = path + "&id=" + id + } + if name != "" { + path = path + "&name=" + name + } + return path + } + if ptype != "" { + path := "/profiles?type=" + ptype + if id != "" { + path = path + "&id=" + id + } + if name != "" { + path = path + "&name=" + name + } + return path + } + + if id != "" { + path := "/profiles?id=" + id + if name != "" { + path = path + "&name=" + name + } + return path + } + + if name != "" { + path := "/profiles?name=" + name + return path + } + return "" +} + +func (sc ServiceClient) ProvisionDatabase(ctx context.Context, req *ProvisionDatabaseRequest) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/databases/provision", req) + res := new(ProvisionDatabaseResponse) + + if err != nil { + return nil, err + } + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) UpdateDatabase(ctx context.Context, req *UpdateDatabaseRequest, databaseID string) (*UpdateDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPatch, fmt.Sprintf("/databases/%s", databaseID), req) + res := new(UpdateDatabaseResponse) + + if err != nil { + return nil, err + } + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteDatabase(ctx context.Context, req *DeleteDatabaseRequest, databaseID string) (*DeleteDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/databases/%s", databaseID), req) + res := new(DeleteDatabaseResponse) + + if err != nil { + return nil, err + } + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListDatabaseTypes() (*ListDatabaseTypesResponse, error) { + ctx := context.TODO() + + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, "/databases/i/era-drive/tune-config", nil) + if err != nil { + return nil, err + } + res := new(ListDatabaseTypesResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListDatabaseParams() (*ListDatabaseParamsResponse, error) { + ctx := context.TODO() + + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, "/app_types/postgres_database/provision/input-file?category=db_server;database", nil) + if err != nil { + return nil, err + } + res := new(ListDatabaseParamsResponse) + + return res, 
sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListDatabaseServerVMs() (*ListDatabaseServerVMResponse, error) { + ctx := context.TODO() + + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, "/dbservers?detailed=true&load-dbserver-cluster=true", nil) + if err != nil { + return nil, err + } + res := new(ListDatabaseServerVMResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetOperation(req GetOperationRequest) (*GetOperationResponse, error) { + ctx := context.TODO() + + opID := req.OperationID + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/operations/%s", opID), nil) + if err != nil { + return nil, err + } + res := new(GetOperationResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetDatabaseInstance(ctx context.Context, dbInstanceID string) (*GetDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/databases/%s?detailed=true&load-dbserver-cluster=true", dbInstanceID), nil) + if err != nil { + return nil, err + } + res := new(GetDatabaseResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListDatabaseInstance(ctx context.Context) (*ListDatabaseInstance, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, ("/databases?detailed=false&load-dbserver-cluster=false&order-by-dbserver-cluster=false"), nil) + if err != nil { + return nil, err + } + res := new(ListDatabaseInstance) + + return res, sc.c.Do(ctx, httpReq, res) +} diff --git a/client/era/era_structs.go b/client/era/era_structs.go new file mode 100644 index 000000000..27ebf8fc7 --- /dev/null +++ b/client/era/era_structs.go @@ -0,0 +1,939 @@ +package era + +type Clusteravailability struct { + Nxclusterid *string `json:"nxClusterId,omitempty"` + Datecreated *string `json:"dateCreated,omitempty"` + Datemodified *string `json:"dateModified,omitempty"` + Ownerid *string `json:"ownerId,omitempty"` + Status *string `json:"status,omitempty"` + Profileid *string `json:"profileId,omitempty"` +} + +// ListProfile response +type ListProfileResponse struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Status *string `json:"status,omitempty"` + Datecreated *string `json:"dateCreated,omitempty"` + Datemodified *string `json:"dateModified,omitempty"` + Owner *string `json:"owner,omitempty"` + Enginetype *string `json:"engineType,omitempty"` + Type *string `json:"type,omitempty"` + Topology *string `json:"topology,omitempty"` + Dbversion *string `json:"dbVersion,omitempty"` + Systemprofile bool `json:"systemProfile,omitempty"` + Latestversion *string `json:"latestVersion,omitempty"` + Latestversionid *string `json:"latestVersionId,omitempty"` + Versions []*Versions `json:"versions,omitempty"` + Assocdbservers []interface{} `json:"assocDbServers,omitempty"` + Assocdatabases []*string `json:"assocDatabases,omitempty"` + Nxclusterid *string `json:"nxClusterId,omitempty"` + Clusteravailability []*Clusteravailability `json:"clusterAvailability,omitempty"` +} + +type ProfileListResponse []ListProfileResponse + +type Propertiesmap struct { + DefaultContainer *string `json:"DEFAULT_CONTAINER"` + MaxVdiskSize *string `json:"MAX_VDISK_SIZE"` +} + +type Properties struct { + RefID *string `json:"ref_id,omitempty"` + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` + Secure bool `json:"secure,omitempty"` + Description *string `json:"description,omitempty"` +} + +type 
VersionClusterAssociation struct { + NxClusterID *string `json:"nxClusterId,omitempty"` + DateCreated *string `json:"dateCreated,omitempty"` + DateModified *string `json:"dateModified,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + Status *string `json:"status,omitempty"` + ProfileVersionID *string `json:"profileVersionId,omitempty"` + Properties []*Properties `json:"properties,omitempty"` + OptimizedForProvisioning bool `json:"optimizedForProvisioning,omitempty"` +} + +type Versions struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Status *string `json:"status,omitempty"` + Datecreated *string `json:"dateCreated,omitempty"` + Datemodified *string `json:"dateModified,omitempty"` + Owner *string `json:"owner,omitempty"` + Enginetype *string `json:"engineType,omitempty"` + Type *string `json:"type,omitempty"` + Topology *string `json:"topology,omitempty"` + Dbversion *string `json:"dbVersion,omitempty"` + Version *string `json:"version,omitempty"` + Profileid *string `json:"profileId,omitempty"` + Published bool `json:"published,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Systemprofile bool `json:"systemProfile,omitempty"` + Propertiesmap map[string]interface{} `json:"propertiesMap,omitempty"` + Properties []*Properties `json:"properties,omitempty"` + VersionClusterAssociation []*VersionClusterAssociation `json:"versionClusterAssociation,omitempty"` +} + +// ListClustersResponse structs +type ListClusterResponse struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Uniquename *string `json:"uniqueName,omitempty"` + Ipaddresses []*string `json:"ipAddresses,omitempty"` + Fqdns interface{} `json:"fqdns,omitempty"` + Nxclusteruuid *string `json:"nxClusterUUID,omitempty"` + Description *string `json:"description,omitempty"` + Cloudtype *string `json:"cloudType,omitempty"` + Datecreated *string `json:"dateCreated,omitempty"` + Datemodified *string `json:"dateModified,omitempty"` + Ownerid *string `json:"ownerId,omitempty"` + Status *string `json:"status,omitempty"` + Version *string `json:"version,omitempty"` + Hypervisortype *string `json:"hypervisorType,omitempty"` + Hypervisorversion *string `json:"hypervisorVersion,omitempty"` + Properties []*Properties `json:"properties,omitempty"` + Referencecount int `json:"referenceCount,omitempty"` + Username interface{} `json:"username,omitempty"` + Password interface{} `json:"password,omitempty"` + Cloudinfo interface{} `json:"cloudInfo,omitempty"` + Resourceconfig *Resourceconfig `json:"resourceConfig,omitempty"` + Managementserverinfo interface{} `json:"managementServerInfo,omitempty"` + Entitycounts interface{} `json:"entityCounts,omitempty"` + Healthy bool `json:"healthy,omitempty"` +} + +type ClusterListResponse []ListClusterResponse + +type Resourceconfig struct { + Storagethresholdpercentage float64 `json:"storageThresholdPercentage,omitempty"` + Memorythresholdpercentage float64 `json:"memoryThresholdPercentage,omitempty"` +} + +// ListSLAResponse structs +type ListSLAResponse struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Uniquename *string `json:"uniqueName,omitempty"` + Description *string `json:"description,omitempty"` + Ownerid *string `json:"ownerId,omitempty"` + Datecreated *string `json:"dateCreated,omitempty"` + Datemodified *string `json:"dateModified,omitempty"` + CurrentActiveFrequency *string `json:"currentActiveFrequency,omitempty"` + 
Continuousretention int `json:"continuousRetention,omitempty"` + Dailyretention int `json:"dailyRetention,omitempty"` + Weeklyretention int `json:"weeklyRetention,omitempty"` + Monthlyretention int `json:"monthlyRetention,omitempty"` + Quarterlyretention int `json:"quarterlyRetention,omitempty"` + Yearlyretention int `json:"yearlyRetention,omitempty"` + Referencecount int `json:"referenceCount,omitempty"` + PitrEnabled bool `json:"pitrEnabled,omitempty"` + Systemsla bool `json:"systemSla,omitempty"` +} + +type SLAResponse []ListSLAResponse + +type ListDatabaseTypesResponse map[string]DatabaseTypeProperties + +type DatabaseTypeProperties struct { + Databasetype string `json:"databaseType"` + Stagingdriveautotunesupported bool `json:"stagingDriveAutoTuneSupported"` + Defaultstagingdriveautotune bool `json:"defaultStagingDriveAutoTune"` + Logdriveautotunesupported bool `json:"logDriveAutoTuneSupported"` + Defaultlogdriveautotune bool `json:"defaultLogDriveAutoTune"` +} + +// ProvisionDatabaseRequestStructs +type ProvisionDatabaseRequest struct { + Createdbserver bool `json:"createDbserver,omitempty"` + Clustered bool `json:"clustered,omitempty"` + Autotunestagingdrive bool `json:"autoTuneStagingDrive,omitempty"` + Nodecount *int `json:"nodeCount,omitempty"` + Databasetype *string `json:"databaseType,omitempty"` + Name *string `json:"name,omitempty"` + Databasedescription *string `json:"databaseDescription,omitempty"` + DatabaseServerID *string `json:"dbserverId,omitempty"` + Softwareprofileid *string `json:"softwareProfileId,omitempty"` + Softwareprofileversionid *string `json:"softwareProfileVersionId,omitempty"` + Computeprofileid *string `json:"computeProfileId,omitempty"` + Networkprofileid *string `json:"networkProfileId,omitempty"` + Dbparameterprofileid *string `json:"dbParameterProfileId,omitempty"` + Newdbservertimezone *string `json:"newFVMbServerTimeZone,omitempty"` + Nxclusterid *string `json:"nxClusterId,omitempty"` + Sshpublickey *string `json:"sshPublicKey,omitempty"` + VMPassword *string `json:"vmPassword,omitempty"` + Timemachineinfo *Timemachineinfo `json:"timeMachineInfo,omitempty"` + Actionarguments []*Actionarguments `json:"actionArguments,omitempty"` + Nodes []*Nodes `json:"nodes,omitempty"` +} + +type Snapshottimeofday struct { + Hours int `json:"hours"` + Minutes int `json:"minutes"` + Seconds int `json:"seconds"` +} + +type Continuousschedule struct { + Enabled bool `json:"enabled"` + Logbackupinterval int `json:"logBackupInterval"` + Snapshotsperday int `json:"snapshotsPerDay"` +} + +type Weeklyschedule struct { + Enabled bool `json:"enabled"` + Dayofweek string `json:"dayOfWeek"` +} + +type Monthlyschedule struct { + Enabled bool `json:"enabled"` + Dayofmonth int `json:"dayOfMonth"` +} + +type Quartelyschedule struct { + Enabled bool `json:"enabled"` + Startmonth string `json:"startMonth"` + Dayofmonth int `json:"dayOfMonth"` +} + +type Yearlyschedule struct { + Enabled bool `json:"enabled"` + Dayofmonth int `json:"dayOfMonth"` + Month string `json:"month"` +} + +type Dailyschedule struct { + Enabled bool `json:"enabled"` +} + +type Schedule struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + UniqueName string `json:"uniqueName"` + OwnerID string `json:"ownerId"` + SystemPolicy bool `json:"systemPolicy"` + GlobalPolicy bool `json:"globalPolicy"` + Datecreated string `json:"dateCreated"` + Datemodified string `json:"dateModified"` + Snapshottimeofday *Snapshottimeofday `json:"snapshotTimeOfDay"` + 
Continuousschedule *Continuousschedule `json:"continuousSchedule"` + Weeklyschedule *Weeklyschedule `json:"weeklySchedule"` + Dailyschedule *Dailyschedule `json:"dailySchedule"` + Monthlyschedule *Monthlyschedule `json:"monthlySchedule"` + Quartelyschedule *Quartelyschedule `json:"quartelySchedule"` + Yearlyschedule *Yearlyschedule `json:"yearlySchedule"` + ReferenceCount int `json:"referenceCount"` + StartTime string `json:"startTime"` + TimeZone string `json:"timeZone"` +} + +type Timemachineinfo struct { + Name string `json:"name"` + Description string `json:"description"` + Slaid string `json:"slaId"` + Schedule Schedule `json:"schedule"` + Tags []interface{} `json:"tags"` + + Autotunelogdrive bool `json:"autoTuneLogDrive"` +} + +type Actionarguments struct { + Name string `json:"name"` + Value interface{} `json:"value"` +} + +type Nodes struct { + Properties []interface{} `json:"properties"` + Vmname string `json:"vmName,omitempty"` + Networkprofileid string `json:"networkProfileId,omitempty"` + DatabaseServerID string `json:"dbserverId,omitempty"` +} + +// ProvisionDatabaseResponse structs +type ProvisionDatabaseResponse struct { + Name string `json:"name"` + Workid string `json:"workId"` + Operationid string `json:"operationId"` + Dbserverid string `json:"dbserverId"` + Message interface{} `json:"message"` + Entityid string `json:"entityId"` + Entityname string `json:"entityName"` + Entitytype string `json:"entityType"` + Status string `json:"status"` + Associatedoperations interface{} `json:"associatedOperations"` + Dependencyreport interface{} `json:"dependencyReport"` +} + +// ListDatabaseParamsResponse structs +type ListDatabaseParamsResponse struct { + Properties []DatabaseProperties `json:"properties"` +} +type DatabaseProperties struct { + RefID string `json:"ref_id"` + Name string `json:"name"` + Type string `json:"type"` + ValueType string `json:"value_type"` + Category string `json:"category"` + Regex string `json:"regex"` + Secure string `json:"secure"` + Required string `json:"required"` + Custom1 string `json:"custom1"` + Custom2 string `json:"custom2"` + Custom3 string `json:"custom3"` + DefaultValue string `json:"default_value"` + Sensitive string `json:"sensitive"` + DisplayName string `json:"display_name"` + Description string `json:"description"` + Index int `json:"index"` + Alias string `json:"alias"` + ParameterizedDisplayName string `json:"parameterized_display_name"` + ParameterizedDescription string `json:"parameterized_description"` + Isduplicable string `json:"isDuplicable"` +} + +// ListDatabaseInstancesResponse structs +type ListDatabaseInstancesResponse struct { + Databases []DatabaseInstance `json:"databases"` +} + +type DatabaseInstance struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` +} + +// ListDatabaseServerVMResponse structs +type ListDatabaseServerVMResponse struct { + Dbserverclusters []interface{} `json:"dbserverClusters"` + Dbservers []Dbservers `json:"dbservers"` +} +type DatabaseServerProperties struct { + RefID string `json:"ref_id"` + Name string `json:"name"` + Value string `json:"value"` + Secure bool `json:"secure"` + Description interface{} `json:"description"` +} +type Metadata struct { + Physicaleradrive bool `json:"physicalEraDrive"` + Clustered bool `json:"clustered"` + Singleinstance bool `json:"singleInstance"` + Eradriveinitialised bool `json:"eraDriveInitialised"` + Markedfordeletion bool `json:"markedForDeletion"` + Softwaresnaphotinterval int 
`json:"softwareSnaphotInterval"` + Databasetype *string `json:"databaseType"` + Provisionoperationid *string `json:"provisionOperationId"` + Associatedtimemachines []*string `json:"associatedTimeMachines"` + Secureinfo *Secureinfo `json:"secureInfo"` + Info *Info `json:"info"` + Deregisterinfo *DeregisterInfo `json:"deregisterInfo"` + // Protectiondomainmigrationstatus interface{} `json:"protectionDomainMigrationStatus"` + // Lastclocksyncalerttime interface{} `json:"lastClockSyncAlertTime"` +} +type Dbservers struct { + Placeholder bool `json:"placeholder"` + Clustered bool `json:"clustered"` + Eracreated bool `json:"eraCreated"` + Internal bool `json:"internal"` + IsServerDriven bool `json:"is_server_driven"` + Dbserverinvalideastate bool `json:"dbserverInValidEaState"` + ID *string `json:"id"` + Name *string `json:"name"` + Description *string `json:"description"` + Ownerid *string `json:"ownerId"` + Datecreated *string `json:"dateCreated"` + Datemodified *string `json:"dateModified"` + Dbserverclusterid *string `json:"dbserverClusterId"` + Vmclustername *string `json:"vmClusterName"` + Vmclusteruuid *string `json:"vmClusterUuid"` + Type *string `json:"type"` + Status *string `json:"status"` + Clientid *string `json:"clientId"` + Nxclusterid *string `json:"nxClusterId"` + Eradriveid *string `json:"eraDriveId"` + Eraversion *string `json:"eraVersion"` + Vmtimezone *string `json:"vmTimeZone"` + Accesskey *string `json:"accessKey"` + Protectiondomainid *string `json:"protectionDomainId"` + Databasetype *string `json:"databaseType"` + Accesskeyid *string `json:"accessKeyId"` + Requestedversion *string `json:"requestedVersion"` + AssociatedTimeMachineID *string `json:"associated_time_machine_id"` + Workingdirectory *string `json:"workingDirectory"` + Ipaddresses []*string `json:"ipAddresses"` + Fqdns []*string `json:"fqdns"` + Macaddresses []*string `json:"macAddresses"` + Associatedtimemachineids []*string `json:"associatedTimeMachineIds"` + Properties []*DatabaseServerProperties `json:"properties"` + Tags []*Tags `json:"tags"` + Vminfo *VMInfo `json:"vmInfo"` + Info *Info `json:"info"` + Metadata *Metadata `json:"metadata"` + Metric *Metric `json:"metric"` + Lcmconfig *LcmConfig `json:"lcmConfig"` + TimeMachineInfo []*Properties `json:"time_machine_info"` + Eradrive interface{} `json:"eraDrive"` + Databases interface{} `json:"databases"` + Clones interface{} `json:"clones"` + Softwareinstallations interface{} `json:"softwareInstallations"` + // Protectiondomain interface{} `json:"protectionDomain"` +} + +// GetOperationRequest struct +type GetOperationRequest struct { + OperationID string `json:"operation_id"` +} + +// GetOperationResponse struct +type GetOperationResponse struct { + Stepgenenabled bool `json:"stepGenEnabled"` + Setstarttime bool `json:"setStartTime"` + Systemtriggered bool `json:"systemTriggered"` + Uservisible bool `json:"userVisible"` + Isinternal bool `json:"isInternal"` + Timeout int `json:"timeout"` + Parentstep int `json:"parentStep"` + Entityname *string `json:"entityName"` + Timezone *string `json:"timeZone"` + ID *string `json:"id"` + Name *string `json:"name"` + Uniquename *string `json:"uniqueName"` + Type *string `json:"type"` + Starttime *string `json:"startTime"` + Endtime *string `json:"endTime"` + Instanceid *string `json:"instanceId"` + Ownerid *string `json:"ownerId"` + Status *string `json:"status"` + Percentagecomplete *string `json:"percentageComplete"` + Parentid *string `json:"parentId"` + Message *string `json:"message"` + Scheduletime *string 
`json:"scheduleTime"` + Nxclusterid *string `json:"nxClusterId"` + Dbserverstatus *string `json:"dbserverStatus"` + Userrequestedaction *string `json:"userRequestedAction"` + Userrequestedactiontime *string `json:"userRequestedActionTime"` + Entityid *string `json:"entityId"` + Entitytype *string `json:"entityType"` + Dbserverid *string `json:"dbserverId"` + Datesubmitted *string `json:"dateSubmitted"` + Deferredby *string `json:"deferredBy"` + DeferredByOpIDs []*string `json:"deferredByOpIds"` + Steps []*Steps `json:"steps"` + Properties []*Properties `json:"properties"` + Metadata *OperationMetadata `json:"metadata"` + Work interface{} `json:"work"` + Childoperations []interface{} `json:"childOperations"` +} + +type Steps struct { + Stepgenenabled bool `json:"stepGenEnabled"` + Setstarttimevalue bool `json:"setStartTimeValue"` + ID string `json:"id"` + Name string `json:"name"` + Uniquename interface{} `json:"uniqueName"` + Definitionid string `json:"definitionId"` + Starttime string `json:"startTime"` + Endtime string `json:"endTime"` + Instanceid interface{} `json:"instanceId"` + Parentid interface{} `json:"parentId"` + Level string `json:"level"` + Status string `json:"status"` + Fileid interface{} `json:"fileId"` + Percentagecomplete string `json:"percentageComplete"` + Message interface{} `json:"message"` + Sequencenumber int `json:"sequenceNumber"` + Childsteps interface{} `json:"childSteps"` + Weightage int `json:"weightage"` +} +type Executioncontext struct { + Affecteddbservers []string `json:"affectedDBServers"` + Extendedaffecteddbservers []string `json:"extendedAffectedDBServers"` + Applicationtype string `json:"applicationType"` +} +type OperationMetadata struct { + Linkedoperations interface{} `json:"linkedOperations"` + Associatedentities interface{} `json:"associatedEntities"` + Oldstatus interface{} `json:"oldStatus"` + Userrequestedaction string `json:"userRequestedAction"` + Userrequestedactiontimestamp interface{} `json:"userRequestedActionTimestamp"` + Controlmessage interface{} `json:"controlMessage"` + Executioncontext Executioncontext `json:"executionContext"` + Scheduletime interface{} `json:"scheduleTime"` + Scheduledby string `json:"scheduledBy"` + Scheduledon string `json:"scheduledOn"` + Retryparentid interface{} `json:"retryParentId"` + Retryimmediateparentid interface{} `json:"retryImmediateParentId"` + Retriedoperations interface{} `json:"retriedOperations"` + Switcheddbservers interface{} `json:"switchedDbservers"` + Linkedoperationsdescription string `json:"linkedOperationsDescription"` +} + +// Common Error response + +type ErrorResponse struct { + Errorcode string `json:"errorCode"` + Reason string `json:"Reason"` + Remedy string `json:"remedy"` + Message string `json:"message"` + Stacktrace []interface{} `json:"stackTrace"` + Suppressedexceptions []interface{} `json:"suppressedExceptions"` +} + +// DeleteDatabase models + +type DeleteDatabaseRequest struct { + Delete bool `json:"delete"` + Remove bool `json:"remove"` + Softremove bool `json:"softRemove"` + Forced bool `json:"forced"` + Deletetimemachine bool `json:"deleteTimeMachine"` + Deletelogicalcluster bool `json:"deleteLogicalCluster"` +} + +type DeleteDatabaseResponse struct { + Name string `json:"name"` + Workid string `json:"workId"` + Operationid string `json:"operationId"` + Dbserverid string `json:"dbserverId"` + Message interface{} `json:"message"` + Entityid string `json:"entityId"` + Entityname string `json:"entityName"` + Entitytype string `json:"entityType"` + Status string 
`json:"status"` + Associatedoperations interface{} `json:"associatedOperations"` + Dependencyreport interface{} `json:"dependencyReport"` +} + +// UpdateDatabase models +type UpdateDatabaseRequest struct { + Name string `json:"name"` + Description string `json:"description"` + Tags []interface{} `json:"tags"` + Resetname bool `json:"resetName"` + Resetdescription bool `json:"resetDescription"` + Resettags bool `json:"resetTags"` +} + +type UpdateDatabaseResponse struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` +} + +type DBExpiryDetails struct { + EffectiveTimestamp *string `json:"effectiveTimestamp,omitempty"` + ExpiryTimestamp *string `json:"expiryTimestamp,omitempty"` + ExpiryDateTimezone *string `json:"expiryDateTimezone,omitempty"` + RemindBeforeInDays *int `json:"remindBeforeInDays,omitempty"` + ExpireInDays *int `json:"expireInDays,omitempty"` + DeleteDatabase bool `json:"deleteDatabase,omitempty"` + DeleteTimeMachine bool `json:"deleteTimeMachine,omitempty"` + DeleteVM bool `json:"deleteVM,omitempty"` + UserCreated bool `json:"userCreated,omitempty"` +} + +type DBRefreshDetails struct { + RefreshInDays int `json:"refreshInDays,omitempty"` + RefreshInHours int `json:"refreshInHours,omitempty"` + RefreshInMonths int `json:"refreshInMonths,omitempty"` + LastRefreshDate string `json:"lastRefreshDate,omitempty"` + NextRefreshDate string `json:"nextRefreshDate,omitempty"` + RefreshTime string `json:"refreshTime,omitempty"` + RefreshDateTimezone string `json:"refreshDateTimezone,omitempty"` +} + +type DBPrePostDeleteCommand struct { + Command string `json:"command,omitempty"` +} + +type DBPostDeleteCommand struct{} + +type LcmConfig struct { + ExpiryDetails *DBExpiryDetails `json:"expiryDetails,omitempty"` + RefreshDetails *DBRefreshDetails `json:"refreshDetails,omitempty"` + PreDeleteCommand *DBPrePostDeleteCommand `json:"preDeleteCommand,omitempty"` + PostDeleteCommand *DBPrePostDeleteCommand `json:"postDeleteCommand,omitempty"` +} + +type ListDatabaseInstance []GetDatabaseResponse + +type GetDatabaseResponse struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Ownerid string `json:"ownerId"` + Datecreated string `json:"dateCreated"` + Datemodified string `json:"dateModified"` + AccessLevel interface{} `json:"accessLevel"` + Properties []*DBInstanceProperties `json:"properties"` + Tags []*Tags `json:"tags"` + Clustered bool `json:"clustered"` + Clone bool `json:"clone"` + Eracreated bool `json:"eraCreated"` + Internal bool `json:"internal"` + Placeholder bool `json:"placeholder"` + Databasename string `json:"databaseName"` + Type string `json:"type"` + Databaseclustertype interface{} `json:"databaseClusterType"` + Status string `json:"status"` + Databasestatus string `json:"databaseStatus"` + Dbserverlogicalclusterid interface{} `json:"dbserverLogicalClusterId"` + Timemachineid string `json:"timeMachineId"` + Parenttimemachineid interface{} `json:"parentTimeMachineId"` + Timezone string `json:"timeZone"` + Info *Info `json:"info"` + GroupInfo interface{} `json:"groupInfo"` + Metadata *DBInstanceMetadata `json:"metadata"` + Metric interface{} `json:"metric"` + Category string `json:"category"` + ParentDatabaseID interface{} `json:"parentDatabaseId,omitempty"` + ParentSourceDatabaseID interface{} `json:"parentSourceDatabaseId,omitempty"` + Lcmconfig *LcmConfig `json:"lcmConfig"` + TimeMachine *TimeMachine `json:"timeMachine"` + Dbserverlogicalcluster interface{} 
`json:"dbserverlogicalCluster"` + Databasenodes []Databasenodes `json:"databaseNodes"` + Linkeddatabases []Linkeddatabases `json:"linkedDatabases"` + Databases interface{} `json:"databases,omitempty"` + DatabaseGroupStateInfo interface{} `json:"databaseGroupStateInfo"` +} + +type DBInstanceProperties struct { + RefID string `json:"ref_id"` + Name string `json:"name"` + Value string `json:"value"` + Secure bool `json:"secure"` + Description interface{} `json:"description"` +} + +type Secureinfo struct { +} + +type DataDisks struct { + Count float64 `json:"count"` +} + +type LogDisks struct { + Count float64 `json:"count"` + Size float64 `json:"size"` +} +type ArchiveStorage struct { + Size float64 `json:"size"` +} +type Storage struct { + DataDisks *DataDisks `json:"data_disks"` + LogDisks *LogDisks `json:"log_disks"` + ArchiveStorage *ArchiveStorage `json:"archive_storage"` +} +type VMProperties struct { + NrHugepages float64 `json:"nr_hugepages"` + OvercommitMemory float64 `json:"overcommit_memory"` + DirtyBackgroundRatio float64 `json:"dirty_background_ratio"` + DirtyRatio float64 `json:"dirty_ratio"` + DirtyExpireCentisecs float64 `json:"dirty_expire_centisecs"` + DirtyWritebackCentisecs float64 `json:"dirty_writeback_centisecs"` + Swappiness float64 `json:"swappiness"` +} +type BpgDBParam struct { + SharedBuffers string `json:"shared_buffers"` + MaintenanceWorkMem string `json:"maintenance_work_mem"` + WorkMem string `json:"work_mem"` + EffectiveCacheSize string `json:"effective_cache_size"` + MaxWorkerProcesses string `json:"max_worker_processes"` + MaxParallelWorkersPerGather string `json:"max_parallel_workers_per_gather"` +} +type BpgConfigs struct { + Storage *Storage `json:"storage"` + VMProperties *VMProperties `json:"vm_properties"` + BpgDBParam *BpgDBParam `json:"bpg_db_param"` +} +type InfoBpgConfig struct { + BpgConfigs *BpgConfigs `json:"bpg_configs"` +} +type Info struct { + Secureinfo interface{} `json:"secureInfo"` + Info *InfoBpgConfig `json:"info"` +} +type DBInstanceMetadata struct { + Logcatchupforrestoredispatched bool `json:"logCatchUpForRestoreDispatched,omitempty"` + BaseSizeComputed bool `json:"baseSizeComputed,omitempty"` + PitrBased bool `json:"pitrBased,omitempty"` + DeregisteredWithDeleteTimeMachine bool `json:"deregisteredWithDeleteTimeMachine,omitempty"` + Lastrefreshtimestamp *string `json:"lastRefreshTimestamp,omitempty"` + Lastrequestedrefreshtimestamp *string `json:"lastRequestedRefreshTimestamp,omitempty"` + Statebeforerefresh *string `json:"stateBeforeRefresh,omitempty"` + Statebeforerestore *string `json:"stateBeforeRestore,omitempty"` + Statebeforescaling *string `json:"stateBeforeScaling,omitempty"` + Lastlogcatchupforrestoreoperationid *string `json:"lastLogCatchUpForRestoreOperationId,omitempty"` + ProvisionOperationID *string `json:"provisionOperationId,omitempty"` + SourceSnapshotID *string `json:"sourceSnapshotId,omitempty"` + Tmactivateoperationid *string `json:"tmActivateOperationId,omitempty"` + Createddbservers []*string `json:"createdDbservers,omitempty"` + Secureinfo *Secureinfo `json:"secureInfo,omitempty"` + Info *Info `json:"info,omitempty"` + Deregisterinfo *DeregisterInfo `json:"deregisterInfo,omitempty"` + Registereddbservers interface{} `json:"registeredDbservers,omitempty"` + CapabilityResetTime interface{} `json:"capabilityResetTime,omitempty"` + Originaldatabasename interface{} `json:"originalDatabaseName,omitempty"` + RefreshBlockerInfo interface{} `json:"refreshBlockerInfo,omitempty"` +} + +type DbserverMetadata struct { + 
Physicaleradrive bool `json:"physicalEraDrive"` + Clustered bool `json:"clustered"` + Singleinstance bool `json:"singleInstance"` + Eradriveinitialised bool `json:"eraDriveInitialised"` + Markedfordeletion bool `json:"markedForDeletion"` + Softwaresnaphotinterval int `json:"softwareSnaphotInterval"` + Databasetype *string `json:"databaseType"` + Provisionoperationid *string `json:"provisionOperationId"` + Associatedtimemachines []*string `json:"associatedTimeMachines"` + Secureinfo *Secureinfo `json:"secureInfo"` + Info *Info `json:"info"` + Deregisterinfo *DeregisterInfo `json:"deregisterInfo"` + // Protectiondomainmigrationstatus interface{} `json:"protectionDomainMigrationStatus"` + // Lastclocksyncalerttime interface{} `json:"lastClockSyncAlertTime"` +} + +type VMInfo struct { + OsType *string `json:"osType,omitempty"` + OsVersion *string `json:"osVersion,omitempty"` + Distribution *string `json:"distribution,omitempty"` +} + +type MetricVMInfo struct { + NumVCPUs *int `json:"numVCPUs,omitempty"` + NumCoresPerVCPU *int `json:"numCoresPerVCPU,omitempty"` + HypervisorCPUUsagePpm []*int `json:"hypervisorCpuUsagePpm,omitempty"` + LastUpdatedTimeInUTC *string `json:"lastUpdatedTimeInUTC,omitempty"` +} + +type MetricMemoryInfo struct { + LastUpdatedTimeInUTC *string `json:"lastUpdatedTimeInUTC,omitempty"` + Memory *int `json:"memory,omitempty"` + MemoryUsagePpm []*int `json:"memoryUsagePpm,omitempty"` + Unit *string `json:"unit,omitempty"` +} + +type MetricStorageInfo struct { + LastUpdatedTimeInUTC *string `json:"lastUpdatedTimeInUTC,omitempty"` + ControllerNumIops []*int `json:"controllerNumIops,omitempty"` + ControllerAvgIoLatencyUsecs []*int `json:"controllerAvgIoLatencyUsecs,omitempty"` + Size *int `json:"size,omitempty"` + AllocatedSize *int `json:"allocatedSize,omitempty"` + UsedSize *int `json:"usedSize,omitempty"` + Unit *string `json:"unit,omitempty"` +} + +type Metric struct { + LastUpdatedTimeInUTC *string `json:"lastUpdatedTimeInUTC,omitempty"` + Compute *MetricVMInfo `json:"compute,omitempty"` + Memory *MetricMemoryInfo `json:"memory,omitempty"` + Storage *MetricStorageInfo `json:"storage,omitempty"` +} + +type Dbserver struct { + Placeholder bool `json:"placeholder,omitempty"` + Eracreated bool `json:"eraCreated,omitempty"` + Internal bool `json:"internal,omitempty"` + IsServerDriven bool `json:"is_server_driven,omitempty"` + Clustered bool `json:"clustered,omitempty"` + Dbserverinvalideastate bool `json:"dbserverInValidEaState,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Ownerid *string `json:"ownerId,omitempty"` + Datecreated *string `json:"dateCreated,omitempty"` + Datemodified *string `json:"dateModified,omitempty"` + Vmclustername *string `json:"vmClusterName,omitempty"` + Vmclusteruuid *string `json:"vmClusterUuid,omitempty"` + Type *string `json:"type,omitempty"` + Status *string `json:"status,omitempty"` + Clientid *string `json:"clientId,omitempty"` + Nxclusterid *string `json:"nxClusterId,omitempty"` + Eradriveid *string `json:"eraDriveId,omitempty"` + Eraversion *string `json:"eraVersion,omitempty"` + Vmtimezone *string `json:"vmTimeZone,omitempty"` + Requestedversion *string `json:"requestedVersion,omitempty"` + AssociatedTimeMachineID *string `json:"associated_time_machine_id,omitempty"` + Accesskey *string `json:"accessKey,omitempty"` + Protectiondomainid *string `json:"protectionDomainId,omitempty"` + Databasetype *string `json:"databaseType,omitempty"` + Accesskeyid 
*string `json:"accessKeyId,omitempty"` + Associatedtimemachineids []*string `json:"associatedTimeMachineIds,omitempty"` + Workingdirectory *string `json:"workingDirectory,omitempty"` + Ipaddresses []*string `json:"ipAddresses,omitempty"` + Macaddresses []*string `json:"macAddresses,omitempty"` + Vminfo *VMInfo `json:"vmInfo,omitempty"` + Info *Info `json:"info,omitempty"` + Metadata *DbserverMetadata `json:"metadata,omitempty"` + Metric *Metric `json:"metric,omitempty"` + Lcmconfig *LcmConfig `json:"lcmConfig,omitempty"` + TimeMachineInfo []*Properties `json:"time_machine_info"` + Properties []*Properties `json:"properties,omitempty"` + Eradrive interface{} `json:"eraDrive,omitempty"` + Databases interface{} `json:"databases,omitempty"` + Clones interface{} `json:"clones,omitempty"` + Softwareinstallations interface{} `json:"softwareInstallations,omitempty"` + Protectiondomain interface{} `json:"protectionDomain,omitempty"` + Dbserverclusterid interface{} `json:"dbserverClusterId,omitempty"` + Fqdns interface{} `json:"fqdns,omitempty"` + Tags []interface{} `json:"tags,omitempty"` +} + +type Tags struct { + TagID string `json:"tagId,omitempty"` + EntityID string `json:"entityId,omitempty"` + EntityType interface{} `json:"entityType,omitempty"` + Value string `json:"value,omitempty"` + TagName string `json:"tagName,omitempty"` +} + +type Protectiondomain struct { + ID string `json:"id"` + Name string `json:"name"` + Eracreated bool `json:"eraCreated"` + Description string `json:"description"` + Type string `json:"type"` + Cloudid string `json:"cloudId"` + Datecreated string `json:"dateCreated"` + Datemodified string `json:"dateModified"` + Ownerid string `json:"ownerId"` + Status string `json:"status"` + PrimaryHost string `json:"primaryHost,omitempty"` + Properties []*DBInstanceProperties `json:"properties"` + Tags []*Tags `json:"tags,omitempty"` + AssocEntities []string `json:"assocEntities,omitempty"` +} +type Databasenodes struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Ownerid string `json:"ownerId"` + Datecreated string `json:"dateCreated"` + Datemodified string `json:"dateModified"` + AccessLevel interface{} `json:"accessLevel,omitempty"` + Properties []interface{} `json:"properties"` + Tags []*Tags `json:"tags"` + Databaseid string `json:"databaseId"` + Status string `json:"status"` + Databasestatus string `json:"databaseStatus"` + Primary bool `json:"primary"` + Dbserverid string `json:"dbserverId"` + Softwareinstallationid string `json:"softwareInstallationId"` + Protectiondomainid string `json:"protectionDomainId"` + Info Info `json:"info"` + Metadata interface{} `json:"metadata"` + Protectiondomain *Protectiondomain `json:"protectionDomain"` + // Valideastate bool `json:"validEaState"` +} + +type Linkeddatabases struct { + ID string `json:"id"` + Name string `json:"name"` + DatabaseName string `json:"databaseName,omitempty"` + Description string `json:"description"` + Status string `json:"status"` + Databasestatus string `json:"databaseStatus"` + ParentDatabaseID string `json:"parentDatabaseId"` + ParentLinkedDatabaseID string `json:"parentLinkedDatabaseId"` + Ownerid string `json:"ownerId"` + Datecreated string `json:"dateCreated"` + Datemodified string `json:"dateModified"` + TimeZone string `json:"timeZone"` + Info Info `json:"info"` + Metadata interface{} `json:"metadata"` + Metric interface{} `json:"metric"` + SnapshotID string `json:"snapshotId"` +} + +type TimeMachine struct { + SLAUpdateInProgress bool 
`json:"slaUpdateInProgress,omitempty"` + Clustered bool `json:"clustered,omitempty"` + Clone bool `json:"clone,omitempty"` + Internal bool `json:"internal,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + DateCreated *string `json:"dateCreated,omitempty"` + DateModified *string `json:"dateModified,omitempty"` + DatabaseID *string `json:"databaseId,omitempty"` + Type *string `json:"type,omitempty"` + Category *string `json:"category,omitempty"` + Status *string `json:"status,omitempty"` + EaStatus *string `json:"eaStatus,omitempty"` + Scope *string `json:"scope,omitempty"` + SLAID *string `json:"slaId,omitempty"` + ScheduleID *string `json:"scheduleId,omitempty"` + SourceNxClusters []*string `json:"sourceNxClusters,omitempty"` + Properties []*DBInstanceProperties `json:"properties,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + Info *Info `json:"info,omitempty"` + Metadata *TimeMachineMetadata `json:"metadata,omitempty"` + SLA *ListSLAResponse `json:"sla,omitempty"` + Schedule *Schedule `json:"schedule,omitempty"` + Database *DatabaseInstance `json:"database,omitempty"` + Clones interface{} `json:"clones,omitempty"` + AccessLevel interface{} `json:"accessLevel,omitempty"` + Metric interface{} `json:"metric,omitempty"` + //AssociatedClusters interface{} `json:"associatedClusters,omitempty"` + // SLAUpdateMetadata interface{} `json:"slaUpdateMetadata,omitempty"` +} + +type DeregisterInfo struct { + Message *string `json:"message,omitempty"` + Operations []*string `json:"operations,omitempty"` +} + +type TimeMachineMetadata struct { + LastHealSystemTriggered bool `json:"lastHealSystemTriggered,omitempty"` + AutoHeal bool `json:"autoHeal,omitempty"` + DispatchOnboardingSnapshot bool `json:"dispatchOnboardingSnapshot,omitempty"` + LastLogCatchupSkipped bool `json:"lastLogCatchupSkipped,omitempty"` + FirstSnapshotCaptured bool `json:"firstSnapshotCaptured,omitempty"` + FirstSnapshotDispatched bool `json:"firstSnapshotDispatched,omitempty"` + StorageLimitExhausted bool `json:"storageLimitExhausted,omitempty"` + AbsoluteThresholdExhausted bool `json:"absoluteThresholdExhausted,omitempty"` + SnapshotCapturedForTheDay bool `json:"snapshotCapturedForTheDay,omitempty"` + LastPauseByForce bool `json:"lastPauseByForce,omitempty"` + AutoHealRetryCount *int `json:"autoHealRetryCount,omitempty"` + AutoHealSnapshotCount *int `json:"autoHealSnapshotCount,omitempty"` + AutoHealLogCatchupCount *int `json:"autoHealLogCatchupCount,omitempty"` + SnapshotSuccessiveFailureCount *int `json:"snapshotSuccessiveFailureCount,omitempty"` + FirstSnapshotRetryCount *int `json:"firstSnapshotRetryCount,omitempty"` + LogCatchupSuccessiveFailureCount *int `json:"logCatchupSuccessiveFailureCount,omitempty"` + ImplicitResumeCount *int `json:"implicitResumeCount,omitempty"` + RequiredSpace *float64 `json:"requiredSpace,omitempty"` + CapabilityResetTime *string `json:"capabilityResetTime,omitempty"` + LastSnapshotTime *string `json:"lastSnapshotTime,omitempty"` + LastAutoSnapshotTime *string `json:"lastAutoSnapshotTime,omitempty"` + LastSnapshotOperationID *string `json:"lastSnapshotOperationId,omitempty"` + LastAutoSnapshotOperationID *string `json:"lastAutoSnapshotOperationId,omitempty"` + LastSuccessfulSnapshotOperationID *string `json:"lastSuccessfulSnapshotOperationId,omitempty"` + LastHealSnapshotOperation *string `json:"lastHealSnapshotOperation,omitempty"` + 
LastNonExtraAutoSnapshotTime *string `json:"lastNonExtraAutoSnapshotTime,omitempty"` + LastLogCatchupTime *string `json:"lastLogCatchupTime,omitempty"` + LastSuccessfulLogCatchupOperationID *string `json:"lastSuccessfulLogCatchupOperationId,omitempty"` + LastLogCatchupOperationID *string `json:"lastLogCatchupOperationId,omitempty"` + LastPauseTime *string `json:"lastPauseTime,omitempty"` + LastResumeTime *string `json:"lastResumeTime,omitempty"` + LastPauseReason *string `json:"lastPauseReason,omitempty"` + StateBeforeRestore *string `json:"stateBeforeRestore,omitempty"` + LastHealthAlertedTime *string `json:"lastHealthAlertedTime,omitempty"` + LastImplicitResumeTime *string `json:"lastImplicitResumeTime,omitempty"` + LastEaBreakdownTime *string `json:"lastEaBreakdownTime,omitempty"` + LastHealTime *string `json:"lastHealTime,omitempty"` + AuthorizedDbservers []string `json:"authorizedDbservers,omitempty"` + DeregisterInfo *DeregisterInfo `json:"deregisterInfo,omitempty"` + SecureInfo interface{} `json:"secureInfo,omitempty"` + Info interface{} `json:"info,omitempty"` + DatabasesFirstSnapshotInfo interface{} `json:"databasesFirstSnapshotInfo,omitempty"` + OnboardingSnapshotProperties interface{} `json:"onboardingSnapshotProperties,omitempty"` + LastSuccessfulLogCatchupPostHealWithResetCapability interface{} `json:"lastSuccessfulLogCatchupPostHealWithResetCapability,omitempty"` + AutoSnapshotRetryInfo interface{} `json:"autoSnapshotRetryInfo,omitempty"` +} diff --git a/examples/ndb/database_instance/main.tf b/examples/ndb/database_instance/main.tf new file mode 100644 index 000000000..ec09f1da0 --- /dev/null +++ b/examples/ndb/database_instance/main.tf @@ -0,0 +1,100 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0-beta.1" + } + } +} + +#defining nutanix provider configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + +## provision PostgreSQL database with single instance + +resource "nutanix_ndb_database" "dbp" { + + // name of database type + databasetype = "postgres_database" + + // required name of db instance + name = "test-inst" + description = "add description" + + // adding the profiles details + softwareprofileid = "{{ software_profile_id }}" + softwareprofileversionid = "{{ software_profile_version_id }}" + computeprofileid = "{{ compute_profile_id }}" + networkprofileid = "{{ network_profile_id }}" + dbparameterprofileid = "{{ db_parameter_profile_id }}" + + // postgreSQL Info + postgresql_info{ + listener_port = "{{ listner_port }}" + + database_size= "{{ 200 }}" + + db_password = "password" + + database_names= "testdb1" + } + + // era cluster id + nxclusterid= local.clusters.EraCluster.id + + // ssh-key + sshpublickey= "{{ ssh-public-key }}" + + // node for single instance + nodes{ + // name of dbserver vm + vmname= "test-era-vm1" + + // network profile id + networkprofileid= local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + } + + // time machine info + timemachineinfo { + name= "test-pg-inst" + description="description of time machine" + slaid= "{{ sla_id }}" + + // schedule info fields are optional. 
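+ // each sub-block below is individually optional: buildTimeMachineSchedule in
+ // nutanix/common_era_schema.go only copies the sub-blocks present in the config,
+ // so an omitted block (e.g. yearlyschedule) is simply not sent to NDB.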
+ schedule { + snapshottimeofday{ + hours= 16 + minutes= 0 + seconds= 0 + } + continuousschedule{ + enabled=true + logbackupinterval= 30 + snapshotsperday=1 + } + weeklyschedule{ + enabled=true + dayofweek= "WEDNESDAY" + } + monthlyschedule{ + enabled = true + dayofmonth= "27" + } + quartelyschedule{ + enabled=true + startmonth="JANUARY" + dayofmonth= 27 + } + yearlyschedule{ + enabled= false + dayofmonth= 31 + month="DECEMBER" + } + } + } +} diff --git a/examples/ndb/database_instance/terraform.tfvars b/examples/ndb/database_instance/terraform.tfvars new file mode 100644 index 000000000..4f5de990b --- /dev/null +++ b/examples/ndb/database_instance/terraform.tfvars @@ -0,0 +1,4 @@ +#define values to the variables to be used in terraform file +ndb_password = "password" +ndb_endpoint = "10.xx.xx.xx" +ndb_username = "username" diff --git a/examples/ndb/database_instance/variables.tf b/examples/ndb/database_instance/variables.tf new file mode 100644 index 000000000..1a0cb89bf --- /dev/null +++ b/examples/ndb/database_instance/variables.tf @@ -0,0 +1,10 @@ +#define the type of variables to be used in terraform file +variable "ndb_username" { + type = string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} diff --git a/go.mod b/go.mod index e5b9ab0d0..ca112ffe8 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ require ( github.com/client9/misspell v0.3.4 github.com/golang/snappy v0.0.1 // indirect github.com/golangci/golangci-lint v1.25.0 + github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hashicorp/terraform-plugin-go v0.5.0 // indirect github.com/hashicorp/terraform-plugin-sdk/v2 v2.10.1 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba // indirect diff --git a/nutanix/common_era_schema.go b/nutanix/common_era_schema.go new file mode 100644 index 000000000..949b5f3d5 --- /dev/null +++ b/nutanix/common_era_schema.go @@ -0,0 +1,419 @@ +package nutanix + +import ( + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func timeMachineInfoSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + MaxItems: 1, + ForceNew: true, + Optional: true, + Description: "sample description for time machine info", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "description of time machine's name", + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: "description of time machine", + }, + "slaid": { + Type: schema.TypeString, + Required: true, + Description: "description of SLA ID.", + }, + "autotunelogdrive": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "description of autoTuneLogDrive", + }, + "schedule": { + Type: schema.TypeSet, + MaxItems: 1, + Required: true, + Description: "description of schedule of time machine", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "snapshottimeofday": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "description of schedule of time machine", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Required: true, + }, + "minutes": { + Type: schema.TypeInt, + Required: true, + }, + "seconds": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + "continuousschedule": { + Type: schema.TypeList, + Optional: true, +
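// MaxItems of 1 below keeps this sub-block a single object rather than a repeated list. +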
MaxItems: 1, + Description: "description of schedule of time machine", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + "logbackupinterval": { + Type: schema.TypeInt, + Required: true, + }, + "snapshotsperday": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + "weeklyschedule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "description of schedule of time machine", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + "dayofweek": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "monthlyschedule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "description of schedule of time machine", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + "dayofmonth": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + "quartelyschedule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "description of schedule of time machine", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + "startmonth": { + Type: schema.TypeString, + Required: true, + }, + "dayofmonth": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + "yearlyschedule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "description of schedule of time machine", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + "dayofmonth": { + Type: schema.TypeInt, + Required: true, + }, + "month": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Description: "description of schedule of time machine", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + } +} + +func buildTimeMachineSchedule(set *schema.Set) *era.Schedule { + d := set.List() + schedMap := d[0].(map[string]interface{}) + sch := &era.Schedule{} + + if cs, ok := schedMap["snapshottimeofday"]; ok && len(cs.([]interface{})) > 0 { + conSch := &era.Snapshottimeofday{} + + icmps := (cs.([]interface{}))[0].(map[string]interface{}) + if hours, cok := icmps["hours"]; cok { + conSch.Hours = hours.(int) + } + + if mins, tok := icmps["minutes"]; tok { + conSch.Minutes = mins.(int) + } + if secs, tok := icmps["seconds"]; tok { + conSch.Seconds = secs.(int) + } + + sch.Snapshottimeofday = conSch + } + + if cs, ok := schedMap["continuousschedule"]; ok && len(cs.([]interface{})) > 0 { + conSch := &era.Continuousschedule{} + + icmps := (cs.([]interface{}))[0].(map[string]interface{}) + if enabled, cok := icmps["enabled"]; cok { + conSch.Enabled = enabled.(bool) + } + + if mins, tok := icmps["logbackupinterval"]; tok { + conSch.Logbackupinterval = mins.(int) + } + if secs, tok := icmps["snapshotsperday"]; tok { + conSch.Snapshotsperday = secs.(int) + } + + sch.Continuousschedule = conSch + } + + if cs, ok := schedMap["weeklyschedule"]; ok && len(cs.([]interface{})) > 0 { + conSch := &era.Weeklyschedule{} + + icmps := (cs.([]interface{}))[0].(map[string]interface{}) + if hours, cok := icmps["enabled"]; cok { + conSch.Enabled = hours.(bool) + } + + if mins, tok := icmps["dayofweek"]; tok { + conSch.Dayofweek = mins.(string) + } + + sch.Weeklyschedule = 
conSch + } + + if cs, ok := schedMap["monthlyschedule"]; ok && len(cs.([]interface{})) > 0 { + conSch := &era.Monthlyschedule{} + + icmps := (cs.([]interface{}))[0].(map[string]interface{}) + if hours, cok := icmps["enabled"]; cok { + conSch.Enabled = hours.(bool) + } + + if mins, tok := icmps["dayofmonth"]; tok { + conSch.Dayofmonth = mins.(int) + } + + sch.Monthlyschedule = conSch + } + + if cs, ok := schedMap["quartelyschedule"]; ok && len(cs.([]interface{})) > 0 { + conSch := &era.Quartelyschedule{} + + icmps := (cs.([]interface{}))[0].(map[string]interface{}) + if hours, cok := icmps["enabled"]; cok { + conSch.Enabled = hours.(bool) + } + + if mins, tok := icmps["dayofmonth"]; tok { + conSch.Dayofmonth = mins.(int) + } + if secs, tok := icmps["startmonth"]; tok { + conSch.Startmonth = secs.(string) + } + + sch.Quartelyschedule = conSch + } + + if cs, ok := schedMap["yearlyschedule"]; ok && len(cs.([]interface{})) > 0 { + conSch := &era.Yearlyschedule{} + + icmps := (cs.([]interface{}))[0].(map[string]interface{}) + if hours, cok := icmps["enabled"]; cok { + conSch.Enabled = hours.(bool) + } + + if mins, tok := icmps["dayofmonth"]; tok { + conSch.Dayofmonth = mins.(int) + } + if secs, tok := icmps["month"]; tok { + conSch.Month = secs.(string) + } + + sch.Yearlyschedule = conSch + } + + return sch +} + +func buildTimeMachineFromResourceData(set *schema.Set) *era.Timemachineinfo { + d := set.List() + tMap := d[0].(map[string]interface{}) + return &era.Timemachineinfo{ + Name: tMap["name"].(string), + Description: tMap["description"].(string), + Slaid: tMap["slaid"].(string), + Schedule: *buildTimeMachineSchedule(tMap["schedule"].(*schema.Set)), // NULL Pointer check + Tags: tMap["tags"].(*schema.Set).List(), + Autotunelogdrive: tMap["autotunelogdrive"].(bool), + } +} + +func nodesSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Computed: true, + Description: "Description of nodes", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "properties": { + Type: schema.TypeSet, + Optional: true, + ConfigMode: schema.SchemaConfigModeAttr, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "vmname": { + Type: schema.TypeString, + Required: true, + ConfigMode: schema.SchemaConfigModeAttr, + }, + "networkprofileid": { + Type: schema.TypeString, + Required: true, + ConfigMode: schema.SchemaConfigModeAttr, + }, + "dbserverid": { // When createDbServer is false, we can use this field to set the target db server. 
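+ // Leaving this empty (the Default below) presumably lets NDB provision a new DB server VM for the node, per the createDbServer note above.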
+ Type: schema.TypeString, + Description: "", + Optional: true, + ConfigMode: schema.SchemaConfigModeAttr, + Default: "", + }, + }, + }, + } +} + +func buildNodesFromResourceData(d *schema.Set) []*era.Nodes { + argSet := d.List() + args := []*era.Nodes{} + + for _, arg := range argSet { + args = append(args, &era.Nodes{ + Properties: arg.(map[string]interface{})["properties"].(*schema.Set).List(), + Vmname: arg.(map[string]interface{})["vmname"].(string), + Networkprofileid: arg.(map[string]interface{})["networkprofileid"].(string), + DatabaseServerID: arg.(map[string]interface{})["dbserverid"].(string), + }) + } + return args +} + +func actionArgumentsSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Description: "description of action arguments", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Description: "", + Required: true, + }, + "value": { + Type: schema.TypeString, + Description: "", + Required: true, + }, + }, + }, + } +} + +func tryToConvertBool(v interface{}) (bool, bool) { + str := v.(string) + b, err := strconv.ParseBool(str) + if err != nil { + return false, false + } + return b, true +} + +func buildActionArgumentsFromResourceData(d *schema.Set, args []*era.Actionarguments) []*era.Actionarguments { + argSet := d.List() + for _, arg := range argSet { + var val interface{} + val = arg.(map[string]interface{})["value"] + b, ok := tryToConvertBool(arg.(map[string]interface{})["value"]) + if ok { + val = b + } + + args = append(args, &era.Actionarguments{ + Name: arg.(map[string]interface{})["name"].(string), + Value: val, + }) + } + return args +} diff --git a/nutanix/common_schema_validation.go b/nutanix/common_schema_validation.go new file mode 100644 index 000000000..43efcd7a1 --- /dev/null +++ b/nutanix/common_schema_validation.go @@ -0,0 +1,28 @@ +package nutanix + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +var requiredResourceFields map[string][]string = map[string][]string{ + "era_provision_database": {"databasetype", "softwareprofileid", "softwareprofileversionid", "computeprofileid", + "networkprofileid", "dbparameterprofileid", "nxclusterid", "sshpublickey", "timemachineinfo", "nodes"}, +} + +func schemaValidation(resourceName string, d *schema.ResourceData) error { + var diagMap []string + if vals, ok := requiredResourceFields[resourceName]; ok { + for _, attr := range vals { + if _, ok := d.GetOk(attr); !ok { + diagMap = append(diagMap, attr) + } + } + + if diagMap != nil { + return fmt.Errorf("missing required fields are %s for %s", diagMap, resourceName) + } + } + return nil +} diff --git a/nutanix/config.go b/nutanix/config.go index 8fd13fa26..902fee668 100644 --- a/nutanix/config.go +++ b/nutanix/config.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/terraform-providers/terraform-provider-nutanix/client" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" foundation_central "github.com/terraform-providers/terraform-provider-nutanix/client/fc" "github.com/terraform-providers/terraform-provider-nutanix/client/foundation" "github.com/terraform-providers/terraform-provider-nutanix/client/karbon" @@ -26,6 +27,9 @@ type Config struct { FoundationEndpoint string // Required field for connecting to foundation VM APIs FoundationPort string // Port for connecting to foundation VM APIs RequiredFields map[string][]string // RequiredFields is client name to its required fields mapping for validations and 
usage in every client + NdbEndpoint string + NdbUsername string + NdbPassword string } // Client ... @@ -41,6 +45,9 @@ func (c *Config) Client() (*Client, error) { ProxyURL: c.ProxyURL, FoundationEndpoint: c.FoundationEndpoint, FoundationPort: c.FoundationPort, + NdbEndpoint: c.NdbEndpoint, + NdbUsername: c.NdbUsername, + NdbPassword: c.NdbPassword, RequiredFields: c.RequiredFields, } @@ -60,12 +67,17 @@ func (c *Config) Client() (*Client, error) { if err != nil { return nil, err } + eraClient, err := era.NewEraClient(configCreds) + if err != nil { + return nil, err + } return &Client{ WaitTimeout: c.WaitTimeout, API: v3Client, KarbonAPI: karbonClient, FoundationClientAPI: foundationClient, FoundationCentral: fcClient, + Era: eraClient, }, nil } @@ -76,4 +88,5 @@ type Client struct { FoundationClientAPI *foundation.Client WaitTimeout int64 FoundationCentral *foundation_central.Client + Era *era.Client } diff --git a/nutanix/data_source_nutanix_ndb_cluster.go b/nutanix/data_source_nutanix_ndb_cluster.go new file mode 100644 index 000000000..4006b064b --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_cluster.go @@ -0,0 +1,272 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceNutanixEraCluster() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixEraClusterRead, + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"cluster_name"}, + }, + "cluster_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"cluster_id"}, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "unique_name": { + Type: schema.TypeString, + Computed: true, + }, + "ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "fqdns": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "cloud_type": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "version": { + Type: schema.TypeString, + Computed: true, + }, + "hypervisor_type": { + Type: schema.TypeString, + Computed: true, + }, + "hypervisor_version": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ref_id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "reference_count": { + Type: schema.TypeInt, + Computed: true, + }, + "username": { + Type: schema.TypeString, + Computed: true, + }, + "password": { + Type: schema.TypeString, + Computed: true, + }, + "cloud_info": { + Type: schema.TypeString, + Computed: true, + }, + "resource_config": { + Type: schema.TypeList, + 
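// resource_config surfaces the storage and memory threshold percentages that the NDB clusters API reports. +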
Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_threshold_percentage": { + Type: schema.TypeFloat, + Computed: true, + }, + "memory_threshold_percentage": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + "management_server_info": { + Type: schema.TypeString, + Computed: true, + }, + "entity_counts": { + Type: schema.TypeString, + Computed: true, + }, + "healthy": { + Type: schema.TypeBool, + Computed: true, + }, + }, + } +} + +func dataSourceNutanixEraClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + clusterID, iok := d.GetOk("cluster_id") + clusterName, nok := d.GetOk("cluster_name") + + if !iok && !nok { + return diag.Errorf("please provide one of cluster_id or cluster_name attributes") + } + + resp, err := conn.Service.GetCluster(ctx, clusterID.(string), clusterName.(string)) + + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("id", resp.ID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("unique_name", resp.Uniquename); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("ip_addresses", resp.Ipaddresses); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("fqdns", resp.Fqdns); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("nx_cluster_uuid", resp.Nxclusteruuid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + if err := d.Set("cloud_type", resp.Cloudtype); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_created", resp.Datecreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.Datemodified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.Ownerid); err != nil { + return diag.FromErr(err) + } + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("version", resp.Version); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("hypervisor_type", resp.Hypervisortype); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("hypervisor_version", resp.Hypervisorversion); err != nil { + return diag.FromErr(err) + } + if err := d.Set("properties", flattenClusterProperties(resp.Properties)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("reference_count", resp.Referencecount); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("username", resp.Username); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("password", resp.Password); err != nil { + return diag.FromErr(err) + } + if err := d.Set("cloud_info", resp.Cloudinfo); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("resource_config", flattenResourceConfig(resp.Resourceconfig)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("management_server_info", resp.Managementserverinfo); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("entity_counts", resp.Entitycounts); err != nil { + return diag.FromErr(err) + } + if err := d.Set("healthy", resp.Healthy); err != nil { + return diag.FromErr(err) + } + + d.SetId(*resp.ID) + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_cluster_test.go b/nutanix/data_source_nutanix_ndb_cluster_test.go new file mode 100644 index 000000000..890bd7b15 --- /dev/null +++
b/nutanix/data_source_nutanix_ndb_cluster_test.go @@ -0,0 +1,68 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraClusterDataSource_basic(t *testing.T) { + // r := randIntBetween(31, 40) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraClusterDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "status", "UP"), + resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "cloud_type", "NTNX"), + resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "hypervisor_type", "AHV"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_cluster.test", "properties.#"), + resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "healthy", "true"), + ), + }, + }, + }) +} + +func TestAccEraClusterDataSource_ByName(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraClusterDataSourceConfigByName(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "status", "UP"), + resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "cloud_type", "NTNX"), + resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "hypervisor_type", "AHV"), + resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "properties.#", "0"), + resource.TestCheckResourceAttr("data.nutanix_ndb_cluster.test", "healthy", "true"), + ), + }, + }, + }) +} + +func testAccEraClusterDataSourceConfig() string { + return ` + data "nutanix_ndb_clusters" "test1" {} + + data "nutanix_ndb_cluster" "test" { + depends_on = [data.nutanix_ndb_clusters.test1] + cluster_id = data.nutanix_ndb_clusters.test1.clusters[0].id + } + ` +} + +func testAccEraClusterDataSourceConfigByName() string { + return ` + data "nutanix_ndb_clusters" "test1" {} + + data "nutanix_ndb_cluster" "test" { + depends_on = [data.nutanix_ndb_clusters.test1] + cluster_name = data.nutanix_ndb_clusters.test1.clusters[0].name + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_clusters.go b/nutanix/data_source_nutanix_ndb_clusters.go new file mode 100644 index 000000000..10dd92939 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_clusters.go @@ -0,0 +1,252 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + Era "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func dataSourceNutanixEraClusters() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixEraClustersRead, + Schema: map[string]*schema.Schema{ + "clusters": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "unique_name": { + Type: schema.TypeString, + Computed: true, + }, + "ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "fqdns": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_uuid": { + Type: 
schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "cloud_type": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "version": { + Type: schema.TypeString, + Computed: true, + }, + "hypervisor_type": { + Type: schema.TypeString, + Computed: true, + }, + "hypervisor_version": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ref_id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "reference_count": { + Type: schema.TypeInt, + Computed: true, + }, + "username": { + Type: schema.TypeString, + Computed: true, + }, + "password": { + Type: schema.TypeString, + Computed: true, + }, + "cloud_info": { + Type: schema.TypeString, + Computed: true, + }, + "resource_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_threshold_percentage": { + Type: schema.TypeFloat, + Computed: true, + }, + "memory_threshold_percentage": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + "management_server_info": { + Type: schema.TypeString, + Computed: true, + }, + "entity_counts": { + Type: schema.TypeString, + Computed: true, + }, + "healthy": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixEraClustersRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.ListClusters(ctx) + if err != nil { + return diag.FromErr(err) + } + + if e := d.Set("clusters", flattenClustersResponse(resp)); e != nil { + return diag.FromErr(e) + } + + uuid, er := uuid.GenerateUUID() + + if er != nil { + return diag.Errorf("Error generating UUID for era clusters: %+v", er) + } + d.SetId(uuid) + return nil +} + +func flattenClustersResponse(crsp *Era.ClusterListResponse) []map[string]interface{} { + if crsp != nil { + lst := []map[string]interface{}{} + for _, v := range *crsp { + d := map[string]interface{}{} + d["id"] = v.ID + d["name"] = v.Name + d["unique_name"] = v.Uniquename + d["ip_addresses"] = utils.StringValueSlice(v.Ipaddresses) + d["fqdns"] = v.Fqdns + d["nx_cluster_uuid"] = v.Nxclusteruuid + d["description"] = v.Description + d["cloud_type"] = v.Cloudtype + d["date_created"] = v.Datecreated + d["date_modified"] = v.Datemodified + d["owner_id"] = v.Ownerid + d["status"] = v.Status + d["version"] = v.Version + d["hypervisor_type"] = v.Hypervisortype + d["hypervisor_version"] = v.Hypervisorversion + d["properties"] = flattenClusterProperties(v.Properties) + d["reference_count"] = v.Referencecount + d["username"] = v.Username + d["password"] = v.Password + d["cloud_info"] = v.Cloudinfo + d["resource_config"] = flattenResourceConfig(v.Resourceconfig) + d["management_server_info"] = v.Managementserverinfo + d["entity_counts"] = v.Entitycounts + d["healthy"] = v.Healthy +
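// NOTE: username and password from the clusters API are written into Terraform state here; state files should therefore be treated as sensitive. +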
lst = append(lst, d) + } + return lst + } + return nil +} + +func flattenClusterProperties(erp []*Era.Properties) []map[string]interface{} { + if len(erp) > 0 { + res := make([]map[string]interface{}, len(erp)) + + for k, v := range erp { + ents := make(map[string]interface{}) + ents["name"] = v.Name + ents["value"] = v.Value + ents["secure"] = v.Secure + ents["ref_id"] = v.RefID + ents["description"] = v.Description + res[k] = ents + } + return res + } + return nil +} + +func flattenResourceConfig(rcfg *Era.Resourceconfig) []map[string]interface{} { + specList := make([]map[string]interface{}, 0) + + if rcfg != nil { + specs := make(map[string]interface{}) + + specs["memory_threshold_percentage"] = rcfg.Memorythresholdpercentage + specs["storage_threshold_percentage"] = rcfg.Storagethresholdpercentage + + specList = append(specList, specs) + return specList + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_clusters_test.go b/nutanix/data_source_nutanix_ndb_clusters_test.go new file mode 100644 index 000000000..293a41679 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_clusters_test.go @@ -0,0 +1,29 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraClustersDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraClustersDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_clusters.test", "clusters.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_clusters.test", "clusters.0.id"), + ), + }, + }, + }) +} + +func testAccEraClustersDataSourceConfig() string { + return ` + data "nutanix_ndb_clusters" "test" { } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_database.go b/nutanix/data_source_nutanix_ndb_database.go new file mode 100644 index 000000000..f63cf6cfd --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_database.go @@ -0,0 +1,2137 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + Era "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func dataSourceNutanixEraDatabase() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixEraDatabaseRead, + Schema: map[string]*schema.Schema{ + "database_id": { + Type: schema.TypeString, + Required: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "clustered": { + Type: schema.TypeBool, + Computed: true, + }, + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "era_created": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "placeholder": { + Type: schema.TypeBool, + Computed: true, + }, + "database_name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: 
schema.TypeString, + Computed: true, + }, + "database_cluster_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "database_status": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_logical_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_zone": { + Type: schema.TypeString, + Computed: true, + }, + "info": dataSourceEraDatabaseInfo(), + "group_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "metadata": dataSourceEraDBInstanceMetadata(), + "metric": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "category": { + Type: schema.TypeString, + Computed: true, + }, + "parent_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_source_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "time_machine": dataSourceEraTimeMachine(), + "dbserver_logical_cluster": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_nodes": dataSourceEraDatabaseNodes(), + "linked_databases": dataSourceEraLinkedDatabases(), + "databases": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_group_state_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func dataSourceNutanixEraDatabaseRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + dUUID, ok := d.GetOk("database_id") + if !ok { + return diag.Errorf("please provide `database_id`") + } + + resp, err := conn.Service.GetDatabaseInstance(ctx, dUUID.(string)) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("id", resp.ID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.Ownerid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_created", resp.Datecreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.Datemodified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("properties", flattenDBInstanceProperties(resp.Properties)); err != nil { + return diag.FromErr(err) + } + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clone", resp.Clone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clustered", resp.Clustered); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("internal", resp.Internal); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("placeholder", resp.Placeholder); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_name", resp.Databasename); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_cluster_type", resp.Databaseclustertype); err != nil { + return diag.FromErr(err) + } + + if err := 
d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_status", resp.Databasestatus); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_logical_cluster_id", resp.Dbserverlogicalclusterid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine_id", resp.Timemachineid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_time_machine_id", resp.Parenttimemachineid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_zone", resp.Timezone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("info", flattenDBInfo(resp.Info)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("group_info", resp.GroupInfo); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metadata", flattenDBInstanceMetadata(resp.Metadata)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metric", resp.Metric); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("category", resp.Category); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_database_id", resp.ParentDatabaseID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_source_database_id", resp.ParentSourceDatabaseID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("lcm_config", flattenDBLcmConfig(resp.Lcmconfig)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine", flattenDBTimeMachine(resp.TimeMachine)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_logical_cluster", resp.Dbserverlogicalcluster); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_nodes", flattenDBNodes(resp.Databasenodes)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("linked_databases", flattenDBLinkedDbs(resp.Linkeddatabases)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("databases", resp.Databases); err != nil { + return diag.FromErr(err) + } + if err := d.Set("database_group_state_info", resp.DatabaseGroupStateInfo); err != nil { + return diag.FromErr(err) + } + + d.SetId(resp.ID) + return nil +} + +func flattenDBInstanceProperties(pr []*Era.DBInstanceProperties) []map[string]interface{} { + if len(pr) > 0 { + res := []map[string]interface{}{} + for _, v := range pr { + prop := map[string]interface{}{} + + prop["description"] = v.Description + prop["name"] = v.Name + prop["ref_id"] = v.RefID + prop["secure"] = v.Secure + prop["value"] = v.Value + + res = append(res, prop) + } + return res + } + return nil +} + +func flattenDBInstanceMetadata(pr *Era.DBInstanceMetadata) []map[string]interface{} { + if pr != nil { + pdbmeta := make([]map[string]interface{}, 0) + + pmeta := make(map[string]interface{}) + pmeta["secure_info"] = pr.Secureinfo + pmeta["info"] = pr.Info + pmeta["deregister_info"] = pr.Deregisterinfo + pmeta["tm_activate_operation_id"] = pr.Tmactivateoperationid + pmeta["created_dbservers"] = pr.Createddbservers + pmeta["registered_dbservers"] = pr.Registereddbservers + pmeta["last_refresh_timestamp"] = pr.Lastrefreshtimestamp + pmeta["last_requested_refresh_timestamp"] = pr.Lastrequestedrefreshtimestamp + pmeta["capability_reset_time"] = pr.CapabilityResetTime + pmeta["state_before_refresh"] = pr.Statebeforerefresh + pmeta["state_before_restore"] = pr.Statebeforerestore + pmeta["state_before_scaling"] = pr.Statebeforescaling + pmeta["log_catchup_for_restore_dispatched"] = pr.Logcatchupforrestoredispatched + 
pmeta["last_log_catchup_for_restore_operation_id"] = pr.Lastlogcatchupforrestoreoperationid + pmeta["base_size_computed"] = pr.BaseSizeComputed + pmeta["original_database_name"] = pr.Originaldatabasename + pmeta["provision_operation_id"] = pr.ProvisionOperationID + pmeta["source_snapshot_id"] = pr.SourceSnapshotID + pmeta["pitr_based"] = pr.PitrBased + pmeta["refresh_blocker_info"] = pr.RefreshBlockerInfo + pmeta["deregistered_with_delete_time_machine"] = pr.DeregisteredWithDeleteTimeMachine + + pdbmeta = append(pdbmeta, pmeta) + return pdbmeta + } + return nil +} + +func flattenDBNodes(pr []Era.Databasenodes) []map[string]interface{} { + if len(pr) > 0 { + res := make([]map[string]interface{}, len(pr)) + + for k, v := range pr { + db := map[string]interface{}{} + + db["access_level"] = v.AccessLevel + db["database_id"] = v.Databaseid + db["database_status"] = v.Databasestatus + db["date_created"] = v.Datecreated + db["date_modified"] = v.Datemodified + db["dbserver_id"] = v.Dbserverid + db["description"] = v.Description + db["id"] = v.ID + db["metadata"] = v.Metadata + db["name"] = v.Name + db["owner_id"] = v.Ownerid + db["primary"] = v.Primary + db["properties"] = v.Properties + db["protection_domain"] = flattenDBProtectionDomain(v.Protectiondomain) + db["protection_domain_id"] = v.Protectiondomainid + db["software_installation_id"] = v.Softwareinstallationid + db["status"] = v.Status + db["tags"] = flattenDBTags(v.Tags) + + res[k] = db + } + return res + } + return nil +} + +func flattenDBLinkedDbs(pr []Era.Linkeddatabases) []map[string]interface{} { + if len(pr) > 0 { + res := make([]map[string]interface{}, len(pr)) + + for k, v := range pr { + ld := map[string]interface{}{} + + ld["database_name"] = v.DatabaseName + ld["database_status"] = v.Databasestatus + ld["date_created"] = v.Datecreated + ld["date_modified"] = v.Datemodified + ld["description"] = v.Description + ld["id"] = v.ID + ld["metadata"] = v.Metadata + ld["metric"] = v.Metric + ld["name"] = v.Name + ld["owner_id"] = v.Ownerid + ld["parent_database_id"] = v.ParentDatabaseID + ld["parent_linked_database_id"] = v.ParentLinkedDatabaseID + ld["snapshot_id"] = v.SnapshotID + ld["status"] = v.Status + ld["timezone"] = v.TimeZone + + res[k] = ld + } + return res + } + return nil +} + +func flattenDBProtectionDomain(pr *Era.Protectiondomain) []map[string]interface{} { + pDList := make([]map[string]interface{}, 0) + if pr != nil { + pmeta := make(map[string]interface{}) + + pmeta["cloud_id"] = pr.Cloudid + pmeta["date_created"] = pr.Datecreated + pmeta["date_modified"] = pr.Datemodified + pmeta["description"] = pr.Description + pmeta["era_created"] = pr.Eracreated + pmeta["id"] = pr.ID + pmeta["name"] = pr.Name + pmeta["owner_id"] = pr.Ownerid + pmeta["primary_host"] = pr.PrimaryHost + pmeta["properties"] = flattenDBInstanceProperties(pr.Properties) + pmeta["status"] = pr.Status + if pr.Tags != nil { + pmeta["tags"] = flattenDBTags(pr.Tags) + } + pmeta["type"] = pr.Type + + pDList = append(pDList, pmeta) + return pDList + } + return nil +} + +func flattenDBTags(pr []*Era.Tags) []map[string]interface{} { + if len(pr) > 0 { + res := make([]map[string]interface{}, len(pr)) + + for k, v := range pr { + tag := map[string]interface{}{} + + tag["entity_id"] = v.EntityID + tag["entity_type"] = v.EntityType + tag["tag_id"] = v.TagID + tag["tag_name"] = v.TagName + tag["value"] = v.Value + + res[k] = tag + } + return res + } + return nil +} + +func flattenDBInfo(pr *Era.Info) []map[string]interface{} { + infoList := 
make([]map[string]interface{}, 0) + if pr != nil { + info := make(map[string]interface{}) + + if pr.Secureinfo != nil { + info["secure_info"] = pr.Secureinfo + } + if pr.Info != nil { + info["bpg_configs"] = flattenBpgConfig(pr.Info.BpgConfigs) + } + infoList = append(infoList, info) + return infoList + } + return nil +} + +func flattenBpgConfig(pr *Era.BpgConfigs) []map[string]interface{} { + bpgList := make([]map[string]interface{}, 0) + if pr != nil { + bpg := make(map[string]interface{}) + + var bgdbParams []map[string]interface{} + if pr.BpgDBParam != nil { + bg := make(map[string]interface{}) + bg["maintenance_work_mem"] = utils.StringValue(&pr.BpgDBParam.MaintenanceWorkMem) + bg["effective_cache_size"] = utils.StringValue(&pr.BpgDBParam.EffectiveCacheSize) + bg["max_parallel_workers_per_gather"] = utils.StringValue(&pr.BpgDBParam.MaxParallelWorkersPerGather) + bg["max_worker_processes"] = utils.StringValue(&pr.BpgDBParam.MaxWorkerProcesses) + bg["shared_buffers"] = utils.StringValue(&pr.BpgDBParam.SharedBuffers) + bg["work_mem"] = utils.StringValue(&pr.BpgDBParam.WorkMem) + bgdbParams = append(bgdbParams, bg) + } + bpg["bpg_db_param"] = bgdbParams + + var storg []map[string]interface{} + if pr.Storage != nil { + str := make(map[string]interface{}) + + var storgArch []map[string]interface{} + if pr.Storage.ArchiveStorage != nil { + arc := make(map[string]interface{}) + + arc["size"] = pr.Storage.ArchiveStorage.Size + storgArch = append(storgArch, arc) + } + str["archive_storage"] = storgArch + + var stdisk []map[string]interface{} + if pr.Storage.DataDisks != nil { + arc := make(map[string]interface{}) + + arc["count"] = pr.Storage.DataDisks.Count + stdisk = append(stdisk, arc) + } + str["data_disks"] = stdisk + + var stgLog []map[string]interface{} + if pr.Storage.LogDisks != nil { + arc := make(map[string]interface{}) + + arc["size"] = pr.Storage.LogDisks.Size + arc["count"] = pr.Storage.LogDisks.Count + stgLog = append(stgLog, arc) + } + str["log_disks"] = stgLog + + storg = append(storg, str) + } + bpg["storage"] = storg + + var vmProp []map[string]interface{} + if pr.VMProperties != nil { + vmp := make(map[string]interface{}) + vmp["dirty_background_ratio"] = pr.VMProperties.DirtyBackgroundRatio + vmp["dirty_expire_centisecs"] = pr.VMProperties.DirtyExpireCentisecs + vmp["dirty_ratio"] = pr.VMProperties.DirtyRatio + vmp["dirty_writeback_centisecs"] = pr.VMProperties.DirtyWritebackCentisecs + vmp["nr_hugepages"] = pr.VMProperties.NrHugepages + vmp["overcommit_memory"] = pr.VMProperties.OvercommitMemory + vmp["swappiness"] = pr.VMProperties.Swappiness + + vmProp = append(vmProp, vmp) + } + + bpg["vm_properties"] = vmProp + + bpgList = append(bpgList, bpg) + return bpgList + } + return nil +} + +func flattenDBLcmConfig(pr *Era.LcmConfig) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if pr != nil { + lcm := map[string]interface{}{} + + lcm["expiry_details"] = flattenEraExpiryDetails(pr.ExpiryDetails) + lcm["refresh_details"] = flattenEraRefreshDetails(pr.RefreshDetails) + + var preLcmComm []map[string]interface{} + if pr.PreDeleteCommand != nil { + pre := map[string]interface{}{} + + pre["command"] = pr.PreDeleteCommand.Command + + preLcmComm = append(preLcmComm, pre) + } + lcm["pre_delete_command"] = preLcmComm + + var postLcmComm []map[string]interface{} + if pr.PostDeleteCommand != nil { + pre := map[string]interface{}{} + + pre["command"] = pr.PostDeleteCommand.Command + + postLcmComm = append(postLcmComm, pre) + } + lcm["post_delete_command"] =
postLcmComm + + res = append(res, lcm) + return res + } + return nil +} + +func flattenEraExpiryDetails(pr *Era.DBExpiryDetails) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if pr != nil { + expiry := map[string]interface{}{} + + expiry["delete_database"] = pr.DeleteDatabase + expiry["delete_time_machine"] = pr.DeleteTimeMachine + expiry["delete_vm"] = pr.DeleteVM + expiry["effective_timestamp"] = pr.EffectiveTimestamp + expiry["expire_in_days"] = pr.ExpireInDays + expiry["expiry_date_timezone"] = pr.ExpiryDateTimezone + expiry["expiry_timestamp"] = pr.ExpiryTimestamp + expiry["remind_before_in_days"] = pr.RemindBeforeInDays + expiry["user_created"] = pr.UserCreated + + res = append(res, expiry) + return res + } + return nil +} + +func flattenEraRefreshDetails(pr *Era.DBRefreshDetails) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if pr != nil { + refresh := map[string]interface{}{} + + refresh["last_refresh_date"] = pr.LastRefreshDate + refresh["next_refresh_date"] = pr.NextRefreshDate + refresh["refresh_date_timezone"] = pr.RefreshDateTimezone + refresh["refresh_in_days"] = pr.RefreshInDays + refresh["refresh_in_hours"] = pr.RefreshInHours + refresh["refresh_in_months"] = pr.RefreshInMonths + refresh["refresh_time"] = pr.RefreshTime + + res = append(res, refresh) + return res + } + return nil +} + +func flattenDBTimeMachine(pr *Era.TimeMachine) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if pr != nil { + tmac := map[string]interface{}{} + + tmac["id"] = pr.ID + tmac["name"] = pr.Name + tmac["description"] = pr.Description + tmac["owner_id"] = pr.OwnerID + tmac["date_created"] = pr.DateCreated + tmac["date_modified"] = pr.DateModified + tmac["access_level"] = pr.AccessLevel + tmac["properties"] = flattenDBInstanceProperties(pr.Properties) + tmac["tags"] = flattenDBTags(pr.Tags) + tmac["clustered"] = pr.Clustered + tmac["clone"] = pr.Clone + tmac["internal"] = pr.Internal + tmac["database_id"] = pr.DatabaseID + tmac["type"] = pr.Type + tmac["category"] = pr.Category + tmac["status"] = pr.Status + tmac["ea_status"] = pr.EaStatus + tmac["scope"] = pr.Scope + tmac["sla_id"] = pr.SLAID + tmac["schedule_id"] = pr.ScheduleID + tmac["metric"] = pr.Metric + // tmac["sla_update_metadata"] = pr.SLAUpdateMetadata + tmac["database"] = pr.Database + tmac["clones"] = pr.Clones + tmac["source_nx_clusters"] = pr.SourceNxClusters + tmac["sla_update_in_progress"] = pr.SLAUpdateInProgress + tmac["sla"] = flattenDBSLA(pr.SLA) + tmac["schedule"] = flattenSchedule(pr.Schedule) + tmac["metadata"] = flattenTimeMachineMetadata(pr.Metadata) + + res = append(res, tmac) + return res + } + return nil +} + +func flattenDBSLA(pr *Era.ListSLAResponse) []map[string]interface{} { + res := []map[string]interface{}{} + if pr != nil { + sla := map[string]interface{}{} + + sla["id"] = pr.ID + sla["name"] = pr.Name + sla["continuous_retention"] = pr.Continuousretention + sla["daily_retention"] = pr.Dailyretention + sla["date_modified"] = pr.Datemodified + sla["date_created"] = pr.Datecreated + sla["description"] = pr.Description + sla["monthly_retention"] = pr.Monthlyretention + sla["owner_id"] = pr.Ownerid + sla["quarterly_retention"] = pr.Quarterlyretention + sla["reference_count"] = pr.Referencecount + sla["system_sla"] = pr.Systemsla + sla["unique_name"] = pr.Uniquename + sla["weekly_retention"] = pr.Weeklyretention + sla["yearly_retention"] = pr.Yearlyretention + + res = append(res, sla) + return res + } + return nil +} + +func 
flattenSchedule(pr *Era.Schedule) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if pr != nil { + sch := map[string]interface{}{} + + sch["continuous_schedule"] = flattenContinousSch(pr.Continuousschedule) + sch["date_created"] = pr.Datecreated + sch["date_modified"] = pr.Datemodified + sch["description"] = pr.Description + sch["global_policy"] = pr.GlobalPolicy + sch["id"] = pr.ID + sch["monthly_schedule"] = flattenMonthlySchedule(pr.Monthlyschedule) + sch["name"] = pr.Name + sch["owner_id"] = pr.OwnerID + sch["quartely_schedule"] = flattenQuartelySchedule(pr.Quartelyschedule) + sch["reference_count"] = pr.ReferenceCount + sch["snapshot_time_of_day"] = flattenSnapshotTimeOfDay(pr.Snapshottimeofday) + sch["start_time"] = pr.StartTime + sch["system_policy"] = pr.SystemPolicy + sch["time_zone"] = pr.TimeZone + sch["unique_name"] = pr.UniqueName + sch["weekly_schedule"] = flattenWeeklySchedule(pr.Weeklyschedule) + sch["yearly_schedule"] = flattenYearlylySchedule(pr.Yearlyschedule) + sch["daily_schedule"] = flattenDailySchedule(pr.Dailyschedule) + + res = append(res, sch) + return res + } + return nil +} + +func flattenContinousSch(pr *Era.Continuousschedule) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if pr != nil { + cr := map[string]interface{}{} + + cr["enabled"] = pr.Enabled + cr["log_backup_interval"] = pr.Logbackupinterval + cr["snapshots_per_day"] = pr.Snapshotsperday + + res = append(res, cr) + return res + } + return nil +} + +func flattenMonthlySchedule(pr *Era.Monthlyschedule) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if pr != nil { + cr := map[string]interface{}{} + + cr["enabled"] = pr.Enabled + cr["day_of_month"] = pr.Dayofmonth + + res = append(res, cr) + return res + } + return nil +} + +func flattenQuartelySchedule(pr *Era.Quartelyschedule) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if pr != nil { + cr := map[string]interface{}{} + + cr["enabled"] = pr.Enabled + cr["day_of_month"] = pr.Dayofmonth + cr["start_month"] = pr.Startmonth + + res = append(res, cr) + return res + } + return nil +} + +func flattenSnapshotTimeOfDay(pr *Era.Snapshottimeofday) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if pr != nil { + cr := map[string]interface{}{} + + cr["hours"] = pr.Hours + cr["minutes"] = pr.Minutes + cr["seconds"] = pr.Seconds + + res = append(res, cr) + return res + } + return nil +} + +func flattenWeeklySchedule(pr *Era.Weeklyschedule) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if pr != nil { + cr := map[string]interface{}{} + + cr["enabled"] = pr.Enabled + cr["day_of_week"] = pr.Dayofweek + + res = append(res, cr) + return res + } + return nil +} + +func flattenYearlylySchedule(pr *Era.Yearlyschedule) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if pr != nil { + cr := map[string]interface{}{} + + cr["enabled"] = pr.Enabled + cr["day_of_month"] = pr.Dayofmonth + cr["month"] = pr.Month + + res = append(res, cr) + return res + } + return nil +} + +func flattenDailySchedule(pr *Era.Dailyschedule) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if pr != nil { + cr := map[string]interface{}{} + + cr["enabled"] = pr.Enabled + res = append(res, cr) + return res + } + return nil +} + +func flattenTimeMachineMetadata(pr *Era.TimeMachineMetadata) []map[string]interface{} { + if pr != nil { + tmMeta := make([]map[string]interface{}, 0) + tm := make(map[string]interface{}) + 
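// Flatten the nested metadata struct into a single map entry so it lines up with the computed schema.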
+ tm["secure_info"] = pr.SecureInfo + tm["info"] = pr.Info + tm["deregister_info"] = pr.DeregisterInfo + tm["capability_reset_time"] = pr.CapabilityResetTime + tm["auto_heal"] = pr.AutoHeal + tm["auto_heal_snapshot_count"] = pr.AutoHealSnapshotCount + tm["auto_heal_log_catchup_count"] = pr.AutoHealLogCatchupCount + tm["first_snapshot_captured"] = pr.FirstSnapshotCaptured + tm["first_snapshot_dispatched"] = pr.FirstSnapshotDispatched + tm["last_snapshot_time"] = pr.LastSnapshotTime + tm["last_auto_snapshot_time"] = pr.LastAutoSnapshotTime + tm["last_snapshot_operation_id"] = pr.LastSnapshotOperationID + tm["last_auto_snapshot_operation_id"] = pr.LastAutoSnapshotOperationID + tm["last_successful_snapshot_operation_id"] = pr.LastSuccessfulSnapshotOperationID + tm["snapshot_successive_failure_count"] = pr.SnapshotSuccessiveFailureCount + tm["last_heal_snapshot_operation"] = pr.LastHealSnapshotOperation + tm["last_log_catchup_time"] = pr.LastLogCatchupTime + tm["last_successful_log_catchup_operation_id"] = pr.LastSuccessfulLogCatchupOperationID + tm["last_log_catchup_operation_id"] = pr.LastLogCatchupOperationID + tm["log_catchup_successive_failure_count"] = pr.LogCatchupSuccessiveFailureCount + tm["last_pause_time"] = pr.LastPauseTime + tm["last_pause_by_force"] = pr.LastPauseByForce + tm["last_resume_time"] = pr.LastResumeTime + tm["last_pause_reason"] = pr.LastPauseReason + tm["state_before_restore"] = pr.StateBeforeRestore + tm["last_health_alerted_time"] = pr.LastHealthAlertedTime + tm["last_ea_breakdown_time"] = pr.LastEaBreakdownTime + tm["authorized_dbservers"] = pr.AuthorizedDbservers + tm["last_heal_time"] = pr.LastHealTime + tm["last_heal_system_triggered"] = pr.LastHealSystemTriggered + + tmMeta = append(tmMeta, tm) + return tmMeta + } + return nil +} + +func dataSourceEraDatabaseProperties() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "ref_id": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + } +} + +func dataSourceEraDatabaseInfo() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "bpg_configs": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_disks": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + "log_disks": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeFloat, + Computed: true, + }, + "size": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + "archive_storage": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size": { + Type: schema.TypeFloat, + Computed: 
true, + }, + }, + }, + }, + }, + }, + }, + "vm_properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nr_hugepages": { + Type: schema.TypeFloat, + Computed: true, + }, + "overcommit_memory": { + Type: schema.TypeFloat, + Computed: true, + }, + "dirty_ratio": { + Type: schema.TypeFloat, + Computed: true, + }, + "dirty_background_ratio": { + Type: schema.TypeFloat, + Computed: true, + }, + "dirty_expire_centisecs": { + Type: schema.TypeFloat, + Computed: true, + }, + "dirty_writeback_centisecs": { + Type: schema.TypeFloat, + Computed: true, + }, + "swappiness": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + "bpg_db_param": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "shared_buffers": { + Type: schema.TypeString, + Computed: true, + }, + "maintenance_work_mem": { + Type: schema.TypeString, + Computed: true, + }, + "work_mem": { + Type: schema.TypeString, + Computed: true, + }, + "effective_cache_size": { + Type: schema.TypeString, + Computed: true, + }, + "max_worker_processes": { + Type: schema.TypeString, + Computed: true, + }, + "max_parallel_workers_per_gather": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceEraLCMConfig() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expiry_details": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "remind_before_in_days": { + Type: schema.TypeInt, + Computed: true, + }, + "effective_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "expiry_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "expiry_date_timezone": { + Type: schema.TypeString, + Computed: true, + }, + "user_created": { + Type: schema.TypeBool, + Computed: true, + }, + "expire_in_days": { + Type: schema.TypeInt, + Computed: true, + }, + "delete_database": { + Type: schema.TypeBool, + Computed: true, + }, + "delete_time_machine": { + Type: schema.TypeBool, + Computed: true, + }, + "delete_vm": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "refresh_details": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "refresh_in_days": { + Type: schema.TypeInt, + Computed: true, + }, + "refresh_in_hours": { + Type: schema.TypeInt, + Computed: true, + }, + "refresh_in_months": { + Type: schema.TypeInt, + Computed: true, + }, + "last_refresh_date": { + Type: schema.TypeString, + Computed: true, + }, + "next_refresh_date": { + Type: schema.TypeString, + Computed: true, + }, + "refresh_time": { + Type: schema.TypeString, + Computed: true, + }, + "refresh_date_timezone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "pre_delete_command": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "command": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "post_delete_command": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "command": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceEraTimeMachine() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "access_level": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "clustered": { + Type: schema.TypeBool, + Computed: true, + }, + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "database_id": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "category": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "ea_status": { + Type: schema.TypeString, + Computed: true, + }, + "scope": { + Type: schema.TypeString, + Computed: true, + }, + "sla_id": { + Type: schema.TypeString, + Computed: true, + }, + "schedule_id": { + Type: schema.TypeString, + Computed: true, + }, + "database": { + Type: schema.TypeString, + Computed: true, + }, + "clones": { + Type: schema.TypeString, + Computed: true, + }, + "source_nx_clusters": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "sla_update_in_progress": { + Type: schema.TypeBool, + Computed: true, + }, + "metric": { + Type: schema.TypeString, + Computed: true, + }, + "sla_update_metadata": { + Type: schema.TypeString, + Computed: true, + }, + "sla": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "unique_name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "system_sla": { + Type: schema.TypeBool, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + + "continuous_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "daily_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "weekly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "monthly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "quarterly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "yearly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "reference_count": { + Type: schema.TypeInt, + Computed: true, + }, + "pitr_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "current_active_frequency": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "unique_name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + 
Computed: true, + }, + "system_policy": { + Type: schema.TypeBool, + Computed: true, + }, + "global_policy": { + Type: schema.TypeBool, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_time_of_day": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Computed: true, + }, + "minutes": { + Type: schema.TypeInt, + Computed: true, + }, + "seconds": { + Type: schema.TypeInt, + Computed: true, + }, + "extra": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "continuous_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_backup_interval": { + Type: schema.TypeInt, + Computed: true, + }, + "snapshots_per_day": { + Type: schema.TypeInt, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "weekly_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day_of_week": { + Type: schema.TypeString, + Computed: true, + }, + "day_of_week_value": { + Type: schema.TypeString, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "monthly_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day_of_month": { + Type: schema.TypeInt, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "yearly_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "month": { + Type: schema.TypeString, + Computed: true, + }, + "month_value": { + Type: schema.TypeString, + Computed: true, + }, + "day_of_month": { + Type: schema.TypeInt, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "quartely_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_month": { + Type: schema.TypeString, + Computed: true, + }, + "start_month_value": { + Type: schema.TypeString, + Computed: true, + }, + "day_of_month": { + Type: schema.TypeInt, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "daily_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "reference_count": { + Type: schema.TypeInt, + Computed: true, + }, + "start_time": { + Type: schema.TypeString, + Computed: true, + }, + "time_zone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "capability_reset_time": { + Type: schema.TypeString, + Computed: true, + }, + "auto_heal": { + Type: schema.TypeBool, + Computed: true, + 
}, + "auto_heal_snapshot_count": { + Type: schema.TypeInt, + Computed: true, + }, + "auto_heal_log_catchup_count": { + Type: schema.TypeInt, + Computed: true, + }, + "first_snapshot_captured": { + Type: schema.TypeBool, + Computed: true, + }, + "first_snapshot_dispatched": { + Type: schema.TypeBool, + Computed: true, + }, + "last_snapshot_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_auto_snapshot_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_snapshot_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "last_auto_snapshot_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "last_successful_snapshot_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_successive_failure_count": { + Type: schema.TypeInt, + Computed: true, + }, + "last_heal_snapshot_operation": { + Type: schema.TypeString, + Computed: true, + }, + "last_log_catchup_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_successful_log_catchup_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "last_log_catchup_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "log_catchup_successive_failure_count": { + Type: schema.TypeInt, + Computed: true, + }, + "last_pause_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_pause_by_force": { + Type: schema.TypeBool, + Computed: true, + }, + "last_resume_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_pause_reason": { + Type: schema.TypeString, + Computed: true, + }, + "state_before_restore": { + Type: schema.TypeString, + Computed: true, + }, + "last_health_alerted_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_ea_breakdown_time": { + Type: schema.TypeString, + Computed: true, + }, + "authorized_dbservers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "last_heal_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_heal_system_triggered": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceEraDatabaseNodes() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "access_level": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "tags": dataSourceEraDBInstanceTags(), + "database_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "database_status": { + Type: schema.TypeString, + Computed: true, + }, + "primary": { + Type: schema.TypeBool, + Computed: true, + }, + "dbserver_id": { + Type: schema.TypeString, + Computed: true, + }, + "software_installation_id": { + Type: schema.TypeString, + Computed: true, + }, + "protection_domain_id": { + Type: schema.TypeString, + Computed: true, + }, + "metadata": { + Type: schema.TypeMap, + Computed: 
true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "dbserver": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "protection_domain": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "cloud_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "primary_host": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "ref_id": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "era_created": { + Type: schema.TypeBool, + Computed: true, + }, + "assoc_entities": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceEraLinkedDatabases() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "database_name": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "database_status": { + Type: schema.TypeString, + Computed: true, + }, + "parent_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_linked_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + "info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "metadata": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: 
schema.TypeString, + }, + }, + "metric": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + } +} + +func dataSourceEraDBInstanceMetadata() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "tm_activate_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "created_dbservers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "registered_dbservers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "last_refresh_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "last_requested_refresh_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "capability_reset_time": { + Type: schema.TypeString, + Computed: true, + }, + "state_before_refresh": { + Type: schema.TypeString, + Computed: true, + }, + "state_before_restore": { + Type: schema.TypeString, + Computed: true, + }, + "state_before_scaling": { + Type: schema.TypeString, + Computed: true, + }, + "log_catchup_for_restore_dispatched": { + Type: schema.TypeBool, + Computed: true, + }, + "last_log_catchup_for_restore_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "base_size_computed": { + Type: schema.TypeBool, + Computed: true, + }, + "original_database_name": { + Type: schema.TypeString, + Computed: true, + }, + "provision_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "source_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "pitr_based": { + Type: schema.TypeBool, + Computed: true, + }, + "refresh_blocker_info": { + Type: schema.TypeString, + Computed: true, + }, + "deregistered_with_delete_time_machine": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + } +} + +func dataSourceEraDBInstanceTags() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tag_id": { + Type: schema.TypeString, + Computed: true, + }, + "entity_id": { + Type: schema.TypeString, + Computed: true, + }, + "entity_type": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "tag_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + } +} diff --git a/nutanix/data_source_nutanix_ndb_database_test.go b/nutanix/data_source_nutanix_ndb_database_test.go new file mode 100644 index 000000000..8e268df02 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_database_test.go @@ -0,0 +1,36 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraDatabaseDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabaseDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttr("data.nutanix_ndb_database.test", "metadata.#", "1"), + resource.TestCheckResourceAttr("data.nutanix_ndb_database.test", "time_zone", "UTC"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_database.test", "placeholder"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_database.test", "name"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_database.test", "linked_databases.#"), + ), + }, + }, + }) +} + +func testAccEraDatabaseDataSourceConfig() string { + return ` + data "nutanix_ndb_databases" "test1" {} + + data "nutanix_ndb_database" "test" { + database_id = data.nutanix_ndb_databases.test1.database_instances.0.id + } +` +} diff --git a/nutanix/data_source_nutanix_ndb_databases.go b/nutanix/data_source_nutanix_ndb_databases.go new file mode 100644 index 000000000..edba77b9c --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_databases.go @@ -0,0 +1,231 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixEraDatabases() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixEraDatabaseIntancesRead, + Schema: map[string]*schema.Schema{ + "database_instances": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "clustered": { + Type: schema.TypeBool, + Computed: true, + }, + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "era_created": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "placeholder": { + Type: schema.TypeBool, + Computed: true, + }, + "database_name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "database_cluster_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "database_status": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_logical_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_zone": { + Type: schema.TypeString, + Computed: true, + }, + "info": dataSourceEraDatabaseInfo(), + "group_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "metadata": dataSourceEraDBInstanceMetadata(), + "metric": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "category": { + Type: schema.TypeString, + Computed: true, + }, + "parent_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_source_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "lcm_config": 
dataSourceEraLCMConfig(), + "time_machine": dataSourceEraTimeMachine(), + "dbserver_logical_cluster": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_nodes": dataSourceEraDatabaseNodes(), + "linked_databases": dataSourceEraLinkedDatabases(), + "databases": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_group_state_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixEraDatabaseIntancesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.ListDatabaseInstance(ctx) + if err != nil { + return diag.FromErr(err) + } + + if e := d.Set("database_instances", flattenDatabaseIntancesList(resp)); e != nil { + return diag.FromErr(e) + } + + uuid, er := uuid.GenerateUUID() + + if er != nil { + return diag.Errorf("Error generating UUID for era clusters: %+v", err) + } + d.SetId(uuid) + return nil +} + +func flattenDatabaseIntancesList(db *era.ListDatabaseInstance) []map[string]interface{} { + if db != nil { + lst := []map[string]interface{}{} + for _, data := range *db { + d := map[string]interface{}{} + + d["category"] = data.Category + d["clone"] = data.Clone + d["clustered"] = data.Clustered + d["database_group_state_info"] = data.DatabaseGroupStateInfo + d["database_cluster_type"] = data.Databaseclustertype + d["database_name"] = data.Databasename + d["database_nodes"] = flattenDBNodes(data.Databasenodes) + d["databases"] = data.Databases + d["database_status"] = data.Databasestatus + d["date_created"] = data.Datecreated + d["date_modified"] = data.Datemodified + d["dbserver_logical_cluster"] = data.Dbserverlogicalcluster + d["dbserver_logical_cluster_id"] = data.Dbserverlogicalclusterid + d["description"] = data.Description + d["group_info"] = data.GroupInfo + d["id"] = data.ID + d["info"] = flattenDBInfo(data.Info) + d["internal"] = data.Internal + d["lcm_config"] = flattenDBLcmConfig(data.Lcmconfig) + d["linked_databases"] = flattenDBLinkedDbs(data.Linkeddatabases) + d["metadata"] = flattenDBInstanceMetadata(data.Metadata) + d["metric"] = data.Metric + d["name"] = data.Name + d["owner_id"] = data.Ownerid + d["parent_database_id"] = data.ParentDatabaseID + d["parent_source_database_id"] = data.ParentSourceDatabaseID + d["parent_time_machine_id"] = data.Parenttimemachineid + d["placeholder"] = data.Placeholder + d["properties"] = flattenDBInstanceProperties(data.Properties) + d["status"] = data.Status + d["tags"] = flattenDBTags(data.Tags) + d["time_machine"] = flattenDBTimeMachine(data.TimeMachine) + d["time_machine_id"] = data.Timemachineid + d["time_zone"] = data.Timezone + d["type"] = data.Type + + lst = append(lst, d) + } + return lst + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_databases_test.go b/nutanix/data_source_nutanix_ndb_databases_test.go new file mode 100644 index 000000000..a07b80ca5 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_databases_test.go @@ -0,0 +1,32 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraDatabasesDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: 
testAccEraDatabasesDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.metadata.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.time_zone"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.id"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.linked_databases.#"), + ), + }, + }, + }) +} + +func testAccEraDatabasesDataSourceConfig() string { + return ` + data "nutanix_ndb_databases" "test" {} +` +} diff --git a/nutanix/data_source_nutanix_ndb_profile.go b/nutanix/data_source_nutanix_ndb_profile.go new file mode 100644 index 000000000..8c8bacc7e --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_profile.go @@ -0,0 +1,408 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + Era "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func dataSourceNutanixEraProfile() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixEraProfileRead, + Schema: map[string]*schema.Schema{ + "engine": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"oracle_database", + "postgres_database", "sqlserver_database", "mariadb_database", + "mysql_database"}, false), + }, + "profile_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"Software", "Compute", + "Network", "Database_Parameter"}, false), + }, + "profile_id": { + Type: schema.TypeString, + Optional: true, + }, + "profile_name": { + Type: schema.TypeString, + Optional: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "owner": { + Type: schema.TypeString, + Computed: true, + }, + "engine_type": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "topology": { + Type: schema.TypeString, + Computed: true, + }, + "db_version": { + Type: schema.TypeString, + Computed: true, + }, + "system_profile": { + Type: schema.TypeBool, + Computed: true, + }, + "assoc_db_servers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "assoc_databases": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "latest_version": { + Type: schema.TypeString, + Computed: true, + }, + "latest_version_id": { + Type: schema.TypeString, + Computed: true, + }, + "versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "owner": { + Type: schema.TypeString, + Computed: true, 
+ }, + "engine_type": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "topology": { + Type: schema.TypeString, + Computed: true, + }, + "db_version": { + Type: schema.TypeString, + Computed: true, + }, + "system_profile": { + Type: schema.TypeBool, + Computed: true, + }, + "version": { + Type: schema.TypeString, + Computed: true, + }, + "profile_id": { + Type: schema.TypeString, + Computed: true, + }, + "published": { + Type: schema.TypeBool, + Computed: true, + }, + "deprecated": { + Type: schema.TypeBool, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "properties_map": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "version_cluster_association": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "profile_version_id": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "optimized_for_provisioning": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "cluster_availability": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "profile_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceNutanixEraProfileRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + engine := "" + profileType := "" + pID := "" + pName := "" + if engineType, ok := d.GetOk("engine"); ok { + engine = engineType.(string) + } + + if ptype, ok := d.GetOk("profile_type"); ok { + profileType = ptype.(string) + } + + profileID, pIDOk := d.GetOk("profile_id") + + profileName, pNameOk := d.GetOk("profile_name") + + if !pIDOk && !pNameOk { + return diag.Errorf("please provide one of profile_id or profile_name attributes") + } + if pIDOk { + pID = profileID.(string) + } + if pNameOk { + pName = profileName.(string) + } + + resp, err := conn.Service.GetProfiles(ctx, engine, profileType, pID, pName) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("id", resp.ID); err != 
nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner", resp.Owner); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("engine_type", resp.Enginetype); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("topology", resp.Topology); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("db_version", resp.Dbversion); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("system_profile", resp.Systemprofile); err != nil { + return diag.FromErr(err) + } + if err := d.Set("assoc_db_servers", resp.Assocdbservers); err != nil { + return diag.FromErr(err) + } + if err := d.Set("assoc_databases", resp.Assocdatabases); err != nil { + return diag.FromErr(err) + } + if err := d.Set("latest_version", resp.Latestversion); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("latest_version_id", resp.Latestversionid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("versions", flattenVersions(resp.Versions)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("cluster_availability", flattenClusterAvailability(resp.Clusteravailability)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("nx_cluster_id", resp.Nxclusterid); err != nil { + return diag.FromErr(err) + } + + d.SetId(utils.StringValue(resp.ID)) + return nil +} + +func flattenClusterAvailability(erc []*Era.Clusteravailability) []map[string]interface{} { + if len(erc) > 0 { + res := make([]map[string]interface{}, len(erc)) + + for k, v := range erc { + clsAv := map[string]interface{}{} + + clsAv["nx_cluster_id"] = v.Nxclusterid + clsAv["date_created"] = v.Datecreated + clsAv["date_modified"] = v.Datemodified + clsAv["owner_id"] = v.Ownerid + clsAv["profile_id"] = v.Profileid + clsAv["status"] = v.Status + + res[k] = clsAv + } + return res + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_profile_test.go b/nutanix/data_source_nutanix_ndb_profile_test.go new file mode 100644 index 000000000..f1ecc5f97 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_profile_test.go @@ -0,0 +1,100 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraProfileDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraProfileDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "versions.#", "1"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "status", "READY"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "system_profile", "true"), + ), + }, + }, + }) +} + +func TestAccEraProfileDataSource_ById(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraProfileDataSourceConfigByID(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "versions.#", "1"), + 
resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "status", "READY"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "system_profile", "true"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "status", "READY"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "engine_type", "postgres_database"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "type", "Software"), + ), + }, + }, + }) +} + +func TestAccEraProfileDataSource_ByName(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraProfileDataSourceConfigByName(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "versions.#", "1"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "status", "READY"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "system_profile", "true"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "status", "READY"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "engine_type", "postgres_database"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profile.test", "type", "Database_Parameter"), + ), + }, + }, + }) +} + +func testAccEraProfileDataSourceConfig() string { + return ` + data "nutanix_ndb_profiles" "test1" {} + + data "nutanix_ndb_profile" "test" { + profile_id = data.nutanix_ndb_profiles.test1.profiles.0.id + } + ` +} + +func testAccEraProfileDataSourceConfigByID() string { + return ` + data "nutanix_ndb_profiles" "test1" { + engine = "postgres_database" + profile_type = "Software" + } + + data "nutanix_ndb_profile" "test" { + profile_id = data.nutanix_ndb_profiles.test1.profiles.0.id + } + ` +} + +func testAccEraProfileDataSourceConfigByName() string { + return ` + data "nutanix_ndb_profiles" "test1" { + engine = "postgres_database" + profile_type = "Database_Parameter" + } + + data "nutanix_ndb_profile" "test" { + profile_name = data.nutanix_ndb_profiles.test1.profiles.0.name + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_profiles.go b/nutanix/data_source_nutanix_ndb_profiles.go new file mode 100644 index 000000000..0a5b446be --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_profiles.go @@ -0,0 +1,437 @@ +package nutanix + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + Era "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func dataSourceNutanixEraProfiles() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixEraProfilesRead, + Schema: map[string]*schema.Schema{ + "engine": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"oracle_database", + "postgres_database", "sqlserver_database", "mariadb_database", + "mysql_database"}, false), + }, + "profile_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"Software", "Compute", + "Network", "Database_Parameter"}, false), + }, + "profiles": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: 
true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "owner": { + Type: schema.TypeString, + Computed: true, + }, + "engine_type": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "topology": { + Type: schema.TypeString, + Computed: true, + }, + "db_version": { + Type: schema.TypeString, + Computed: true, + }, + "system_profile": { + Type: schema.TypeBool, + Computed: true, + }, + "assoc_db_servers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "assoc_databases": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "latest_version": { + Type: schema.TypeString, + Computed: true, + }, + "latest_version_id": { + Type: schema.TypeString, + Computed: true, + }, + "versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "owner": { + Type: schema.TypeString, + Computed: true, + }, + "engine_type": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "topology": { + Type: schema.TypeString, + Computed: true, + }, + "db_version": { + Type: schema.TypeString, + Computed: true, + }, + "system_profile": { + Type: schema.TypeBool, + Computed: true, + }, + "version": { + Type: schema.TypeString, + Computed: true, + }, + "profile_id": { + Type: schema.TypeString, + Computed: true, + }, + "published": { + Type: schema.TypeBool, + Computed: true, + }, + "deprecated": { + Type: schema.TypeBool, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "properties_map": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "version_cluster_association": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "profile_version_id": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "optimized_for_provisioning": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "cluster_availability": { + Type: schema.TypeList, + 
Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "profile_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixEraProfilesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + engine := "" + profileType := "" + + if engineType, ok := d.GetOk("engine"); ok { + engine = engineType.(string) + } + + if ptype, ok := d.GetOk("profile_type"); ok { + profileType = ptype.(string) + } + + //check for profile type and engine + _, eror := dataSourceEraProfileEngineDiff(ctx, d, meta) + if eror != nil { + return diag.FromErr(eror) + } + + resp, err := conn.Service.ListProfiles(ctx, engine, profileType) + if err != nil { + return diag.FromErr(err) + } + + if e := d.Set("profiles", flattenProfilesResponse(resp)); err != nil { + return diag.FromErr(e) + } + + uuid, er := uuid.GenerateUUID() + + if er != nil { + return diag.Errorf("Error generating UUID for era clusters: %+v", err) + } + d.SetId(uuid) + return nil +} + +func flattenVersions(erv []*Era.Versions) []map[string]interface{} { + if len(erv) > 0 { + res := make([]map[string]interface{}, len(erv)) + + for k, v := range erv { + ents := make(map[string]interface{}) + ents["id"] = v.ID + ents["name"] = v.Name + ents["description"] = v.Description + + ents["status"] = v.Status + ents["owner"] = v.Owner + ents["engine_type"] = v.Enginetype + + ents["type"] = v.Type + ents["topology"] = v.Topology + ents["db_version"] = v.Dbversion + + ents["system_profile"] = v.Systemprofile + ents["version"] = v.Version + ents["profile_id"] = v.Profileid + + ents["published"] = v.Published + ents["deprecated"] = v.Deprecated + + ents["properties"] = flattenProperties(v.Properties) + ents["properties_map"] = utils.ConvertMapString(v.Propertiesmap) + ents["version_cluster_association"] = flattenClusterAssociation(v.VersionClusterAssociation) + res[k] = ents + } + return res + } + return nil +} + +func flattenProperties(erp []*Era.Properties) []map[string]interface{} { + if len(erp) > 0 { + res := make([]map[string]interface{}, len(erp)) + + for k, v := range erp { + ents := make(map[string]interface{}) + ents["name"] = v.Name + ents["value"] = v.Value + ents["secure"] = v.Secure + res[k] = ents + } + return res + } + return nil +} + +func flattenProfilesResponse(erp *Era.ProfileListResponse) []map[string]interface{} { + if erp != nil { + lst := []map[string]interface{}{} + for _, v := range *erp { + d := map[string]interface{}{} + d["id"] = v.ID + d["name"] = v.Name + d["description"] = v.Description + d["status"] = v.Status + d["owner"] = v.Owner + d["engine_type"] = v.Enginetype + d["type"] = v.Type + d["topology"] = v.Topology + d["db_version"] = v.Dbversion + d["system_profile"] = v.Systemprofile + d["latest_version"] = v.Latestversion + d["latest_version_id"] = v.Latestversionid + d["versions"] = flattenVersions(v.Versions) + + lst = append(lst, d) + } + return lst + } + return nil +} + +func flattenClusterAssociation(erc []*Era.VersionClusterAssociation) []map[string]interface{} { + if len(erc) 
> 0 { + res := make([]map[string]interface{}, len(erc)) + + for k, v := range erc { + ercs := map[string]interface{}{} + + ercs["nx_cluster_id"] = v.NxClusterID + ercs["date_created"] = v.DateCreated + ercs["date_modified"] = v.DateModified + ercs["owner_id"] = v.OwnerID + ercs["status"] = v.Status + ercs["profile_version_id"] = v.ProfileVersionID + ercs["properties"] = flattenProperties(v.Properties) + ercs["optimized_for_provisioning"] = v.OptimizedForProvisioning + + res[k] = ercs + } + return res + } + return nil +} + +func dataSourceEraProfileEngineDiff(ctx context.Context, d *schema.ResourceData, m interface{}) (bool, error) { + // check for profile type + if ptype, vok := d.GetOk("profile_type"); vok { + if ptype == "Compute" { + if _, sok := d.GetOk("engine"); sok { + return false, fmt.Errorf("compute profile type should not be used if engine is given") + } + } + } + return true, nil +} diff --git a/nutanix/data_source_nutanix_ndb_profiles_test.go b/nutanix/data_source_nutanix_ndb_profiles_test.go new file mode 100644 index 000000000..9555af0b0 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_profiles_test.go @@ -0,0 +1,87 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraProfilesDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraProfilesDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_profiles.test", "profiles.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_profiles.test", "profiles.0.id"), + ), + }, + }, + }) +} + +func TestAccEraProfilesDataSource_ByEngine(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraProfilesDataSourceConfigByEngine(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_profiles.test", "profiles.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_profiles.test", "profiles.0.id"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profiles.test", "profiles.0.status", "READY"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profiles.test", "profiles.0.engine_type", "postgres_database"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profiles.test", "profiles.0.system_profile", "true"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profiles.test", "profiles.0.topology", "ALL"), + ), + }, + }, + }) +} + +func TestAccEraProfilesDataSource_ByProfileType(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraProfilesDataSourceConfigByProfileType(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_profiles.test", "profiles.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_profiles.test", "profiles.0.id"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profiles.test", "profiles.0.status", "READY"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profiles.test", "profiles.0.engine_type", "postgres_database"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profiles.test", "profiles.0.type", "Network"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profiles.test", 
"profiles.0.system_profile", "false"), + resource.TestCheckResourceAttr("data.nutanix_ndb_profiles.test", "profiles.0.topology", "ALL"), + ), + }, + }, + }) +} + +func testAccEraProfilesDataSourceConfig() string { + return ` + data "nutanix_ndb_profiles" "test" { } + ` +} + +func testAccEraProfilesDataSourceConfigByEngine() string { + return ` + data "nutanix_ndb_profiles" "test" { + engine = "postgres_database" + } + ` +} + +func testAccEraProfilesDataSourceConfigByProfileType() string { + return ` + data "nutanix_ndb_profiles" "test" { + engine = "postgres_database" + profile_type = "Network" + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_sla.go b/nutanix/data_source_nutanix_ndb_sla.go new file mode 100644 index 000000000..de958ff43 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_sla.go @@ -0,0 +1,169 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceNutanixEraSLA() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixEraSLARead, + Schema: map[string]*schema.Schema{ + "sla_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"sla_name"}, + }, + "sla_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"sla_id"}, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "unique_name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "system_sla": { + Type: schema.TypeBool, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "continuous_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "daily_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "weekly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "monthly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "quartely_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "yearly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "reference_count": { + Type: schema.TypeInt, + Computed: true, + }, + "pitr_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "current_active_frequency": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceNutanixEraSLARead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + slaID, iok := d.GetOk("sla_id") + slaName, nok := d.GetOk("sla_name") + + if !iok && !nok { + return diag.Errorf("please provide one of sla_id or sla_name attributes") + } + + resp, err := conn.Service.GetSLA(ctx, slaID.(string), slaName.(string)) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("id", resp.ID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + if err := d.Set("unique_name", resp.Uniquename); err != nil { + return diag.FromErr(err) + } + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.Ownerid); err != nil { + return diag.FromErr(err) + } + if err := d.Set("system_sla", resp.Systemsla); err != nil { + return diag.FromErr(err) + } + if err 
:= d.Set("date_created", resp.Datecreated); err != nil { + return diag.FromErr(err) + } + if err := d.Set("date_modified", resp.Datemodified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("continuous_retention", resp.Continuousretention); err != nil { + return diag.FromErr(err) + } + if err := d.Set("daily_retention", resp.Dailyretention); err != nil { + return diag.FromErr(err) + } + if err := d.Set("weekly_retention", resp.Weeklyretention); err != nil { + return diag.FromErr(err) + } + if err := d.Set("monthly_retention", resp.Monthlyretention); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("quartely_retention", resp.Quarterlyretention); err != nil { + return diag.FromErr(err) + } + if err := d.Set("yearly_retention", resp.Yearlyretention); err != nil { + return diag.FromErr(err) + } + if err := d.Set("reference_count", resp.Referencecount); err != nil { + return diag.FromErr(err) + } + if err := d.Set("pitr_enabled", resp.PitrEnabled); err != nil { + return diag.FromErr(err) + } + if err := d.Set("current_active_frequency", resp.CurrentActiveFrequency); err != nil { + return diag.FromErr(err) + } + + d.SetId(*resp.ID) + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_sla_test.go b/nutanix/data_source_nutanix_ndb_sla_test.go new file mode 100644 index 000000000..380dd2e79 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_sla_test.go @@ -0,0 +1,67 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraSLADataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraSLADataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_slas.test1", "slas.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_slas.test1", "slas.0.name"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_slas.test1", "slas.0.unique_name"), + resource.TestCheckResourceAttr("data.nutanix_ndb_sla.test", "system_sla", "true"), + resource.TestCheckResourceAttr("data.nutanix_ndb_sla.test", "yearly_retention", "0"), + resource.TestCheckResourceAttr("data.nutanix_ndb_sla.test", "pitr_enabled", "true"), + ), + }, + }, + }) +} + +func TestAccEraSLADataSource_ByName(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraSLADataSourceConfigByName(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_slas.test1", "slas.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_slas.test1", "slas.0.name"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_slas.test1", "slas.0.unique_name"), + resource.TestCheckResourceAttr("data.nutanix_ndb_sla.test", "system_sla", "true"), + resource.TestCheckResourceAttr("data.nutanix_ndb_sla.test", "yearly_retention", "0"), + resource.TestCheckResourceAttr("data.nutanix_ndb_sla.test", "pitr_enabled", "true"), + ), + }, + }, + }) +} + +func testAccEraSLADataSourceConfig() string { + return ` + data "nutanix_ndb_slas" "test1" {} + + data "nutanix_ndb_sla" "test"{ + sla_id = data.nutanix_ndb_slas.test1.slas.0.id + } + ` +} + +func testAccEraSLADataSourceConfigByName() string { + return ` + data "nutanix_ndb_slas" "test1" {} + + data "nutanix_ndb_sla" "test"{ + sla_name = 
data.nutanix_ndb_slas.test1.slas.0.name + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_slas.go b/nutanix/data_source_nutanix_ndb_slas.go new file mode 100644 index 000000000..735600de7 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_slas.go @@ -0,0 +1,144 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + Era "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixEraSLAs() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixEraSLAsRead, + Schema: map[string]*schema.Schema{ + "slas": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "unique_name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "system_sla": { + Type: schema.TypeBool, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "continuous_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "daily_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "weekly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "monthly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "quartely_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "yearly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "reference_count": { + Type: schema.TypeInt, + Computed: true, + }, + "pitr_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "current_active_frequency": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixEraSLAsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.ListSLA(ctx) + if err != nil { + return diag.FromErr(err) + } + + if e := d.Set("slas", flattenSLAsResponse(resp)); err != nil { + return diag.FromErr(e) + } + + uuid, er := uuid.GenerateUUID() + + if er != nil { + return diag.Errorf("Error generating UUID for era clusters: %+v", err) + } + d.SetId(uuid) + return nil +} + +func flattenSLAsResponse(sla *Era.SLAResponse) []map[string]interface{} { + if sla != nil { + lst := []map[string]interface{}{} + for _, data := range *sla { + d := map[string]interface{}{} + d["id"] = data.ID + d["name"] = data.Name + d["unique_name"] = data.Uniquename + d["description"] = data.Description + d["owner_id"] = data.Ownerid + d["system_sla"] = data.Systemsla + d["date_created"] = data.Datecreated + d["date_modified"] = data.Datemodified + d["continuous_retention"] = data.Continuousretention + d["daily_retention"] = data.Dailyretention + d["weekly_retention"] = data.Weeklyretention + d["monthly_retention"] = data.Monthlyretention + d["quartely_retention"] = data.Quarterlyretention + d["yearly_retention"] = data.Yearlyretention + d["reference_count"] = data.Referencecount + d["pitr_enabled"] = data.PitrEnabled + d["current_active_frequency"] = data.CurrentActiveFrequency + lst = append(lst, d) + } + return lst + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_slas_test.go 
b/nutanix/data_source_nutanix_ndb_slas_test.go new file mode 100644 index 000000000..19f29e0e5 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_slas_test.go @@ -0,0 +1,30 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraSLAsDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraSLAsDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_slas.test", "slas.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_slas.test", "slas.0.id"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_slas.test", "slas.0.name"), + ), + }, + }, + }) +} + +func testAccEraSLAsDataSourceConfig() string { + return ` + data "nutanix_ndb_slas" "test" { } + ` +} diff --git a/nutanix/main_test.go b/nutanix/main_test.go index 8eec06ce4..197273f69 100644 --- a/nutanix/main_test.go +++ b/nutanix/main_test.go @@ -40,6 +40,8 @@ type TestConfig struct { ClusterUUID string `json:"cluster_uuid"` } `json:"destination_az"` } `json:"protection_policy"` + // sshKey required for ndb database provision test + SSHKey string `json:"ssh_key"` } type IPMIConfig struct { diff --git a/nutanix/provider.go b/nutanix/provider.go index cd5d16233..a0aa1e0d5 100644 --- a/nutanix/provider.go +++ b/nutanix/provider.go @@ -16,6 +16,7 @@ var requiredProviderFields map[string][]string = map[string][]string{ "karbon": {"username", "password", "endpoint"}, "foundation": {"foundation_endpoint"}, "foundation_central": {"username", "password", "endpoint"}, + "ndb": {"ndb_endpoint", "ndb_username", "ndb_password"}, } // Provider function returns the object that implements the terraform.ResourceProvider interface, specifically a schema.Provider @@ -44,6 +45,8 @@ func Provider() *schema.Provider { "foundation_endpoint": "endpoint for foundation VM (eg. 
Foundation VM IP)", "foundation_port": "Port for foundation VM", + + "ndb_endpoint": "endpoint for Era VM (era ip)", } // Nutanix provider schema @@ -111,6 +114,24 @@ func Provider() *schema.Provider { DefaultFunc: schema.EnvDefaultFunc("FOUNDATION_PORT", nil), Description: descriptions["foundation_port"], }, + "ndb_endpoint": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("NDB_ENDPOINT", nil), + Description: descriptions["ndb_endpoint"], + }, + "ndb_username": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("NDB_USERNAME", nil), + Description: descriptions["ndb_username"], + }, + "ndb_password": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("NDB_PASSWORD", nil), + Description: descriptions["ndb_password"], + }, }, DataSourcesMap: map[string]*schema.Resource{ "nutanix_image": dataSourceNutanixImage(), @@ -167,6 +188,14 @@ func Provider() *schema.Provider { "nutanix_floating_ip": dataSourceNutanixFloatingIP(), "nutanix_floating_ips": dataSourceNutanixFloatingIPs(), "nutanix_static_routes": dataSourceNutanixStaticRoute(), + "nutanix_ndb_sla": dataSourceNutanixEraSLA(), + "nutanix_ndb_slas": dataSourceNutanixEraSLAs(), + "nutanix_ndb_profile": dataSourceNutanixEraProfile(), + "nutanix_ndb_profiles": dataSourceNutanixEraProfiles(), + "nutanix_ndb_cluster": dataSourceNutanixEraCluster(), + "nutanix_ndb_clusters": dataSourceNutanixEraClusters(), + "nutanix_ndb_database": dataSourceNutanixEraDatabase(), + "nutanix_ndb_databases": dataSourceNutanixEraDatabases(), }, ResourcesMap: map[string]*schema.Resource{ "nutanix_virtual_machine": resourceNutanixVirtualMachine(), @@ -195,6 +224,7 @@ func Provider() *schema.Provider { "nutanix_floating_ip": resourceNutanixFloatingIP(), "nutanix_static_routes": resourceNutanixStaticRoute(), "nutanix_user_groups": resourceNutanixUserGroups(), + "nutanix_ndb_database": resourceDatabaseInstance(), }, ConfigureContextFunc: providerConfigure, } @@ -237,6 +267,9 @@ func providerConfigure(ctx context.Context, d *schema.ResourceData) (interface{} ProxyURL: d.Get("proxy_url").(string), FoundationEndpoint: d.Get("foundation_endpoint").(string), FoundationPort: d.Get("foundation_port").(string), + NdbEndpoint: d.Get("ndb_endpoint").(string), + NdbUsername: d.Get("ndb_username").(string), + NdbPassword: d.Get("ndb_password").(string), RequiredFields: requiredProviderFields, } c, err := config.Client() diff --git a/nutanix/provider_test.go b/nutanix/provider_test.go index d86f59079..7b487583e 100644 --- a/nutanix/provider_test.go +++ b/nutanix/provider_test.go @@ -49,6 +49,14 @@ func testAccFoundationPreCheck(t *testing.T) { } } +func testAccEraPreCheck(t *testing.T) { + if os.Getenv("NDB_ENDPOINT") == "" || + os.Getenv("NDB_USERNAME") == "" || + os.Getenv("NDB_PASSWORD") == "" { + t.Fatal("`NDB_USERNAME`,`NDB_PASSWORD`,`NDB_ENDPOINT` must be set for acceptance testing") + } +} + func randIntBetween(min, max int) int { rand.Seed(time.Now().UnixNano()) return rand.Intn(max-min) + min diff --git a/nutanix/resource_nutanix_nbd_database_test.go b/nutanix/resource_nutanix_nbd_database_test.go new file mode 100644 index 000000000..93fb86cd7 --- /dev/null +++ b/nutanix/resource_nutanix_nbd_database_test.go @@ -0,0 +1,125 @@ +package nutanix + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceNameDB = "nutanix_ndb_database.acctest-managed" + +func TestAccEra_basic(t *testing.T) { + name := "test-pg-inst-tf" 
+ desc := "this is desc" + vmName := "testvm12" + sshKey := testVars.SSHKey + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabaseConfig(name, desc, vmName, sshKey), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameDB, "name", name), + resource.TestCheckResourceAttr(resourceNameDB, "description", desc), + ), + }, + }, + }) +} + +func testAccEraDatabaseConfig(name, desc, vmName, sshKey string) string { + return fmt.Sprintf(` + data "nutanix_ndb_profiles" "p"{ + } + data "nutanix_ndb_slas" "slas"{} + data "nutanix_ndb_clusters" "clusters"{} + + locals { + profiles_by_type = { + for p in data.nutanix_ndb_profiles.p.profiles : p.type => p... + } + storage_profiles = { + for p in local.profiles_by_type.Storage: p.name => p + } + compute_profiles = { + for p in local.profiles_by_type.Compute: p.name => p + } + network_profiles = { + for p in local.profiles_by_type.Network: p.name => p + } + database_parameter_profiles = { + for p in local.profiles_by_type.Database_Parameter: p.name => p + } + software_profiles = { + for p in local.profiles_by_type.Software: p.name => p + } + slas = { + for p in data.nutanix_ndb_slas.slas.slas: p.name => p + } + clusters = { + for p in data.nutanix_ndb_clusters.clusters.clusters: p.name => p + } + } + + resource "nutanix_ndb_database" "acctest-managed" { + databasetype = "postgres_database" + name = "%[1]s" + description = "%[2]s" + softwareprofileid = local.software_profiles["POSTGRES_10.4_OOB"].id + softwareprofileversionid = local.software_profiles["POSTGRES_10.4_OOB"].latest_version_id + computeprofileid = local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id + networkprofileid = local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + dbparameterprofileid = local.database_parameter_profiles.DEFAULT_POSTGRES_PARAMS.id + + postgresql_info{ + listener_port = "5432" + database_size= "200" + db_password = "password" + database_names= "testdb1" + } + nxclusterid= local.clusters.EraCluster.id + sshpublickey= "%[4]s" + nodes{ + vmname= "%[3]s" + networkprofileid= local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + } + timemachineinfo { + name= "test-pg-inst" + description="" + slaid=local.slas["DEFAULT_OOB_BRONZE_SLA"].id + schedule { + snapshottimeofday{ + hours= 16 + minutes= 0 + seconds= 0 + } + continuousschedule{ + enabled=true + logbackupinterval= 30 + snapshotsperday=1 + } + weeklyschedule{ + enabled=true + dayofweek= "WEDNESDAY" + } + monthlyschedule{ + enabled = true + dayofmonth= "27" + } + quartelyschedule{ + enabled=true + startmonth="JANUARY" + dayofmonth= 27 + } + yearlyschedule{ + enabled= false + dayofmonth= 31 + month="DECEMBER" + } + } + } + } + `, name, desc, vmName, sshKey) +} diff --git a/nutanix/resource_nutanix_ndb_database.go b/nutanix/resource_nutanix_ndb_database.go new file mode 100644 index 000000000..123e396ce --- /dev/null +++ b/nutanix/resource_nutanix_ndb_database.go @@ -0,0 +1,742 @@ +package nutanix + +import ( + "context" + "fmt" + "log" + "time" + + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +var ( + eraDelay = 1 * time.Minute + EraProvisionTimeout = 35 * time.Minute +) + +func 
resourceDatabaseInstance() *schema.Resource { + return &schema.Resource{ + CreateContext: createDatabaseInstance, + ReadContext: readDatabaseInstance, + UpdateContext: updateDatabaseInstance, + DeleteContext: deleteDatabaseInstance, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(EraProvisionTimeout), + }, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + Schema: map[string]*schema.Schema{ + "database_instance_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + }, + + "databasetype": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "name": { + Type: schema.TypeString, + Required: true, + }, + + "softwareprofileid": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "softwareprofileversionid": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "computeprofileid": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "networkprofileid": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "dbparameterprofileid": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "newdbservertimezone": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "nxclusterid": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "sshpublickey": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + }, + + "createdbserver": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "dbserverid": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "clustered": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "autotunestagingdrive": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "nodecount": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + + "vm_password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + + "actionarguments": actionArgumentsSchema(), + + "timemachineinfo": timeMachineInfoSchema(), + + "nodes": nodesSchema(), + + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + + "value": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + }, + }, + }, + "postgresql_info": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "listener_port": { + Type: schema.TypeString, + Required: true, + }, + "database_size": { + Type: schema.TypeString, + Required: true, + }, + "auto_tune_staging_drive": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "allocate_pg_hugepage": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "cluster_database": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "auth_method": { + Type: schema.TypeString, + Optional: true, + Default: "md5", + }, + "database_names": { + Type: schema.TypeString, + Required: true, + }, + "db_password": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + }, + "pre_create_script": { + Type: schema.TypeString, + Optional: true, + }, + "post_create_script": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + // 
Computed values + + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "tags": dataSourceEraDBInstanceTags(), + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "era_created": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "placeholder": { + Type: schema.TypeBool, + Computed: true, + }, + "database_name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "database_cluster_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "database_status": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_logical_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_zone": { + Type: schema.TypeString, + Computed: true, + }, + "info": dataSourceEraDatabaseInfo(), + "group_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "metadata": dataSourceEraDBInstanceMetadata(), + "metric": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "category": { + Type: schema.TypeString, + Computed: true, + }, + "parent_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_source_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "time_machine": dataSourceEraTimeMachine(), + "dbserver_logical_cluster": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_nodes": dataSourceEraDatabaseNodes(), + "linked_databases": dataSourceEraLinkedDatabases(), + }, + } +} + +func createDatabaseInstance(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + // check for resource schema validation + er := schemaValidation("era_provision_database", d) + if er != nil { + return diag.FromErr(er) + } + + log.Println("Creating the request!!!") + req, err := buildEraRequest(d) + if err != nil { + return diag.FromErr(err) + } + + resp, err := conn.Service.ProvisionDatabase(ctx, req) + if err != nil { + return diag.Errorf("error while sending request...........:\n %s\n\n", err.Error()) + } + d.SetId(resp.Entityid) + + // Get Operation ID from response of ProvisionDatabaseResponse and poll for the operation to get completed. 
+ opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for db Instance (%s) to create: %s", resp.Entityid, errWaitTask) + } + + return readDatabaseInstance(ctx, d, meta) +} + +func buildEraRequest(d *schema.ResourceData) (*era.ProvisionDatabaseRequest, error) { + return &era.ProvisionDatabaseRequest{ + Databasetype: utils.StringPtr(d.Get("databasetype").(string)), + Name: utils.StringPtr(d.Get("name").(string)), + Databasedescription: utils.StringPtr(d.Get("description").(string)), + Softwareprofileid: utils.StringPtr(d.Get("softwareprofileid").(string)), + Softwareprofileversionid: utils.StringPtr(d.Get("softwareprofileversionid").(string)), + Computeprofileid: utils.StringPtr(d.Get("computeprofileid").(string)), + Networkprofileid: utils.StringPtr(d.Get("networkprofileid").(string)), + Dbparameterprofileid: utils.StringPtr(d.Get("dbparameterprofileid").(string)), + DatabaseServerID: utils.StringPtr(d.Get("dbserverid").(string)), + Timemachineinfo: buildTimeMachineFromResourceData(d.Get("timemachineinfo").(*schema.Set)), + Actionarguments: expandActionArguments(d), + Createdbserver: d.Get("createdbserver").(bool), + Nodecount: utils.IntPtr(d.Get("nodecount").(int)), + Nxclusterid: utils.StringPtr(d.Get("nxclusterid").(string)), + Sshpublickey: utils.StringPtr(d.Get("sshpublickey").(string)), + Clustered: d.Get("clustered").(bool), + Nodes: buildNodesFromResourceData(d.Get("nodes").(*schema.Set)), + Autotunestagingdrive: d.Get("autotunestagingdrive").(bool), + VMPassword: utils.StringPtr(d.Get("vm_password").(string)), + }, nil +} + +func readDatabaseInstance(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*Client).Era + if c == nil { + return diag.Errorf("era is nil") + } + + databaseInstanceID := d.Id() + + resp, err := c.Service.GetDatabaseInstance(ctx, databaseInstanceID) + if err != nil { + return diag.FromErr(err) + } + + if resp != nil { + if err = d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err = d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + props := []interface{}{} + for _, prop := range resp.Properties { + props = append(props, map[string]interface{}{ + "name": prop.Name, + "value": prop.Value, + }) + } + if err := d.Set("properties", props); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_created", resp.Datecreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.Datemodified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clone", resp.Clone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("internal", resp.Internal); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("placeholder", resp.Placeholder); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_name", 
resp.Databasename); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_cluster_type", resp.Databaseclustertype); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_status", resp.Databasestatus); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_logical_cluster_id", resp.Dbserverlogicalclusterid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine_id", resp.Timemachineid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_time_machine_id", resp.Parenttimemachineid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_zone", resp.Timezone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("info", flattenDBInfo(resp.Info)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("group_info", resp.GroupInfo); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metadata", flattenDBInstanceMetadata(resp.Metadata)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metric", resp.Metric); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("category", resp.Category); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_database_id", resp.ParentDatabaseID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_source_database_id", resp.ParentSourceDatabaseID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("lcm_config", flattenDBLcmConfig(resp.Lcmconfig)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine", flattenDBTimeMachine(resp.TimeMachine)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_logical_cluster", resp.Dbserverlogicalcluster); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_nodes", flattenDBNodes(resp.Databasenodes)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("linked_databases", flattenDBLinkedDbs(resp.Linkeddatabases)); err != nil { + return diag.FromErr(err) + } + } + return nil +} + +func updateDatabaseInstance(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*Client).Era + if c == nil { + return diag.Errorf("era is nil") + } + + dbID := d.Id() + name := d.Get("name").(string) + description := d.Get("description").(string) + + updateReq := era.UpdateDatabaseRequest{ + Name: name, + Description: description, + Tags: []interface{}{}, + Resetname: true, + Resetdescription: true, + Resettags: true, + } + + res, err := c.Service.UpdateDatabase(ctx, &updateReq, dbID) + if err != nil { + return diag.FromErr(err) + } + + if res != nil { + if err = d.Set("description", res.Description); err != nil { + return diag.FromErr(err) + } + + if err = d.Set("name", res.Name); err != nil { + return diag.FromErr(err) + } + } + + return nil +} + +func deleteDatabaseInstance(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + conn := m.(*Client).Era + if conn == nil { + return diag.Errorf("era is nil") + } + + dbID := d.Id() + + req := era.DeleteDatabaseRequest{ + Delete: true, + Remove: false, + Softremove: false, + Forced: false, + Deletetimemachine: true, + Deletelogicalcluster: true, + } + res, err := conn.Service.DeleteDatabase(ctx, &req, dbID) + if err != nil { + return diag.FromErr(err) + } + + 
log.Printf("Operation to delete instance with id %s has started, operation id: %s", dbID, res.Operationid) + opID := res.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Cluster GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for db Instance (%s) to delete: %s", res.Entityid, errWaitTask) + } + return nil +} + +func expandActionArguments(d *schema.ResourceData) []*era.Actionarguments { + args := []*era.Actionarguments{} + if post, ok := d.GetOk("postgresql_info"); ok { + brr := post.([]interface{}) + + for _, arg := range brr { + val := arg.(map[string]interface{}) + var values interface{} + if plist, pok := val["listener_port"]; pok && len(plist.(string)) > 0 { + values = plist + + args = append(args, &era.Actionarguments{ + Name: "listener_port", + Value: values, + }) + } + if plist, pok := val["database_size"]; pok && len(plist.(string)) > 0 { + values = plist + + args = append(args, &era.Actionarguments{ + Name: "database_size", + Value: values, + }) + } + if plist, pok := val["db_password"]; pok && len(plist.(string)) > 0 { + values = plist + + args = append(args, &era.Actionarguments{ + Name: "db_password", + Value: values, + }) + } + if plist, pok := val["database_names"]; pok && len(plist.(string)) > 0 { + values = plist + + args = append(args, &era.Actionarguments{ + Name: "database_names", + Value: values, + }) + } + if plist, pok := val["auto_tune_staging_drive"]; pok && plist.(bool) { + values = plist + + args = append(args, &era.Actionarguments{ + Name: "auto_tune_staging_drive", + Value: values, + }) + } + if plist, pok := val["allocate_pg_hugepage"]; pok { + values = plist + + args = append(args, &era.Actionarguments{ + Name: "allocate_pg_hugepage", + Value: values, + }) + } + if plist, pok := val["auth_method"]; pok && len(plist.(string)) > 0 { + values = plist + + args = append(args, &era.Actionarguments{ + Name: "auth_method", + Value: values, + }) + } + if plist, clok := val["cluster_database"]; clok { + values = plist + + args = append(args, &era.Actionarguments{ + Name: "cluster_database", + Value: values, + }) + } + if plist, clok := val["pre_create_script"]; clok && len(plist.(string)) > 0 { + values = plist + + args = append(args, &era.Actionarguments{ + Name: "pre_create_script", + Value: values, + }) + } + if plist, clok := val["post_create_script"]; clok && len(plist.(string)) > 0 { + values = plist + + args = append(args, &era.Actionarguments{ + Name: "post_create_script", + Value: values, + }) + } + } + } + resp := buildActionArgumentsFromResourceData(d.Get("actionarguments").(*schema.Set), args) + + return resp +} + +func eraRefresh(ctx context.Context, conn *era.Client, opID era.GetOperationRequest) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + opRes, err := conn.Service.GetOperation(opID) + if err != nil { + return nil, "FAILED", err + } + if *opRes.Status == "5" || *opRes.Status == "4" { + if *opRes.Status == "5" { + return opRes, "COMPLETED", nil + } + return opRes, "FAILED", + fmt.Errorf("error_detail: %s, percentage_complete: 
%s", utils.StringValue(opRes.Message), utils.StringValue(opRes.Percentagecomplete)) + } + return opRes, "PENDING", nil + } +} diff --git a/test_config.json b/test_config.json index 8e432fe72..180e8c940 100644 --- a/test_config.json +++ b/test_config.json @@ -58,7 +58,8 @@ "uuid":"", "cluster_uuid":"" } - } + }, + "ssh_key":"" } diff --git a/website/docs/d/ndb_cluster.html.markdown b/website/docs/d/ndb_cluster.html.markdown new file mode 100644 index 000000000..77d5f4a87 --- /dev/null +++ b/website/docs/d/ndb_cluster.html.markdown @@ -0,0 +1,65 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_cluster" +sidebar_current: "docs-nutanix-datasource-ndb-cluster" +description: |- + Describes a cluster in Nutanix Database Service +--- + +# nutanix_ndb_cluster + +Describes a cluster in Nutanix Database Service + +## Example Usage + +```hcl +data "nutanix_ndb_cluster" "c1" { + cluster_name = "" +} + +output "cluster" { + value = data.nutanix_ndb_cluster.c1 +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `cluster_id`: ID of cluster +* `cluster_name`: name of cluster + +* `cluster_name` and `cluster_id` are mutually exclusive. + +## Attribute Reference + +The following attributes are exported: + +* `id`: - id of cluster +* `name`: - name of cluster +* `unique_name`: - unique name of cluster +* `ip_addresses`: - IP address +* `fqdns`: - fqdn +* `nx_cluster_uuid`: - nutanix cluster uuid +* `description`: - description +* `cloud_type`: - cloud type +* `date_created`: - creation date +* `date_modified`: - date modified +* `version`: - version +* `owner_id`: - owner UUID +* `status`: - current status +* `hypervisor_type`: - hypervisor type +* `hypervisor_version`: - hypervisor version +* `properties`: - list of properties +* `reference_count`: - NA +* `username`: - username +* `password`: - password +* `cloud_info`: - cloud info +* `resource_config`: - resource related consumption info +* `management_server_info`: - NA +* `entity_counts`: - no. of entities related +* `healthy`: - if healthy status + + +See detailed information in [NDB Cluster](https://www.nutanix.dev/api_references/era/#/b3A6MjIyMjI1NzY-get-cluster-by-id). 
diff --git a/website/docs/d/ndb_clusters.html.markdown b/website/docs/d/ndb_clusters.html.markdown new file mode 100644 index 000000000..8ef7aacdf --- /dev/null +++ b/website/docs/d/ndb_clusters.html.markdown @@ -0,0 +1,60 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_clusters" +sidebar_current: "docs-nutanix-datasource-ndb-clusters" +description: |- + List all clusters in Nutanix Database Service +--- + +# nutanix_ndb_clusters + +List all clusters in Nutanix Database Service + +## Example Usage + +```hcl +data "nutanix_ndb_clusters" "clusters" { +} + +output "clusters_op" { + value = data.nutanix_ndb_clusters.clusters +} + +``` + +## Attribute Reference + +The following attributes are exported: + +* `clusters`: list of clusters + +## clusters + +The following attributes are exported for each cluster: + +* `id`: - id of cluster +* `name`: - name of cluster +* `unique_name`: - unique name of cluster +* `ip_addresses`: - IP address +* `fqdns`: - fqdn +* `nx_cluster_uuid`: - nutanix cluster uuid +* `description`: - description +* `cloud_type`: - cloud type +* `date_created`: - creation date +* `date_modified`: - date modified +* `version`: - version +* `owner_id`: - owner UUID +* `status`: - current status +* `hypervisor_type`: - hypervisor type +* `hypervisor_version`: - hypervisor version +* `properties`: - list of properties +* `reference_count`: - NA +* `username`: - username +* `password`: - password +* `cloud_info`: - cloud info +* `resource_config`: - resource related consumption info +* `management_server_info`: - NA +* `entity_counts`: - no. of entities related +* `healthy`: - if healthy status + +See detailed information in [NDB Clusters](https://www.nutanix.dev/api_references/era/#/b3A6MjIyMjI1NzQ-get-all-clusters). diff --git a/website/docs/d/ndb_database.html.markdown b/website/docs/d/ndb_database.html.markdown new file mode 100644 index 000000000..7e288c154 --- /dev/null +++ b/website/docs/d/ndb_database.html.markdown @@ -0,0 +1,73 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_database" +sidebar_current: "docs-nutanix-datasource-ndb-database" +description: |- + Describes a database instance in Nutanix Database Service +--- + +# nutanix_ndb_database + +Describes a database instance in Nutanix Database Service + +## Example Usage + +```hcl +data "nutanix_ndb_database" "db1" { + database_id = "" +} + +output "db1_output" { + value = data.nutanix_ndb_database.db1 +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `database_id`: ID of database instance + +## Attribute Reference + +The following attributes are exported: + +* `id`: - id of database instance +* `name`: - name of database instance +* `description`: - description +* `date_created`: - creation date +* `date_modified`: - date modified +* `owner_id`: - owner ID +* `properties`: - properties +* `tags`: - tags attached +* `clustered`: - if clustered or not +* `clone`: - if cloned +* `era_created`: - if era created +* `internal`: - if internal database +* `placeholder`: - NA +* `database_name`: - database instance name +* `type`: - database engine type +* `status`: - status of database instance +* `database_status`: - NA +* `dbserver_logical_cluster_id`: - NA +* `time_machine_id`: - time machine ID +* `parent_time_machine_id`: - parent time machine ID +* `time_zone`: - timezone +* `info`: - info regarding disks, vm, storage, etc.
+* `group_info`: - group info +* `metadata`: - metadata of database instance +* `metric`: - metrics +* `category`: - category of instance +* `parent_database_id`: - parent database ID +* `parent_source_database_id`: - parent source database ID +* `lcm_config`: - lcm configuration +* `time_machine`: - time machine related config info +* `database_nodes`: - nodes info +* `dbserver_logical_cluster`: - NA +* `linked_databases`: - list of databases created in instance with info +* `databases`: - NA +* `database_group_state_info`: - NA + + +See detailed information in [Database Instance](https://www.nutanix.dev/api_references/era/#/b3A6MjIyMjI1NDA-get-a-database-using-id). diff --git a/website/docs/d/ndb_databases.html.markdown b/website/docs/d/ndb_databases.html.markdown new file mode 100644 index 000000000..d916f0cc7 --- /dev/null +++ b/website/docs/d/ndb_databases.html.markdown @@ -0,0 +1,71 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_databases" +sidebar_current: "docs-nutanix-datasource-ndb-databases" +description: |- + List all database instances in Nutanix Database Service +--- + +# nutanix_ndb_databases + +List all database instances in Nutanix Database Service + +## Example Usage + +```hcl +data "nutanix_ndb_databases" "dbs" {} + +output "dbs_output" { + value = data.nutanix_ndb_databases.dbs +} + +``` + +## Attribute Reference + +The following attributes are exported: + +* `database_instances`: - list of database instances + +## database_instances + +The following attributes are exported for each database instance: + +* `id`: - id of database instance +* `name`: - name of database instance +* `description`: - description +* `date_created`: - creation date +* `date_modified`: - date modified +* `owner_id`: - owner ID +* `properties`: - properties +* `tags`: - tags attached +* `clustered`: - if clustered or not +* `clone`: - if cloned +* `era_created`: - if era created +* `internal`: - if internal database +* `placeholder`: - NA +* `database_name`: - database instance name +* `type`: - database engine type +* `status`: - status of database instance +* `database_status`: - NA +* `dbserver_logical_cluster_id`: - NA +* `time_machine_id`: - time machine ID +* `parent_time_machine_id`: - parent time machine ID +* `time_zone`: - timezone +* `info`: - info regarding disks, vm, storage, etc. +* `group_info`: - group info +* `metadata`: - metadata of database instance +* `metric`: - metrics +* `category`: - category of instance +* `parent_database_id`: - parent database ID +* `parent_source_database_id`: - parent source database ID +* `lcm_config`: - lcm configuration +* `time_machine`: - time machine related config info +* `database_nodes`: - nodes info +* `dbserver_logical_cluster`: - NA +* `linked_databases`: - list of databases created in instance with info +* `databases`: - NA +* `database_group_state_info`: - NA + + +See detailed information in [List Database Instances](https://www.nutanix.dev/api_references/era/#/b3A6MjIyMjI1MzY-get-all-source-databases).
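Because this data source takes no arguments and returns every instance, any narrowing happens client-side; a sketch using a Terraform `for` expression, assuming (per the attribute list above) that `type` carries the engine type:

```hcl
data "nutanix_ndb_databases" "dbs" {}

locals {
  # keep only postgres instances, keyed by instance name
  postgres_instances = {
    for db in data.nutanix_ndb_databases.dbs.database_instances :
    db.name => db.id if db.type == "postgres_database"
  }
}

output "postgres_instance_ids" {
  value = local.postgres_instances
}
```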
diff --git a/website/docs/d/ndb_profile.html.markdown b/website/docs/d/ndb_profile.html.markdown new file mode 100644 index 000000000..d1db8b63d --- /dev/null +++ b/website/docs/d/ndb_profile.html.markdown @@ -0,0 +1,58 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_profile" +sidebar_current: "docs-nutanix-datasource-ndb-profile" +description: |- + Describes a profile in Nutanix Database Service +--- + +# nutanix_ndb_profile + +Describes a profile in Nutanix Database Service + +## Example Usage + +```hcl +data "nutanix_ndb_profile" "profile1" { + profile_type = "Network" + profile_name = "TEST_NETWORK_PROFILE" +} + +output "profile" { + value = data.nutanix_ndb_profile.profile1 +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `engine`: Database engine, e.g. postgres_database +* `profile_type`: Profile type. Types: Software, Compute, Network and Database_Parameter +* `profile_id`: Profile ID for query +* `profile_name`: Profile Name for query + + +## Attribute Reference + +The following attributes are exported: + +* `id`: - id of profile +* `name`: - profile name +* `description`: - description of profile +* `status`: - status of profile +* `owner`: - owner name +* `engine_type`: - database engine type +* `db_version`: - database version +* `topology`: - topology +* `system_profile`: - if system profile or not +* `assoc_db_servers`: - associated DB servers +* `assoc_databases`: - associated databases +* `latest_version`: - latest version for engine software +* `latest_version_id`: - ID of latest version for engine software +* `versions`: - profile's different version config +* `cluster_availability`: - list of clusters availability +* `nx_cluster_id`: - era cluster ID + +See detailed information in [Nutanix Database Service Profile](https://www.nutanix.dev/api_references/era/#/b3A6MjIyMjI1MjY-get-all-profiles). diff --git a/website/docs/d/ndb_profiles.html.markdown b/website/docs/d/ndb_profiles.html.markdown new file mode 100644 index 000000000..c8af10532 --- /dev/null +++ b/website/docs/d/ndb_profiles.html.markdown @@ -0,0 +1,58 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_profiles" +sidebar_current: "docs-nutanix-datasource-ndb-profiles" +description: |- + List profiles in Nutanix Database Service +--- + +# nutanix_ndb_profiles + +List profiles in Nutanix Database Service + +## Example Usage + +```hcl +data "nutanix_ndb_profiles" "profiles" {} + +output "profiles_list" { + value = data.nutanix_ndb_profiles.profiles +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `engine`: Database engine, e.g. postgres_database +* `profile_type`: profile type.
Types: Software, Compute, Network and Database_Parameter + +## Attribute Reference + +The following attributes are exported: + +* `profiles`: List of profiles + +## profiles + +The following attributes are exported for each profile: + +* `id`: - id of profile +* `name`: - profile name +* `description`: - description of profile +* `status`: - status of profile +* `owner`: - owner name +* `engine_type`: - database engine type +* `db_version`: - database version +* `topology`: - topology +* `system_profile`: - if system profile or not +* `assoc_db_servers`: - associated DB servers +* `assoc_databases`: - associated databases +* `latest_version`: - latest version for engine software +* `latest_version_id`: - ID of latest version for engine software +* `versions`: - profile's different version config +* `cluster_availability`: - list of clusters availability +* `nx_cluster_id`: - era cluster ID + +See detailed information in [Nutanix Database Service Profiles](https://www.nutanix.dev/api_references/era/#/b3A6MjIyMjI1MjY-get-all-profiles). diff --git a/website/docs/d/ndb_sla.html.markdown b/website/docs/d/ndb_sla.html.markdown new file mode 100644 index 000000000..b71981b27 --- /dev/null +++ b/website/docs/d/ndb_sla.html.markdown @@ -0,0 +1,59 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_sla" +sidebar_current: "docs-nutanix-datasource-ndb-sla" +description: |- + Describes an SLA in Nutanix Database Service +--- + +# nutanix_ndb_sla + +Describes an SLA in Nutanix Database Service + +## Example Usage + +```hcl +data "nutanix_ndb_sla" "sla1" { + sla_name = "test-sla" +} + +output "sla" { + value = data.nutanix_ndb_sla.sla1 +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `sla_id`: SLA ID for query +* `sla_name`: SLA Name for query + +* `sla_id` and `sla_name` are mutually exclusive. + +## Attribute Reference + +The following attributes are exported: + +* `id`: - id of sla +* `name`: - sla name +* `unique_name`: - unique name +* `description`: - description of sla +* `owner_id`: - owner ID +* `system_sla`: - if system sla +* `date_created`: - creation date +* `date_modified`: - last modified +* `continuous_retention`: - continuous retention of logs limit +* `daily_retention`: - Daily snapshots retention limit +* `weekly_retention`: - Weekly snapshots retention limit +* `monthly_retention`: - Monthly snapshots retention limit +* `quartely_retention`: - Quarterly snapshots retention limit +* `yearly_retention`: - Yearly snapshots retention limit +* `reference_count`: - Reference count +* `pitr_enabled`: - If point in time recovery enabled +* `current_active_frequency`: - Current active frequency + + + +See detailed information in [Nutanix Database Service SLA](https://www.nutanix.dev/api_references/era/#/b3A6MjIyMjI1ODc-get-sla-by-id).
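A name-based SLA lookup wired into an output is the common pattern; a sketch, using the out-of-the-box SLA name that also appears in this changeset's provision test config:

```hcl
data "nutanix_ndb_sla" "bronze" {
  sla_name = "DEFAULT_OOB_BRONZE_SLA" # name taken from the acceptance test config
}

output "bronze_daily_retention" {
  value = data.nutanix_ndb_sla.bronze.daily_retention
}
```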
diff --git a/website/docs/d/ndb_slas.html.markdown b/website/docs/d/ndb_slas.html.markdown new file mode 100644 index 000000000..33d2baa92 --- /dev/null +++ b/website/docs/d/ndb_slas.html.markdown @@ -0,0 +1,52 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_slas" +sidebar_current: "docs-nutanix-datasource-ndb-slas" +description: |- + Lists all SLAs in Nutanix Database Service +--- + +# nutanix_ndb_slas + +Lists all SLAs in Nutanix Database Service + +## Example Usage + +```hcl +data "nutanix_ndb_slas" "slas" {} + +output "sla" { + value = data.nutanix_ndb_slas.slas +} + +``` + +## Attribute Reference + +The following attributes are exported: + +* `slas`: - list of slas + +### slas + +Each SLA in the list exports the following attributes: + +* `id`: - ID of sla +* `name`: - sla name +* `unique_name`: - unique name +* `description`: - description of sla +* `owner_id`: - owner ID +* `system_sla`: - if system sla +* `date_created`: - creation date +* `date_modified`: - last modified +* `continuous_retention`: - continuous retention of logs limit +* `daily_retention`: - Daily snapshots retention limit +* `weekly_retention`: - Weekly snapshots retention limit +* `monthly_retention`: - Monthly snapshots retention limit +* `quartely_retention`: - Quarterly snapshots retention limit +* `yearly_retention`: - Yearly snapshots retention limit +* `reference_count`: - Reference count +* `pitr_enabled`: - If point in time recovery enabled +* `current_active_frequency`: - Current active frequency + +See detailed information in [Nutanix Database Service SLAs](https://www.nutanix.dev/api_references/era/#/b3A6MjIyMjI1ODU-get-all-sl-as). diff --git a/website/docs/r/ndb_database.html.markdown b/website/docs/r/ndb_database.html.markdown new file mode 100644 index 000000000..a266905d1 --- /dev/null +++ b/website/docs/r/ndb_database.html.markdown @@ -0,0 +1,229 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_database" +sidebar_current: "docs-nutanix-resource-ndb-database" +description: |- + This operation submits a request to create, update, and delete a database instance in Nutanix database service (NDB). + Note: For the 1.8.0-beta.1 release, only the postgres database type is qualified and officially supported. +--- + +# nutanix_ndb_database + +Provides a resource to create a database instance based on the input parameters. For the 1.8.0-beta.1 release, only the postgres database type is qualified and officially supported.
+ +## Example Usage + +``` hcl +resource "nutanix_ndb_database" "dbp" { + + // name of database type + databasetype = "postgres_database" + + // required name of db instance + name = "test-inst" + description = "add description" + + // adding the profiles details + softwareprofileid = "{{ software_profile_id }}" + softwareprofileversionid = "{{ software_profile_version_id }}" + computeprofileid = "{{ compute_profile_id }}" + networkprofileid = "{{ network_profile_id }}" + dbparameterprofileid = "{{ db_parameter_profile_id }}" + + // PostgreSQL info + postgresql_info{ + listener_port = "{{ listener_port }}" + + database_size= "{{ 200 }}" + + db_password = "password" + + database_names= "testdb1" + } + + // era cluster id + nxclusterid= local.clusters.EraCluster.id + + // ssh-key + sshpublickey= "{{ ssh-public-key }}" + + // node for single instance + nodes{ + // name of dbserver vm + vmname= "test-era-vm1" + + // network profile id + networkprofileid= "" + } + + // time machine info + timemachineinfo { + name= "test-pg-inst" + description="description of time machine" + slaid= "{{ sla_id }}" + + // schedule info fields are optional. + schedule { + snapshottimeofday{ + hours= 16 + minutes= 0 + seconds= 0 + } + continuousschedule{ + enabled=true + logbackupinterval= 30 + snapshotsperday=1 + } + weeklyschedule{ + enabled=true + dayofweek= "WEDNESDAY" + } + monthlyschedule{ + enabled = true + dayofmonth= "27" + } + quartelyschedule{ + enabled=true + startmonth="JANUARY" + dayofmonth= 27 + } + yearlyschedule{ + enabled= false + dayofmonth= 31 + month="DECEMBER" + } + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name`: - (Required) Name of the instance. +* `description`: - (Optional) The description +* `databasetype`: - (Optional) Type of database. Valid values: oracle_database, postgres_database, sqlserver_database, mariadb_database and mysql_database +* `softwareprofileid`: - (Optional) ID of software profile +* `softwareprofileversionid`: - (Optional) ID of version in software profile +* `computeprofileid`: - (Optional) ID of compute profile +* `networkprofileid`: - (Optional) ID of network profile +* `dbparameterprofileid`: - (Optional) DB parameters profile ID +* `newdbservertimezone`: - (Optional) Timezone of new DB server VM +* `nxclusterid`: - (Optional) Cluster ID for DB server VM +* `sshpublickey`: - (Optional) public key for ssh access to DB server VM +* `createdbserver`: - (Optional) Set this to create a new DB server VM. Default: true +* `dbserverid`: - (Optional) DB server VM ID for creating instance on registered DB server VM +* `clustered`: - (Optional) If clustered database. Default: false +* `autotunestagingdrive`: - (Optional) Enable auto tuning of staging drive. Default: true +* `nodecount`: - (Optional) No. of nodes/db server vms. Default: 1 +* `vm_password`: - (Optional) password for DB server VM and era drive user +* `actionarguments`: - (Optional) action arguments for database. For postgres, you can use postgresql_info +* `timemachineinfo`: - (Optional) time machine config +* `nodes`: - (Optional) nodes info +* `postgresql_info`: - (Optional) action arguments for postgres type database.
+ +### actionarguments + +Structure for each action argument in actionarguments list: + +* `name`: - (Required) name of argument +* `value`: - (Required) value for argument + +### nodes + +Each node in nodes supports the following: + +* `properties`: - (Optional) list of additional properties +* `vmname`: - (Required) name of vm +* `networkprofileid`: - (Required) network profile ID +* `dbserverid`: - (Optional) Database server ID required for existing VM + +### timemachineinfo + +The timemachineinfo attribute supports the following: + +* `name`: - (Required) name of time machine +* `description`: - (Optional) description of time machine +* `slaid`: - (Required) SLA ID +* `autotunelogdrive`: - (Optional) enable auto tune log drive. Default: true +* `schedule`: - (Optional) schedule for snapshots +* `tags`: - (Optional) tags + +### schedule + +The schedule attribute supports the following: + +* `snapshottimeofday`: - (Optional) daily snapshot config +* `continuousschedule`: - (Optional) snapshot freq and log config +* `weeklyschedule`: - (Optional) weekly snapshot config +* `monthlyschedule`: - (Optional) monthly snapshot config +* `quartelyschedule`: - (Optional) quarterly snapshot config +* `yearlyschedule`: - (Optional) yearly snapshot config + +### snapshottimeofday + +The snapshottimeofday attribute sets the HH:MM:SS time of day at which the snapshot is taken. It supports the following: + +* `hours`: - (Required) hours +* `minutes`: - (Required) minutes +* `seconds`: - (Required) seconds + +### continuousschedule + +The continuousschedule attribute supports the following: + +* `enabled`: - (Required) to enable +* `logbackupinterval`: - (Required) log catchup interval for database +* `snapshotsperday`: - (Required) number of snapshots per day + +### weeklyschedule + +The weeklyschedule attribute supports the following: + +* `enabled`: - (Required) to enable +* `dayofweek`: - (Required) day of week to take snapshot, e.g. "WEDNESDAY" + +### monthlyschedule + +The monthlyschedule attribute supports the following: + +* `enabled`: - (Required) to enable +* `dayofmonth`: - (Required) day of month to take snapshot + +### quartelyschedule + +The quartelyschedule attribute supports the following: + +* `enabled`: - (Required) to enable +* `startmonth`: - (Required) quarter start month +* `dayofmonth`: - (Required) day of the month for the snapshot + +### yearlyschedule + +The yearlyschedule attribute supports the following: + +* `enabled`: - (Required) to enable +* `month`: - (Required) month for snapshot +* `dayofmonth`: - (Required) day of month to take snapshot + +### postgresql_info + +The postgresql_info attribute supports the following: + +* `listener_port`: - (Required) listener port for database instance +* `database_size`: - (Required) initial database size +* `auto_tune_staging_drive`: - (Optional) enable auto tuning of staging drive. Default: true +* `allocate_pg_hugepage`: - (Optional) allocate huge page. Default: false +* `cluster_database`: - (Optional) if clustered database. Default: false +* `auth_method`: - (Optional) auth methods.
Default: md5 +* `database_names`: - (Required) name of initial database to be created +* `db_password`: - (Required) database instance password +* `pre_create_script`: - (Optional) pre instance create script +* `post_create_script`: - (Optional) post instance create script + +## lifecycle + +* `Update`: - Currently, only updates to the instance's name and description are supported by this resource + +See detailed information in [NDB Database Instance](https://www.nutanix.dev/api_references/era/#/b3A6MjIyMjI1Mzg-provision-a-database). diff --git a/website/nutanix.erb b/website/nutanix.erb index 7455dd5ba..4268643b0 100644 --- a/website/nutanix.erb +++ b/website/nutanix.erb @@ -163,6 +163,30 @@ > nutanix_vpcs + > + nutanix_ndb_cluster + + > + nutanix_ndb_clusters + + > + nutanix_ndb_database + + > + nutanix_ndb_databases + + > + nutanix_ndb_profile + + > + nutanix_ndb_profiles + + > + nutanix_ndb_sla + + > + nutanix_ndb_slas + @@ -247,6 +271,9 @@ > nutanix_user_groups + > + nutanix_ndb_database +