diff --git a/client/era/era_service.go b/client/era/era_service.go index a2afe1c7c..66bb1db6b 100644 --- a/client/era/era_service.go +++ b/client/era/era_service.go @@ -39,6 +39,21 @@ type Service interface { DeleteProfileVersion(ctx context.Context, profileID string, profileVersionID string) (*string, error) DatabaseScale(ctx context.Context, id string, req *DatabaseScale) (*ProvisionDatabaseResponse, error) RegisterDatabase(ctx context.Context, request *RegisterDBInputRequest) (*ProvisionDatabaseResponse, error) + GetTimeMachine(ctx context.Context, tmsID string, tmsName string) (*TimeMachine, error) + ListTimeMachines(ctx context.Context) (*ListTimeMachines, error) + DatabaseSnapshot(ctx context.Context, id string, req *DatabaseSnapshotRequest) (*ProvisionDatabaseResponse, error) + UpdateSnapshot(ctx context.Context, id string, req *UpdateSnapshotRequest) (*SnapshotResponse, error) + GetSnapshot(ctx context.Context, id string, filter *FilterParams) (*SnapshotResponse, error) + DeleteSnapshot(ctx context.Context, id string) (*ProvisionDatabaseResponse, error) + ListSnapshots(ctx context.Context, tmsID string) (*ListSnapshots, error) + CreateClone(ctx context.Context, id string, req *CloneRequest) (*ProvisionDatabaseResponse, error) + UpdateCloneDatabase(ctx context.Context, id string, req *UpdateDatabaseRequest) (*UpdateDatabaseResponse, error) + GetClone(ctx context.Context, id string, name string, filterParams *FilterParams) (*GetDatabaseResponse, error) + ListClones(ctx context.Context, filter *FilterParams) (*ListDatabaseInstance, error) + DeleteClone(ctx context.Context, id string, req *DeleteDatabaseRequest) (*ProvisionDatabaseResponse, error) + AuthorizeDBServer(ctx context.Context, id string, req []*string) (*AuthorizeDBServerResponse, error) + DeAuthorizeDBServer(ctx context.Context, id string, req []*string) (*AuthorizeDBServerResponse, error) + TimeMachineCapability(ctx context.Context, tmsID string) (*TimeMachineCapability, error) } type ServiceClient 
struct { @@ -362,6 +377,15 @@ func (sc ServiceClient) DatabaseRestore(ctx context.Context, databaseID string, res := new(ProvisionDatabaseResponse) return res, sc.c.Do(ctx, httpReq, res) } +func (sc ServiceClient) DatabaseSnapshot(ctx context.Context, id string, req *DatabaseSnapshotRequest) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/tms/%s/snapshots", id), req) + if err != nil { + return nil, err + } + + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} func (sc ServiceClient) LogCatchUp(ctx context.Context, tmsID string, req *LogCatchUpRequest) (*ProvisionDatabaseResponse, error) { httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/tms/%s/log-catchups", tmsID), req) @@ -436,3 +460,171 @@ func (sc ServiceClient) DeleteProfileVersion(ctx context.Context, profileID stri return res, sc.c.Do(ctx, httpReq, res) } + +func (sc ServiceClient) UpdateSnapshot(ctx context.Context, snapshotID string, req *UpdateSnapshotRequest) (*SnapshotResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPatch, fmt.Sprintf("/snapshots/i/%s", snapshotID), req) + if err != nil { + return nil, err + } + + res := new(SnapshotResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteSnapshot(ctx context.Context, snapshotID string) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/snapshots/%s", snapshotID), nil) + if err != nil { + return nil, err + } + + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetSnapshot(ctx context.Context, snapshotID string, filter *FilterParams) (*SnapshotResponse, error) { + path := fmt.Sprintf("/snapshots/%s", snapshotID) + if filter != nil { + path = path + "?load-replicated-child-snapshots=" + filter.LoadReplicatedChildSnapshots + "&time-zone=" + filter.TimeZone + } + httpReq, err := 
sc.c.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + res := new(SnapshotResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListSnapshots(ctx context.Context, tmsID string) (*ListSnapshots, error) { + path := ("/snapshots?all=false&time-zone=UTC") + if tmsID != "" { + path = path + "&value-type=time-machine&value=" + tmsID + } + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + res := new(ListSnapshots) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetTimeMachine(ctx context.Context, tmsID string, tmsName string) (*TimeMachine, error) { + path := "" + + if len(tmsName) > 0 { + path = fmt.Sprintf("/tms/%s?value-type=name&detailed=false&load-database=false&load-clones=false&time-zone=UTC", tmsName) + } else { + path = fmt.Sprintf("/tms/%s?value-type=id&detailed=false&load-database=false&load-clones=false&time-zone=UTC", tmsID) + } + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + res := new(TimeMachine) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListTimeMachines(ctx context.Context) (*ListTimeMachines, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, "/tms", nil) + if err != nil { + return nil, err + } + + res := new(ListTimeMachines) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) CreateClone(ctx context.Context, id string, req *CloneRequest) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/tms/%s/clones", id), req) + if err != nil { + return nil, err + } + + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetClone(ctx context.Context, id string, name string, filter *FilterParams) (*GetDatabaseResponse, error) { + path := "" + + if name != "" { + path = 
fmt.Sprintf("/clones/%s?value-type=name&detailed=%s&any-status=%s&load-dbserver-cluster=%s&time-zone=%s", name, filter.Detailed, filter.AnyStatus, filter.LoadDBServerCluster, filter.TimeZone) + } else { + path = fmt.Sprintf("/clones/%s?value-type=id&detailed=%s&any-status=%s&load-dbserver-cluster=%s&time-zone=%s", id, filter.Detailed, filter.AnyStatus, filter.LoadDBServerCluster, filter.TimeZone) + } + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + res := new(GetDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListClones(ctx context.Context, filter *FilterParams) (*ListDatabaseInstance, error) { + path := fmt.Sprintf("/clones?detailed=%s&any-status=%s&load-dbserver-cluster=%s&order-by-dbserver-cluster=%s&order-by-dbserver-logical-cluster=%s&time-zone=%s", + filter.Detailed, filter.AnyStatus, filter.LoadDBServerCluster, filter.OrderByDBServerCluster, filter.OrderByDBServerLogicalCluster, filter.TimeZone) + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + res := new(ListDatabaseInstance) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) UpdateCloneDatabase(ctx context.Context, id string, req *UpdateDatabaseRequest) (*UpdateDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPatch, fmt.Sprintf("/clones/%s", id), req) + res := new(UpdateDatabaseResponse) + + if err != nil { + return nil, err + } + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteClone(ctx context.Context, cloneID string, req *DeleteDatabaseRequest) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/clones/%s", cloneID), req) + if err != nil { + return nil, err + } + + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) AuthorizeDBServer(ctx context.Context, 
tmsID string, req []*string) (*AuthorizeDBServerResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPatch, fmt.Sprintf("/tms/%s/dbservers", tmsID), req) + if err != nil { + return nil, err + } + + res := new(AuthorizeDBServerResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeAuthorizeDBServer(ctx context.Context, tmsID string, req []*string) (*AuthorizeDBServerResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/tms/%s/dbservers", tmsID), req) + if err != nil { + return nil, err + } + + res := new(AuthorizeDBServerResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) TimeMachineCapability(ctx context.Context, tmsID string) (*TimeMachineCapability, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/tms/%s/capability?time-zone=UTC&type=detailed&load-db-logs=true&load-snapshots=true", tmsID), "") + if err != nil { + return nil, err + } + + res := new(TimeMachineCapability) + + return res, sc.c.Do(ctx, httpReq, res) +} diff --git a/client/era/era_structs.go b/client/era/era_structs.go index fa22b127c..5743a16d8 100644 --- a/client/era/era_structs.go +++ b/client/era/era_structs.go @@ -288,13 +288,14 @@ type IPInfos struct { } type Nodes struct { - Properties []*NodesProperties `json:"properties"` - Vmname *string `json:"vmName,omitempty"` - Networkprofileid *string `json:"networkProfileId,omitempty"` - DatabaseServerID *string `json:"dbserverId,omitempty"` - NxClusterID *string `json:"nxClusterId,omitempty"` - ComputeProfileID *string `json:"computeProfileId,omitempty"` - IPInfos []*IPInfos `json:"ipInfos,omitempty"` + Properties []*NodesProperties `json:"properties"` + Vmname *string `json:"vmName,omitempty"` + Networkprofileid *string `json:"networkProfileId,omitempty"` + DatabaseServerID *string `json:"dbserverId,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + ComputeProfileID *string 
`json:"computeProfileId,omitempty"` + NewDBServerTimeZone *string `json:"newDbServerTimeZone,omitempty"` + IPInfos []*IPInfos `json:"ipInfos,omitempty"` } // ProvisionDatabaseResponse structs @@ -700,6 +701,7 @@ type InfoBpgConfig struct { type Info struct { Secureinfo interface{} `json:"secureInfo"` Info *InfoBpgConfig `json:"info"` + CreatedBy *string `json:"created_by,omitempty"` } type DBInstanceMetadata struct { Logcatchupforrestoredispatched bool `json:"logCatchUpForRestoreDispatched,omitempty"` @@ -763,13 +765,13 @@ type MetricMemoryInfo struct { } type MetricStorageInfo struct { - LastUpdatedTimeInUTC *string `json:"lastUpdatedTimeInUTC,omitempty"` - ControllerNumIops []*int `json:"controllerNumIops,omitempty"` - ControllerAvgIoLatencyUsecs []*int `json:"controllerAvgIoLatencyUsecs,omitempty"` - Size *int `json:"size,omitempty"` - AllocatedSize *int `json:"allocatedSize,omitempty"` - UsedSize *int `json:"usedSize,omitempty"` - Unit *string `json:"unit,omitempty"` + LastUpdatedTimeInUTC interface{} `json:"lastUpdatedTimeInUTC,omitempty"` + ControllerNumIops []*int `json:"controllerNumIops,omitempty"` + ControllerAvgIoLatencyUsecs []*int `json:"controllerAvgIoLatencyUsecs,omitempty"` + Size interface{} `json:"size,omitempty"` + AllocatedSize interface{} `json:"allocatedSize,omitempty"` + UsedSize interface{} `json:"usedSize,omitempty"` + Unit interface{} `json:"unit,omitempty"` } type Metric struct { @@ -1086,3 +1088,313 @@ type UnRegisterDatabaseRequest struct { Delete bool `json:"delete,omitempty"` DeleteTimeMachine bool `json:"deleteTimeMachine,omitempty"` } +type DatabaseSnapshotRequest struct { + Name *string `json:"name,omitempty"` + LcmConfig *LCMConfigSnapshot `json:"lcmConfig,omitempty"` + ReplicateToClusters []*string `json:"replicateToClusterIds,omitempty"` +} + +type LCMConfigSnapshot struct { + SnapshotLCMConfig *SnapshotLCMConfig `json:"snapshotLCMConfig,omitempty"` +} + +type SnapshotLCMConfig struct { + ExpiryDetails *DBExpiryDetails 
`json:"expiryDetails,omitempty"` +} + +type ListTimeMachines []*TimeMachine + +type CloneLCMConfig struct { + DatabaseLCMConfig *DatabaseLCMConfig `json:"databaseLCMConfig,omitempty"` +} + +type DatabaseLCMConfig struct { + ExpiryDetails *DBExpiryDetails `json:"expiryDetails,omitempty"` + RefreshDetails *DBRefreshDetails `json:"refreshDetails,omitempty"` +} + +type CloneRequest struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + SSHPublicKey *string `json:"sshPublicKey,omitempty"` + DbserverID *string `json:"dbserverId,omitempty"` + DbserverClusterID *string `json:"dbserverClusterId,omitempty"` + DbserverLogicalClusterID *string `json:"dbserverLogicalClusterId,omitempty"` + TimeMachineID *string `json:"timeMachineId,omitempty"` + SnapshotID *string `json:"snapshotId,omitempty"` + UserPitrTimestamp *string `json:"userPitrTimestamp,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` + VMPassword *string `json:"vmPassword,omitempty"` + ComputeProfileID *string `json:"computeProfileId,omitempty"` + NetworkProfileID *string `json:"networkProfileId,omitempty"` + DatabaseParameterProfileID *string `json:"databaseParameterProfileId,omitempty"` + NodeCount *int `json:"nodeCount,omitempty"` + Nodes []*Nodes `json:"nodes,omitempty"` + ActionArguments []*Actionarguments `json:"actionArguments,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + LatestSnapshot bool `json:"latestSnapshot,omitempty"` + CreateDbserver bool `json:"createDbserver,omitempty"` + Clustered bool `json:"clustered,omitempty"` + LcmConfig *CloneLCMConfig `json:"lcmConfig,omitempty"` +} + +type AuthorizeDBServerResponse struct { + ErrorCode *int `json:"errorCode,omitempty"` + Info *string `json:"info,omitempty"` + Message *string `json:"message,omitempty"` + Status *string `json:"status,omitempty"` +} + +type FilterParams struct { + Detailed string `json:"detailed,omitempty"` + AnyStatus string 
`json:"any-status,omitempty"` + LoadDBServerCluster string `json:"load-dbserver-cluster"` + TimeZone string `json:"time-zone,omitempty"` + OrderByDBServerCluster string `json:"order-by-dbserver-cluster,omitempty"` + OrderByDBServerLogicalCluster string `json:"order-by-dbserver-logical-cluster,omitempty"` + LoadReplicatedChildSnapshots string `json:"load-replicated-child-snapshots,omitempty"` +} + +type UpdateSnapshotRequest struct { + Name *string `json:"name,omitempty"` + ResetName bool `json:"resetName,omitempty"` +} + +type ListSnapshots []SnapshotResponse + +type SnapshotResponse struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + DateCreated *string `json:"dateCreated,omitempty"` + DateModified *string `json:"dateModified,omitempty"` + SnapshotID *string `json:"snapshotId,omitempty"` + SnapshotUUID *string `json:"snapshotUuid,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + ProtectionDomainID *string `json:"protectionDomainId,omitempty"` + ParentSnapshotID *string `json:"parentSnapshotId,omitempty"` + TimeMachineID *string `json:"timeMachineId,omitempty"` + DatabaseNodeID *string `json:"databaseNodeId,omitempty"` + AppInfoVersion *string `json:"appInfoVersion,omitempty"` + Status *string `json:"status,omitempty"` + Type *string `json:"type,omitempty"` + SnapshotTimeStamp *string `json:"snapshotTimeStamp,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` + SoftwareSnapshotID *string `json:"softwareSnapshotId,omitempty"` + FromTimeStamp *string `json:"fromTimeStamp,omitempty"` + ToTimeStamp *string `json:"toTimeStamp,omitempty"` + ApplicableTypes []*string `json:"applicableTypes,omitempty"` + DBServerStorageMetadataVersion *int `json:"dbServerStorageMetadataVersion,omitempty"` + SnapshotTimeStampDate *int64 `json:"snapshotTimeStampDate,omitempty"` + SnapshotSize *float64 `json:"snapshotSize,omitempty"` + 
ParentSnapshot *bool `json:"parentSnapshot,omitempty"` + SoftwareDatabaseSnapshot bool `json:"softwareDatabaseSnapshot,omitempty"` + Processed bool `json:"processed,omitempty"` + DatabaseSnapshot bool `json:"databaseSnapshot,omitempty"` + Properties []*DBInstanceProperties `json:"properties"` + Tags []*Tags `json:"tags"` + Info *CloneInfo `json:"info,omitempty"` + Metadata *ClonedMetadata `json:"metadata,omitempty"` + Metric *Metric `json:"metric,omitempty"` + LcmConfig *LcmConfig `json:"lcmConfig,omitempty"` + SanitisedFromSnapshotID interface{} `json:"sanitisedFromSnapshotId,omitempty"` + AccessLevel interface{} `json:"accessLevel"` + DbserverID interface{} `json:"dbserverId,omitempty"` + DbserverName interface{} `json:"dbserverName,omitempty"` + DbserverIP interface{} `json:"dbserverIp,omitempty"` + ReplicatedSnapshots interface{} `json:"replicatedSnapshots,omitempty"` + SoftwareSnapshot interface{} `json:"softwareSnapshot,omitempty"` + SanitisedSnapshots interface{} `json:"sanitisedSnapshots,omitempty"` + SnapshotFamily interface{} `json:"snapshotFamily,omitempty"` +} + +type LinkedDBInfo struct { + Info *Info `json:"info,omitempty"` +} + +type CloneLinkedDBInfo struct { + ID *string `json:"id,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + Status *string `json:"status,omitempty"` + Info *LinkedDBInfo `json:"info,omitempty"` + AppConsistent bool `json:"appConsistent,omitempty"` + Clone bool `json:"clone,omitempty"` + Message interface{} `json:"message,omitempty"` +} + +type CloneInfo struct { + SecureInfo interface{} `json:"secureInfo,omitempty"` + Info interface{} `json:"info,omitempty"` + LinkedDatabases []*CloneLinkedDBInfo `json:"linkedDatabases,omitempty"` + Databases interface{} `json:"databases,omitempty"` + DatabaseGroupID interface{} `json:"databaseGroupId,omitempty"` + MissingDatabases interface{} `json:"missingDatabases,omitempty"` + ReplicationHistory interface{} `json:"replicationHistory,omitempty"` +} + +type ClonedMetadata 
struct { + SecureInfo interface{} `json:"secureInfo,omitempty"` + Info interface{} `json:"info,omitempty"` + DeregisterInfo interface{} `json:"deregisterInfo,omitempty"` + FromTimeStamp string `json:"fromTimeStamp,omitempty"` + ToTimeStamp string `json:"toTimeStamp,omitempty"` + ReplicationRetryCount int `json:"replicationRetryCount,omitempty"` + LastReplicationRetryTimestamp interface{} `json:"lastReplicationRetryTimestamp,omitempty"` + LastReplicationRetrySourceSnapshotID interface{} `json:"lastReplicationRetrySourceSnapshotId,omitempty"` + Async bool `json:"async,omitempty"` + Standby bool `json:"standby,omitempty"` + CurationRetryCount int `json:"curationRetryCount,omitempty"` + OperationsUsingSnapshot []interface{} `json:"operationsUsingSnapshot,omitempty"` +} + +type Capability struct { + Mode *string `json:"mode,omitempty"` + From *string `json:"from,omitempty"` + To *string `json:"to,omitempty"` + TimeUnit *string `json:"timeUnit,omitempty"` + TimeUnitNumber *string `json:"timeUnitNumber,omitempty"` + DatabaseIds []*string `json:"databaseIds,omitempty"` + Snapshots *ListSnapshots `json:"snapshots,omitempty"` + ContinuousRegion *ContinuousRegion `json:"continuousRegion,omitempty"` + DatabasesContinuousRegion interface{} `json:"databasesContinuousRegion,omitempty"` +} + +type TimeMachineCapability struct { + TimeMachineID *string `json:"timeMachineId,omitempty"` + OutputTimeZone *string `json:"outputTimeZone,omitempty"` + Type *string `json:"type,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + NxClusterAssociationType *string `json:"nxClusterAssociationType,omitempty"` + SLAID *string `json:"slaId,omitempty"` + CapabilityResetTime *string `json:"capabilityResetTime,omitempty"` + LastContinuousSnapshotTime *string `json:"lastContinuousSnapshotTime,omitempty"` + LogCatchupStartTime *string `json:"logCatchupStartTime,omitempty"` + DatabaseIds []*string `json:"databaseIds,omitempty"` + HealWithResetCapability bool 
`json:"healWithResetCapability,omitempty"` + Source bool `json:"source,omitempty"` + Capability []*Capability `json:"capability,omitempty"` + LogTimeInfo map[string]interface{} `json:"logTimeInfo,omitempty"` + LastDBLog *DBLogs `json:"lastDbLog,omitempty"` + LastContinuousSnapshot *LastContinuousSnapshot `json:"lastContinuousSnapshot,omitempty"` + OverallContinuousRangeEndTime interface{} `json:"overallContinuousRangeEndTime,omitempty"` +} + +type ProcessedRanges struct { + First string `json:"first,omitempty"` + Second string `json:"second,omitempty"` +} + +type DBLogsInfo struct { + SecureInfo interface{} `json:"secureInfo,omitempty"` + Info interface{} `json:"info,omitempty"` + UnknownTimeRange bool `json:"unknownTimeRange,omitempty"` +} + +type DBLogsMetadata struct { + SecureInfo interface{} `json:"secureInfo,omitempty"` + Info interface{} `json:"info,omitempty"` + DeregisterInfo *DeregisterInfo `json:"deregisterInfo,omitempty"` + CurationRetryCount int `json:"curationRetryCount,omitempty"` + CreatedDirectly bool `json:"createdDirectly,omitempty"` + UpdatedDirectly bool `json:"updatedDirectly,omitempty"` +} + +type DBLogs struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + EraLogDriveID string `json:"eraLogDriveId,omitempty"` + DatabaseNodeID string `json:"databaseNodeId,omitempty"` + FromTime string `json:"fromTime,omitempty"` + ToTime string `json:"toTime,omitempty"` + Status string `json:"status,omitempty"` + Size int `json:"size,omitempty"` + Info *DBLogsInfo `json:"info,omitempty"` + Metadata *DBLogsMetadata `json:"metadata,omitempty"` + DateCreated string `json:"dateCreated,omitempty"` + DateModified string `json:"dateModified,omitempty"` + OwnerID string `json:"ownerId,omitempty"` + DatabaseID interface{} `json:"databaseId,omitempty"` + Message interface{} `json:"message,omitempty"` + Unprocessed bool `json:"unprocessed,omitempty"` + LogCopyOperationID interface{} `json:"logCopyOperationId,omitempty"` +} + +type 
ContinuousRegion struct { + FromTime string `json:"fromTime,omitempty"` + ToTime string `json:"toTime,omitempty"` + TimeZone string `json:"timeZone,omitempty"` + SnapshotIds []string `json:"snapshotIds,omitempty"` + PartialRanges bool `json:"partialRanges,omitempty"` + SubRange bool `json:"subRange,omitempty"` + Message interface{} `json:"message,omitempty"` + UnknownTimeRanges interface{} `json:"unknownTimeRanges,omitempty"` + TimeRangeAndDatabases interface{} `json:"timeRangeAndDatabases,omitempty"` + Snapshots interface{} `json:"snapshots,omitempty"` + DBLogs []*DBLogs `json:"dbLogs,omitempty"` + ProcessedRanges []*ProcessedRanges `json:"processedRanges,omitempty"` + UnprocessedRanges []*ProcessedRanges `json:"unprocessedRanges,omitempty"` +} + +type LastContinuousSnapshotMetadata struct { + FromTimeStamp string `json:"fromTimeStamp,omitempty"` + ToTimeStamp string `json:"toTimeStamp,omitempty"` + ReplicationRetryCount int `json:"replicationRetryCount,omitempty"` + CurationRetryCount int `json:"curationRetryCount,omitempty"` + Async bool `json:"async,omitempty"` + Standby bool `json:"standby,omitempty"` + SecureInfo interface{} `json:"secureInfo,omitempty"` + Info interface{} `json:"info,omitempty"` + DeregisterInfo interface{} `json:"deregisterInfo,omitempty"` + LastReplicationRetryTimestamp interface{} `json:"lastReplicationRetryTimestamp,omitempty"` + LastReplicationRetrySourceSnapshotID interface{} `json:"lastReplicationRetrySourceSnapshotId,omitempty"` + OperationsUsingSnapshot []interface{} `json:"operationsUsingSnapshot,omitempty"` +} + +type LastContinuousSnapshot struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + OwnerID string `json:"ownerId,omitempty"` + DateCreated string `json:"dateCreated,omitempty"` + DateModified string `json:"dateModified,omitempty"` + SnapshotID string `json:"snapshotId,omitempty"` + SnapshotUUID string `json:"snapshotUuid,omitempty"` + NxClusterID string `json:"nxClusterId,omitempty"` + 
ProtectionDomainID string `json:"protectionDomainId,omitempty"` + TimeMachineID string `json:"timeMachineId,omitempty"` + DatabaseNodeID string `json:"databaseNodeId,omitempty"` + AppInfoVersion string `json:"appInfoVersion,omitempty"` + Status string `json:"status,omitempty"` + Type string `json:"type,omitempty"` + SnapshotTimeStamp string `json:"snapshotTimeStamp,omitempty"` + SoftwareSnapshotID string `json:"softwareSnapshotId,omitempty"` + TimeZone string `json:"timeZone,omitempty"` + FromTimeStamp string `json:"fromTimeStamp,omitempty"` + ToTimeStamp string `json:"toTimeStamp,omitempty"` + ApplicableTypes []string `json:"applicableTypes,omitempty"` + SoftwareDatabaseSnapshot bool `json:"softwareDatabaseSnapshot,omitempty"` + Processed bool `json:"processed,omitempty"` + DatabaseSnapshot bool `json:"databaseSnapshot,omitempty"` + ParentSnapshot bool `json:"parentSnapshot,omitempty"` + DBServerStorageMetadataVersion int `json:"dbServerStorageMetadataVersion,omitempty"` + SnapshotTimeStampDate int64 `json:"snapshotTimeStampDate,omitempty"` + SnapshotSize float64 `json:"snapshotSize,omitempty"` + AccessLevel interface{} `json:"accessLevel,omitempty"` + Metric interface{} `json:"metric,omitempty"` + SanitisedFromSnapshotID interface{} `json:"sanitisedFromSnapshotId,omitempty"` + DBserverID interface{} `json:"dbserverId,omitempty"` + DBserverName interface{} `json:"dbserverName,omitempty"` + DBserverIP interface{} `json:"dbserverIp,omitempty"` + ReplicatedSnapshots interface{} `json:"replicatedSnapshots,omitempty"` + SoftwareSnapshot interface{} `json:"softwareSnapshot,omitempty"` + SanitisedSnapshots interface{} `json:"sanitisedSnapshots,omitempty"` + Description interface{} `json:"description,omitempty"` + SnapshotFamily interface{} `json:"snapshotFamily,omitempty"` + ParentSnapshotID interface{} `json:"parentSnapshotId,omitempty"` + Properties []*DBInstanceProperties `json:"properties,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + Info *CloneInfo 
`json:"info,omitempty"` + Metadata *LastContinuousSnapshotMetadata `json:"metadata,omitempty"` + LcmConfig *LcmConfig `json:"lcmConfig,omitempty"` +} diff --git a/examples/ndb/clone/main.tf b/examples/ndb/clone/main.tf new file mode 100644 index 000000000..6d6f1f8f5 --- /dev/null +++ b/examples/ndb/clone/main.tf @@ -0,0 +1,42 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + + +## resource for ndb_clone with Point in time given time machine name + +resource "nutanix_ndb_clone" "name" { + time_machine_name = "test-pg-inst" + name = "test-inst-tf-check" + nx_cluster_id = "{{ nx_Cluster_id }}" + ssh_public_key = "{{ sshkey }}" + user_pitr_timestamp= "{{ point_in_time }}" + time_zone = "Asia/Calcutta" + create_dbserver = true + compute_profile_id = "{{ compute_profile_id }}" + network_profile_id ="{{ network_profile_id }}" + database_parameter_profile_id = "{{ databse_profile_id }}" + nodes{ + vm_name= "test_vm_clone" + compute_profile_id = "{{ compute_profile_id }}" + network_profile_id ="{{ network_profile_id }}" + nx_cluster_id = "{{ nx_Cluster_id }}" + } + postgresql_info{ + vm_name="test_vm_clone" + db_password= "pass" + } +} diff --git a/examples/ndb/clone/terraform.tfvars b/examples/ndb/clone/terraform.tfvars new file mode 100644 index 000000000..4f5de990b --- /dev/null +++ b/examples/ndb/clone/terraform.tfvars @@ -0,0 +1,4 @@ +#define values to the variables to be used in terraform file_username = "admin" +ndb_password = "password" +ndb_endpoint = "10.xx.xx.xx" +ndb_username = "username" diff --git a/examples/ndb/clone/variables.tf b/examples/ndb/clone/variables.tf new file mode 100644 index 000000000..1a0cb89bf --- /dev/null +++ b/examples/ndb/clone/variables.tf @@ -0,0 +1,10 @@ +#define the type of variables to be used in 
terraform file +variable "ndb_username" { + type = string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} diff --git a/examples/ndb/database_snapshot/main.tf b/examples/ndb/database_snapshot/main.tf new file mode 100644 index 000000000..a343539d8 --- /dev/null +++ b/examples/ndb/database_snapshot/main.tf @@ -0,0 +1,32 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + +// resource to create snapshot with time machine id + +resource "nutanix_ndb_database_snapshot" "name" { + time_machine_id = "{{ tms_ID }}" + name = "test-snap" + remove_schedule_in_days = 1 +} + +// resource to craete snapshot with time machine name + +resource "nutanix_ndb_database_snapshot" "name" { + time_machine_name = "{{ tms_name }}" + name = "test-snap" + remove_schedule_in_days = 1 +} \ No newline at end of file diff --git a/examples/ndb/database_snapshot/terraform.tfvars b/examples/ndb/database_snapshot/terraform.tfvars new file mode 100644 index 000000000..4f5de990b --- /dev/null +++ b/examples/ndb/database_snapshot/terraform.tfvars @@ -0,0 +1,4 @@ +#define values to the variables to be used in terraform file_username = "admin" +ndb_password = "password" +ndb_endpoint = "10.xx.xx.xx" +ndb_username = "username" diff --git a/examples/ndb/database_snapshot/variables.tf b/examples/ndb/database_snapshot/variables.tf new file mode 100644 index 000000000..1a0cb89bf --- /dev/null +++ b/examples/ndb/database_snapshot/variables.tf @@ -0,0 +1,10 @@ +#define the type of variables to be used in terraform file +variable "ndb_username" { + type = string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} diff --git a/nutanix/data_source_nutanix_ndb_clone.go 
b/nutanix/data_source_nutanix_ndb_clone.go new file mode 100644 index 000000000..ec4f61489 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_clone.go @@ -0,0 +1,380 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixNDBClone() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBCloneRead, + Schema: map[string]*schema.Schema{ + "clone_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"clone_name"}, + }, + "clone_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"clone_id"}, + }, + "filters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "detailed": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "any_status": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "load_dbserver_cluster": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "timezone": { + Type: schema.TypeString, + Optional: true, + Default: "UTC", + }, + }, + }, + }, + + // computed + + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "clustered": { + Type: schema.TypeBool, + Computed: true, + }, + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "era_created": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + 
Type: schema.TypeBool, + Computed: true, + }, + "placeholder": { + Type: schema.TypeBool, + Computed: true, + }, + "database_name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "database_cluster_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "database_status": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_logical_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_zone": { + Type: schema.TypeString, + Computed: true, + }, + "info": dataSourceEraDatabaseInfo(), + "group_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "metadata": dataSourceEraDBInstanceMetadata(), + "metric": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "category": { + Type: schema.TypeString, + Computed: true, + }, + "parent_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_source_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "time_machine": dataSourceEraTimeMachine(), + "dbserver_logical_cluster": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_nodes": dataSourceEraDatabaseNodes(), + "linked_databases": dataSourceEraLinkedDatabases(), + "databases": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_group_state_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func dataSourceNutanixNDBCloneRead(ctx context.Context, d *schema.ResourceData, 
meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + cloneID, ok := d.GetOk("clone_id") + cloneName, cok := d.GetOk("clone_name") + + if !ok && !cok { + return diag.Errorf("at least one of clone_id or clone_name is required") + } + + filterParams := &era.FilterParams{} + if filter, fok := d.GetOk("filters"); fok { + filterList := filter.([]interface{}) + + for _, v := range filterList { + val := v.(map[string]interface{}) + + if detailed, dok := val["detailed"]; dok { + filterParams.Detailed = detailed.(string) + } + + if anyStatus, aok := val["any_status"]; aok { + filterParams.AnyStatus = anyStatus.(string) + } + // key must match the schema attribute "load_dbserver_cluster" declared above + if loadDB, lok := val["load_dbserver_cluster"]; lok { + filterParams.LoadDBServerCluster = loadDB.(string) + } + + if timezone, tok := val["timezone"]; tok { + filterParams.TimeZone = timezone.(string) + } + } + } else { + filterParams.Detailed = "false" + filterParams.AnyStatus = "false" + filterParams.LoadDBServerCluster = "false" + filterParams.TimeZone = "UTC" + } + + resp, err := conn.Service.GetClone(ctx, cloneID.(string), cloneName.(string), filterParams) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("id", resp.ID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.Ownerid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_created", resp.Datecreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.Datemodified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("properties", flattenDBInstanceProperties(resp.Properties)); err != nil { + return diag.FromErr(err) + } + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clone", resp.Clone); err != nil { + return
diag.FromErr(err) + } + + if err := d.Set("clustered", resp.Clustered); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("internal", resp.Internal); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("placeholder", resp.Placeholder); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_name", resp.Databasename); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_cluster_type", resp.Databaseclustertype); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_status", resp.Databasestatus); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_logical_cluster_id", resp.Dbserverlogicalclusterid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine_id", resp.Timemachineid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_time_machine_id", resp.Parenttimemachineid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_zone", resp.Timezone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("info", flattenDBInfo(resp.Info)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("group_info", resp.GroupInfo); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metadata", flattenDBInstanceMetadata(resp.Metadata)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metric", resp.Metric); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("category", resp.Category); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_database_id", resp.ParentDatabaseID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_source_database_id", resp.ParentSourceDatabaseID); err != nil { + return diag.FromErr(err) + } + + if err := 
d.Set("lcm_config", flattenDBLcmConfig(resp.Lcmconfig)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine", flattenDBTimeMachine(resp.TimeMachine)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_logical_cluster", resp.Dbserverlogicalcluster); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_nodes", flattenDBNodes(resp.Databasenodes)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("linked_databases", flattenDBLinkedDbs(resp.Linkeddatabases)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("databases", resp.Databases); err != nil { + return diag.FromErr(err) + } + if err := d.Set("database_group_state_info", resp.DatabaseGroupStateInfo); err != nil { + return diag.FromErr(err) + } + + d.SetId(resp.ID) + + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_clones.go b/nutanix/data_source_nutanix_ndb_clones.go new file mode 100644 index 000000000..1696c57ee --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_clones.go @@ -0,0 +1,258 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixNDBClones() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBClonesRead, + Schema: map[string]*schema.Schema{ + "filters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "detailed": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "any_status": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "load_dbserver_cluster": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "timezone": { + Type: schema.TypeString, + Optional: true, + Default: "UTC", + }, + 
"order_by_dbserver_cluster": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + "order_by_dbserver_logical_cluster": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + }, + }, + }, + "clones": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "clustered": { + Type: schema.TypeBool, + Computed: true, + }, + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "era_created": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "placeholder": { + Type: schema.TypeBool, + Computed: true, + }, + "database_name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "database_cluster_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "database_status": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_logical_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_zone": { + Type: schema.TypeString, + Computed: true, + }, + "info": dataSourceEraDatabaseInfo(), + "group_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "metadata": 
dataSourceEraDBInstanceMetadata(), + "metric": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "category": { + Type: schema.TypeString, + Computed: true, + }, + "parent_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_source_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "time_machine": dataSourceEraTimeMachine(), + "dbserver_logical_cluster": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_nodes": dataSourceEraDatabaseNodes(), + "linked_databases": dataSourceEraLinkedDatabases(), + "databases": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_group_state_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixNDBClonesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + filterParams := &era.FilterParams{} + if filter, fok := d.GetOk("filters"); fok { + filterList := filter.([]interface{}) + + for _, v := range filterList { + val := v.(map[string]interface{}) + + if detailed, dok := val["detailed"]; dok { + filterParams.Detailed = detailed.(string) + } + + if anyStatus, aok := val["any_status"]; aok { + filterParams.AnyStatus = anyStatus.(string) + } + // key must match the schema attribute "load_dbserver_cluster" declared above + if loadDB, lok := val["load_dbserver_cluster"]; lok { + filterParams.LoadDBServerCluster = loadDB.(string) + } + + if timezone, tok := val["timezone"]; tok { + filterParams.TimeZone = timezone.(string) + } + + if orderCls, ok := val["order_by_dbserver_cluster"]; ok { + filterParams.OrderByDBServerCluster = orderCls.(string) + } + + if orderLogicalCls, ok := val["order_by_dbserver_logical_cluster"]; ok { + filterParams.OrderByDBServerLogicalCluster =
orderLogicalCls.(string) + } + } + } else { + filterParams.Detailed = "false" + filterParams.AnyStatus = "false" + filterParams.LoadDBServerCluster = "false" + filterParams.TimeZone = "UTC" + filterParams.OrderByDBServerCluster = "false" + filterParams.OrderByDBServerLogicalCluster = "false" + } + + resp, err := conn.Service.ListClones(ctx, filterParams) + if err != nil { + return diag.FromErr(err) + } + + if e := d.Set("clones", flattenDatabaseIntancesList(resp)); e != nil { + return diag.FromErr(e) + } + + uuid, er := uuid.GenerateUUID() + if er != nil { + return diag.Errorf("Error generating UUID for era clones: %+v", er) + } + d.SetId(uuid) + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_database.go b/nutanix/data_source_nutanix_ndb_database.go index 3b0a54626..6ba87668d 100644 --- a/nutanix/data_source_nutanix_ndb_database.go +++ b/nutanix/data_source_nutanix_ndb_database.go @@ -572,7 +572,7 @@ func flattenDBLcmConfig(pr *Era.LcmConfig) []map[string]interface{} { if pr != nil { lcm := map[string]interface{}{} - lcm["expiryDetails"] = flattenEraExpiryDetails(pr.ExpiryDetails) + lcm["expiry_details"] = flattenEraExpiryDetails(pr.ExpiryDetails) lcm["refresh_details"] = flattenEraRefreshDetails(pr.RefreshDetails) var preLcmComm []map[string]interface{} @@ -847,7 +847,9 @@ func flattenTimeMachineMetadata(pr *Era.TimeMachineMetadata) []map[string]interf tm["secure_info"] = pr.SecureInfo tm["info"] = pr.Info - tm["deregister_info"] = flattenDeRegiserInfo(pr.DeregisterInfo) + if pr.DeregisterInfo != nil { + tm["deregister_info"] = flattenDeRegiserInfo(pr.DeregisterInfo) + } tm["capability_reset_time"] = pr.CapabilityResetTime tm["auto_heal"] = pr.AutoHeal tm["auto_heal_snapshot_count"] = pr.AutoHealSnapshotCount diff --git a/nutanix/data_source_nutanix_ndb_snapshot.go b/nutanix/data_source_nutanix_ndb_snapshot.go new file mode 100644 index 000000000..dd45803ef --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_snapshot.go @@ -0,0 +1,480 @@ +package 
nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixNDBSnapshot() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBSnapshotRead, + Schema: map[string]*schema.Schema{ + "snapshot_id": { + Type: schema.TypeString, + Required: true, + }, + "filters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "timezone": { + Type: schema.TypeString, + Optional: true, + Default: "UTC", + }, + "load_replicated_child_snapshots": { + Type: schema.TypeString, + Optional: true, + Default: "false", + }, + }, + }, + }, + + // computed args + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "snapshot_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "protection_domain_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "app_info_version": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "applicable_types": { + Type: schema.TypeList, 
+ Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshot_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": dataSourceEraDatabaseInfo(), + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "replication_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "last_replication_retyr_source_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "async": { + Type: schema.TypeBool, + Computed: true, + }, + "stand_by": { + Type: schema.TypeBool, + Computed: true, + }, + "curation_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "operations_using_snapshot": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "software_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "software_database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "dbserver_storage_metadata_version": { + Type: schema.TypeInt, + Computed: true, + }, + "santised": { + Type: schema.TypeBool, + Computed: true, + }, + "santised_from_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + "processed": { + Type: schema.TypeBool, + Computed: true, + }, + "database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_id": { + 
Type: schema.TypeString, + Computed: true, + }, + "dbserver_name": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_ip": { + Type: schema.TypeString, + Computed: true, + }, + "replicated_snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "software_snapshot": { + Type: schema.TypeString, + Computed: true, + }, + "santised_snapshots": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_family": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_timestamp_date": { + Type: schema.TypeInt, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "parent_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "snapshot_size": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + } +} + +func dataSourceNutanixNDBSnapshotRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + snapID := "" + if snapshotID, ok := d.GetOk("snapshot_id"); ok { + snapID = snapshotID.(string) + } + + filterParams := &era.FilterParams{} + if filter, ok := d.GetOk("filters"); ok { + filterList := filter.([]interface{}) + + for _, v := range filterList { + val := v.(map[string]interface{}) + + if timezone, tok := val["timezone"]; tok { + filterParams.TimeZone = timezone.(string) + } + + if loadRep, lok := val["load_replicated_child_snapshots"]; lok { + filterParams.LoadReplicatedChildSnapshots = loadRep.(string) + } + } + } else { + filterParams.TimeZone = "UTC" + filterParams.LoadReplicatedChildSnapshots = "false" + } + + resp, err := conn.Service.GetSnapshot(ctx, snapID, filterParams) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("id", resp.ID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.OwnerID); err != nil { + return diag.FromErr(err) + } + + if err := 
d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_created", resp.DateCreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.DateModified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("properties", flattenDBInstanceProperties(resp.Properties)); err != nil { + return diag.FromErr(err) + } + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_uuid", resp.SnapshotUUID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("nx_cluster_id", resp.NxClusterID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("protection_domain_id", resp.ProtectionDomainID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_snapshot_id", resp.ParentSnapshotID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine_id", resp.TimeMachineID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_node_id", resp.DatabaseNodeID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("app_info_version", resp.AppInfoVersion); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("applicable_types", resp.ApplicableTypes); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_timestamp", resp.SnapshotTimeStamp); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("software_snapshot_id", resp.SoftwareSnapshotID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("software_database_snapshot", resp.SoftwareDatabaseSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_storage_metadata_version", resp.DBServerStorageMetadataVersion); err != nil { + return 
diag.FromErr(err) + } + + // if err := d.Set("santised", resp.Sanitized); err != nil { + // return diag.FromErr(err) + // } + + if err := d.Set("santised_from_snapshot_id", resp.SanitisedFromSnapshotID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("timezone", resp.TimeZone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("processed", resp.Processed); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_snapshot", resp.DatabaseSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("from_timestamp", resp.FromTimeStamp); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("to_timestamp", resp.ToTimeStamp); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_id", resp.DbserverID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_name", resp.DbserverName); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_ip", resp.DbserverIP); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("replicated_snapshots", resp.ReplicatedSnapshots); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("software_snapshot", resp.SoftwareSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("santised_snapshots", resp.SanitisedSnapshots); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_family", resp.SnapshotFamily); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_timestamp_date", resp.SnapshotTimeStampDate); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_snapshot", resp.ParentSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_size", resp.SnapshotSize); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("lcm_config", flattenDBLcmConfig(resp.LcmConfig)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metadata", flattenClonedMetadata(resp.Metadata)); err != 
nil { + return diag.FromErr(err) + } + + d.SetId(snapID) + return nil +} + +func flattenClonedMetadata(pr *era.ClonedMetadata) []interface{} { + if pr != nil { + cloneMetadata := make([]interface{}, 0) + meta := make(map[string]interface{}) + + meta["secure_info"] = pr.SecureInfo + meta["info"] = pr.Info + meta["deregister_info"] = pr.DeregisterInfo + meta["from_timestamp"] = pr.FromTimeStamp + meta["to_timestamp"] = pr.ToTimeStamp + meta["replication_retry_count"] = pr.ReplicationRetryCount + meta["last_replication_retyr_source_snapshot_id"] = pr.LastReplicationRetrySourceSnapshotID + meta["async"] = pr.Async + meta["stand_by"] = pr.Standby + meta["curation_retry_count"] = pr.CurationRetryCount + meta["operations_using_snapshot"] = pr.OperationsUsingSnapshot + + cloneMetadata = append(cloneMetadata, meta) + + return cloneMetadata + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_snapshot_test.go b/nutanix/data_source_nutanix_ndb_snapshot_test.go new file mode 100644 index 000000000..56db008b6 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_snapshot_test.go @@ -0,0 +1,101 @@ +package nutanix + +import ( + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const dataSourceNDBSnapshotName = "data.nutanix_ndb_snapshot.test" + +func TestAccEraSnapshotDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraSnapshotDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "name"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "owner_id"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "properties.#"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "metadata.#", "1"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "snapshot_uuid"), + 
resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "status", "ACTIVE"), + ), + }, + }, + }) +} + +func TestAccEraSnapshotDataSource_WithFilters(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraSnapshotDataSourceConfigWithFilters(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "name"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "owner_id"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "properties.#"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "nx_cluster_id"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "metadata.#", "1"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "snapshot_uuid"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "status", "ACTIVE"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "type", "DAILY_EXTRA"), + ), + }, + }, + }) +} + +func TestAccEraSnapshotDataSource_WithWrongFilters(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraSnapshotDataSourceConfigWithWrongFilters(), + ExpectError: regexp.MustCompile("An internal error has occurred"), + }, + }, + }) +} + +func testAccEraSnapshotDataSourceConfig() string { + return ` + data "nutanix_ndb_snapshots" "test1" {} + + data "nutanix_ndb_snapshot" "test" { + snapshot_id = data.nutanix_ndb_snapshots.test1.snapshots.0.id + } + ` +} + +func testAccEraSnapshotDataSourceConfigWithFilters() string { + return ` + data "nutanix_ndb_snapshots" "test1" {} + + data "nutanix_ndb_snapshot" "test" { + snapshot_id = data.nutanix_ndb_snapshots.test1.snapshots.0.id + filters{ + timezone= "UTC" + } + } + ` +} + +func testAccEraSnapshotDataSourceConfigWithWrongFilters() 
string { + return ` + data "nutanix_ndb_snapshots" "test1" {} + + data "nutanix_ndb_snapshot" "test" { + snapshot_id = data.nutanix_ndb_snapshots.test1.snapshots.0.id + filters{ + timezone= "IST" + } + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_snapshots.go b/nutanix/data_source_nutanix_ndb_snapshots.go new file mode 100644 index 000000000..888c85e09 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_snapshots.go @@ -0,0 +1,341 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixNDBSnapshots() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBSnapshotsRead, + Schema: map[string]*schema.Schema{ + "filters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "time_machine_id": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "snapshot_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "protection_domain_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_snapshot_id": { + Type: schema.TypeString, + Computed: 
true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "app_info_version": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "applicable_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshot_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": dataSourceEraDatabaseInfo(), + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "replication_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "last_replication_retyr_source_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "async": { + Type: schema.TypeBool, + Computed: true, + }, + "stand_by": { + Type: schema.TypeBool, + Computed: true, + }, + "curation_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "operations_using_snapshot": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "software_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "software_database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "dbserver_storage_metadata_version": { + Type: schema.TypeInt, + Computed: true, + }, + "santised": { + Type: schema.TypeBool, + Computed: true, + }, + "santised_from_snapshot_id": { + Type: 
schema.TypeString, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + "processed": { + Type: schema.TypeBool, + Computed: true, + }, + "database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_id": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_name": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_ip": { + Type: schema.TypeString, + Computed: true, + }, + "replicated_snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "software_snapshot": { + Type: schema.TypeString, + Computed: true, + }, + "santised_snapshots": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_family": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_timestamp_date": { + Type: schema.TypeInt, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "parent_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "snapshot_size": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixNDBSnapshotsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + tmsID := "" + if filter, ok := d.GetOk("filters"); ok { + filterList := filter.([]interface{}) + + for _, v := range filterList { + val := v.(map[string]interface{}) + + if tms, ok := val["time_machine_id"]; ok { + tmsID = tms.(string) + } + } + } + + resp, err := conn.Service.ListSnapshots(ctx, tmsID) + if err != nil { + return diag.FromErr(err) + } + + if e := d.Set("snapshots", flattenSnapshotsList(resp)); e != nil { + return diag.FromErr(e) + } + + uuid, er := uuid.GenerateUUID() + if er != nil { + return diag.Errorf("Error generating UUID for era snapshots: %+v", er) + } + d.SetId(uuid) + 
return nil +} + +func flattenSnapshotsList(sn *era.ListSnapshots) []map[string]interface{} { + if sn != nil { + snpList := []map[string]interface{}{} + for _, val := range *sn { + snap := map[string]interface{}{} + + snap["id"] = val.ID + snap["name"] = val.Name + snap["description"] = val.Description + snap["owner_id"] = val.OwnerID + snap["date_created"] = val.DateCreated + snap["date_modified"] = val.DateModified + snap["properties"] = flattenDBInstanceProperties(val.Properties) + snap["tags"] = flattenDBTags(val.Tags) + snap["snapshot_uuid"] = val.SnapshotUUID + snap["nx_cluster_id"] = val.NxClusterID + snap["protection_domain_id"] = val.ProtectionDomainID + snap["parent_snapshot_id"] = val.ParentSnapshotID + snap["time_machine_id"] = val.TimeMachineID + snap["database_node_id"] = val.DatabaseNodeID + snap["app_info_version"] = val.AppInfoVersion + snap["status"] = val.Status + snap["type"] = val.Type + snap["applicable_types"] = val.ApplicableTypes + snap["snapshot_timestamp"] = val.SnapshotTimeStamp + snap["metadata"] = flattenClonedMetadata(val.Metadata) + snap["software_snapshot_id"] = val.SoftwareSnapshotID + snap["software_database_snapshot"] = val.SoftwareDatabaseSnapshot + snap["dbserver_storage_metadata_version"] = val.DBServerStorageMetadataVersion + // snap["santised"] = val.Sanitized + snap["santised_from_snapshot_id"] = val.SanitisedFromSnapshotID + snap["timezone"] = val.TimeZone + snap["processed"] = val.Processed + snap["database_snapshot"] = val.DatabaseSnapshot + snap["from_timestamp"] = val.FromTimeStamp + snap["to_timestamp"] = val.ToTimeStamp + snap["dbserver_id"] = val.DbserverID + snap["dbserver_name"] = val.DbserverName + snap["dbserver_ip"] = val.DbserverIP + snap["replicated_snapshots"] = val.ReplicatedSnapshots + snap["software_snapshot"] = val.SoftwareSnapshot + snap["santised_snapshots"] = val.SanitisedSnapshots + snap["snapshot_family"] = val.SnapshotFamily + snap["snapshot_timestamp_date"] = val.SnapshotTimeStampDate + 
snap["lcm_config"] = flattenDBLcmConfig(val.LcmConfig) + snap["parent_snapshot"] = val.ParentSnapshot + snap["snapshot_size"] = val.SnapshotSize + + snpList = append(snpList, snap) + } + return snpList + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_snapshots_test.go b/nutanix/data_source_nutanix_ndb_snapshots_test.go new file mode 100644 index 000000000..2bfc67a34 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_snapshots_test.go @@ -0,0 +1,67 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const dataSourceNDBSnapshotsName = "data.nutanix_ndb_snapshots.test" + +func TestAccEraSnapshotsDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraSnapshotsDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.name"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.owner_id"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.properties.#"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotsName, "snapshots.0.metadata.#", "1"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.snapshot_uuid"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotsName, "snapshots.0.status", "ACTIVE"), + ), + }, + }, + }) +} + +func TestAccEraSnapshotsDataSource_WithFilters(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraSnapshotsDataSourceConfigWithFilters(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.name"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, 
"snapshots.0.owner_id"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.properties.#"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotsName, "snapshots.0.metadata.#", "1"), + resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.snapshot_uuid"), + resource.TestCheckResourceAttr(dataSourceNDBSnapshotsName, "snapshots.0.status", "ACTIVE"), + ), + }, + }, + }) +} + +func testAccEraSnapshotsDataSourceConfig() string { + return ` + data "nutanix_ndb_snapshots" "test" {} + ` +} + +func testAccEraSnapshotsDataSourceConfigWithFilters() string { + return ` + data "nutanix_ndb_time_machines" "test1" {} + + data "nutanix_ndb_snapshots" "test" { + filters{ + time_machine_id = data.nutanix_ndb_time_machines.test1.time_machines.0.id + } + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_time_machine.go b/nutanix/data_source_nutanix_ndb_time_machine.go new file mode 100644 index 000000000..789aac348 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_time_machine.go @@ -0,0 +1,676 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceNutanixNDBTimeMachine() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBTimeMachineRead, + Schema: map[string]*schema.Schema{ + "time_machine_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"time_machine_name"}, + }, + "time_machine_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"time_machine_id"}, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: 
schema.TypeString, + Computed: true, + }, + "access_level": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "clustered": { + Type: schema.TypeBool, + Computed: true, + }, + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "database_id": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "category": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "ea_status": { + Type: schema.TypeString, + Computed: true, + }, + "scope": { + Type: schema.TypeString, + Computed: true, + }, + "sla_id": { + Type: schema.TypeString, + Computed: true, + }, + "schedule_id": { + Type: schema.TypeString, + Computed: true, + }, + "database": { + Type: schema.TypeString, + Computed: true, + }, + "clones": { + Type: schema.TypeString, + Computed: true, + }, + "source_nx_clusters": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "sla_update_in_progress": { + Type: schema.TypeBool, + Computed: true, + }, + "metric": { + Type: schema.TypeString, + Computed: true, + }, + "sla_update_metadata": { + Type: schema.TypeString, + Computed: true, + }, + "sla": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "unique_name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "system_sla": { + Type: schema.TypeBool, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + 
Type: schema.TypeString, + Computed: true, + }, + + "continuous_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "daily_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "weekly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "monthly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "quarterly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "yearly_retention": { + Type: schema.TypeInt, + Computed: true, + }, + "reference_count": { + Type: schema.TypeInt, + Computed: true, + }, + "pitr_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "current_active_frequency": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "unique_name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "system_policy": { + Type: schema.TypeBool, + Computed: true, + }, + "global_policy": { + Type: schema.TypeBool, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_time_of_day": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Computed: true, + }, + "minutes": { + Type: schema.TypeInt, + Computed: true, + }, + "seconds": { + Type: schema.TypeInt, + Computed: true, + }, + "extra": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "continuous_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_backup_interval": { 
+ Type: schema.TypeInt, + Computed: true, + }, + "snapshots_per_day": { + Type: schema.TypeInt, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "weekly_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day_of_week": { + Type: schema.TypeString, + Computed: true, + }, + "day_of_week_value": { + Type: schema.TypeString, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "monthly_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day_of_month": { + Type: schema.TypeInt, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "yearly_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "month": { + Type: schema.TypeString, + Computed: true, + }, + "month_value": { + Type: schema.TypeString, + Computed: true, + }, + "day_of_month": { + Type: schema.TypeInt, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "quartely_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_month": { + Type: schema.TypeString, + Computed: true, + }, + "start_month_value": { + Type: schema.TypeString, + Computed: true, + }, + "day_of_month": { + Type: schema.TypeInt, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "daily_schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "reference_count": { + Type: schema.TypeInt, + Computed: true, + }, + "start_time": { + Type: schema.TypeString, + Computed: true, + }, + 
"time_zone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "capability_reset_time": { + Type: schema.TypeString, + Computed: true, + }, + "auto_heal": { + Type: schema.TypeBool, + Computed: true, + }, + "auto_heal_snapshot_count": { + Type: schema.TypeInt, + Computed: true, + }, + "auto_heal_log_catchup_count": { + Type: schema.TypeInt, + Computed: true, + }, + "first_snapshot_captured": { + Type: schema.TypeBool, + Computed: true, + }, + "first_snapshot_dispatched": { + Type: schema.TypeBool, + Computed: true, + }, + "last_snapshot_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_auto_snapshot_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_snapshot_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "last_auto_snapshot_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "last_successful_snapshot_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_successive_failure_count": { + Type: schema.TypeInt, + Computed: true, + }, + "last_heal_snapshot_operation": { + Type: schema.TypeString, + Computed: true, + }, + "last_log_catchup_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_successful_log_catchup_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "last_log_catchup_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + "log_catchup_successive_failure_count": { + Type: schema.TypeInt, + Computed: true, + }, + 
"last_pause_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_pause_by_force": { + Type: schema.TypeBool, + Computed: true, + }, + "last_resume_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_pause_reason": { + Type: schema.TypeString, + Computed: true, + }, + "state_before_restore": { + Type: schema.TypeString, + Computed: true, + }, + "last_health_alerted_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_ea_breakdown_time": { + Type: schema.TypeString, + Computed: true, + }, + "authorized_dbservers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "last_heal_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_heal_system_triggered": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixNDBTimeMachineRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + tmsID, tok := d.GetOk("time_machine_id") + tmsName, tnOk := d.GetOk("time_machine_name") + + if !tok && !tnOk { + return diag.Errorf("Atleast one of time_machine_id or time_machine_name is required to perform clone") + } + + // call time Machine API + + resp, err := conn.Service.GetTimeMachine(ctx, tmsID.(string), tmsName.(string)) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("id", resp.ID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.OwnerID); err != nil { + return diag.FromErr(err) + } + if err := d.Set("date_created", resp.DateCreated); err != nil { + return diag.FromErr(err) + } + if err := d.Set("date_modified", resp.DateModified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("access_level", resp.AccessLevel); err 
!= nil { + return diag.FromErr(err) + } + + if err := d.Set("properties", flattenDBInstanceProperties(resp.Properties)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clustered", resp.Clustered); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clone", resp.Clone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("internal", resp.Internal); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_id", resp.DatabaseID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("category", resp.Category); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("ea_status", resp.EaStatus); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clustered", resp.Clustered); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clone", resp.Clone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("scope", resp.Scope); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("sla_id", resp.SLAID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("schedule_id", resp.ScheduleID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metric", resp.Metric); err != nil { + return diag.FromErr(err) + } + if err := d.Set("database", resp.Database); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clones", resp.Clones); err != nil { + return diag.FromErr(err) + } + if err := d.Set("source_nx_clusters", resp.SourceNxClusters); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("sla_update_in_progress", resp.SLAUpdateInProgress); err != nil { + return diag.FromErr(err) + } + if err := d.Set("sla", flattenDBSLA(resp.SLA)); err != nil { + 
return diag.FromErr(err) + } + + if err := d.Set("schedule", flattenSchedule(resp.Schedule)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metadata", flattenTimeMachineMetadata(resp.Metadata)); err != nil { + return diag.FromErr(err) + } + + d.SetId(*resp.ID) + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_time_machine_capability.go b/nutanix/data_source_nutanix_ndb_time_machine_capability.go new file mode 100644 index 000000000..43d3bc489 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_time_machine_capability.go @@ -0,0 +1,1217 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func dataSourceNutanixNDBTmsCapability() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBTmsCapabilityRead, + Schema: map[string]*schema.Schema{ + "time_machine_id": { + Type: schema.TypeString, + Required: true, + }, + "output_time_zone": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "source": { + Type: schema.TypeBool, + Computed: true, + }, + "nx_cluster_association_type": { + Type: schema.TypeString, + Computed: true, + }, + "sla_id": { + Type: schema.TypeString, + Computed: true, + }, + "overall_continuous_range_end_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_continuous_snapshot_time": { + Type: schema.TypeString, + Computed: true, + }, + "log_catchup_start_time": { + Type: schema.TypeString, + Computed: true, + }, + "heal_with_reset_capability": { + Type: schema.TypeBool, + Computed: true, + }, + "database_ids": { + Type: schema.TypeList, + Computed: true, + Elem: 
&schema.Schema{ + Type: schema.TypeString, + }, + }, + // check data schema later + "log_time_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "capability": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeString, + Computed: true, + }, + "from": { + Type: schema.TypeString, + Computed: true, + }, + "to": { + Type: schema.TypeString, + Computed: true, + }, + "time_unit": { + Type: schema.TypeString, + Computed: true, + }, + "time_unit_number": { + Type: schema.TypeString, + Computed: true, + }, + "database_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "snapshot_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "protection_domain_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "app_info_version": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: 
schema.TypeString, + Computed: true, + }, + "applicable_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshot_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": dataSourceEraDatabaseInfo(), + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "replication_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "last_replication_retyr_source_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "async": { + Type: schema.TypeBool, + Computed: true, + }, + "stand_by": { + Type: schema.TypeBool, + Computed: true, + }, + "curation_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "operations_using_snapshot": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "software_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "software_database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "dbserver_storage_metadata_version": { + Type: schema.TypeInt, + Computed: true, + }, + "santised": { + Type: schema.TypeBool, + Computed: true, + }, + "santised_from_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + "processed": { + Type: schema.TypeBool, + Computed: true, + }, + "database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + 
"to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_id": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_name": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_ip": { + Type: schema.TypeString, + Computed: true, + }, + "replicated_snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "software_snapshot": { + Type: schema.TypeString, + Computed: true, + }, + "santised_snapshots": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_family": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_timestamp_date": { + Type: schema.TypeInt, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "parent_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "snapshot_size": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + "continuous_region": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from_time": { + Type: schema.TypeString, + Computed: true, + }, + "to_time": { + Type: schema.TypeString, + Computed: true, + }, + "sub_range": { + Type: schema.TypeBool, + Computed: true, + }, + "message": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "unknown_time_ranges": { + Type: schema.TypeString, + Computed: true, + }, + "processed_ranges": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "first": { + Type: schema.TypeString, + Computed: true, + }, + "second": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "unprocessed_ranges": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "first": { + Type: schema.TypeString, + Computed: true, + }, + "second": { + Type: 
schema.TypeString, + Computed: true, + }, + }, + }, + }, + "partial_ranges": { + Type: schema.TypeBool, + Computed: true, + }, + "time_range_and_databases": { + Type: schema.TypeString, + Computed: true, + }, + "snapshots": { + Type: schema.TypeString, + Computed: true, + }, + "db_logs": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "era_log_drive_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "from_time": { + Type: schema.TypeString, + Computed: true, + }, + "to_time": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "size": { + Type: schema.TypeInt, + Computed: true, + }, + "info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "unknown_time_range": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "deregister_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + }, + "operations": { + Type: schema.TypeList, + Computed: true, + Elem: 
&schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "curation_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "created_directly": { + Type: schema.TypeBool, + Computed: true, + }, + "updated_directly": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_id": { + Type: schema.TypeString, + Computed: true, + }, + "message": { + Type: schema.TypeString, + Computed: true, + }, + "unprocessed": { + Type: schema.TypeBool, + Computed: true, + }, + "log_copy_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "databases_continuous_region": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "capability_reset_time": { + Type: schema.TypeString, + Computed: true, + }, + "last_db_log": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "era_log_drive_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "from_time": { + Type: schema.TypeString, + Computed: true, + }, + "to_time": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "size": { + Type: schema.TypeInt, + Computed: true, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": { + Type: schema.TypeMap, + 
Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "deregister_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + }, + "operations": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "curation_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "created_directly": { + Type: schema.TypeBool, + Computed: true, + }, + "updated_directly": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_id": { + Type: schema.TypeString, + Computed: true, + }, + "message": { + Type: schema.TypeString, + Computed: true, + }, + "unprocessed": { + Type: schema.TypeBool, + Computed: true, + }, + "log_copy_operation_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "last_continuous_snapshot": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "snapshot_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "protection_domain_id": { + Type: schema.TypeString, + Computed: true, + }, + 
"parent_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "app_info_version": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "applicable_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshot_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": dataSourceEraDatabaseInfo(), + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "replication_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "last_replication_retry_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "last_replication_retry_source_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "async": { + Type: schema.TypeBool, + Computed: true, + }, + "stand_by": { + Type: schema.TypeBool, + Computed: true, + }, + "curation_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "operations_using_snapshot": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "software_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "software_database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "dbserver_storage_metadata_version": { + 
Type: schema.TypeInt, + Computed: true, + }, + "santised": { + Type: schema.TypeBool, + Computed: true, + }, + "santised_from_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + "processed": { + Type: schema.TypeBool, + Computed: true, + }, + "database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_id": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_name": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_ip": { + Type: schema.TypeString, + Computed: true, + }, + "replicated_snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "software_snapshot": { + Type: schema.TypeString, + Computed: true, + }, + "santised_snapshots": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_family": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_timestamp_date": { + Type: schema.TypeInt, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "parent_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "snapshot_size": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixNDBTmsCapabilityRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + tmsID := d.Get("time_machine_id") + resp, er := conn.Service.TimeMachineCapability(ctx, tmsID.(string)) + if er != nil { + return diag.FromErr(er) + } + + if err := d.Set("output_time_zone", resp.OutputTimeZone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("nx_cluster_id", resp.NxClusterID); err != nil { + return diag.FromErr(err) + } + + if 
err := d.Set("source", resp.Source); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("nx_cluster_association_type", resp.NxClusterAssociationType); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("sla_id", resp.SLAID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("overall_continuous_range_end_time", resp.OverallContinuousRangeEndTime); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("last_continuous_snapshot_time", resp.LastContinuousSnapshotTime); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("log_catchup_start_time", resp.LogCatchupStartTime); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("heal_with_reset_capability", resp.HealWithResetCapability); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_ids", utils.StringValueSlice(resp.DatabaseIds)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("capability", flattenTmsCapability(resp.Capability)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("capability_reset_time", resp.CapabilityResetTime); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("last_db_log", flattenLastDBLog(resp.LastDBLog)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("last_continuous_snapshot", flattenLastContinousSnapshot(resp.LastContinuousSnapshot)); err != nil { + return diag.FromErr(err) + } + uuid, e := uuid.GenerateUUID() + + if e != nil { + return diag.Errorf("Error generating UUID for era tms capability: %+v", e) + } + d.SetId(uuid) + return nil +} + +func flattenTmsCapability(pr []*era.Capability) []map[string]interface{} { + if len(pr) > 0 { + tmsList := []map[string]interface{}{} + + for _, v := range pr { + cap := map[string]interface{}{} + + cap["continuous_region"] = flattenContinousRegion(v.ContinuousRegion) + cap["database_ids"] = utils.StringValueSlice(v.DatabaseIds) + cap["databases_continuous_region"] = v.DatabasesContinuousRegion + 
cap["from"] = v.From + cap["mode"] = v.Mode + cap["snapshots"] = flattenSnapshotsList(v.Snapshots) + cap["time_unit"] = v.TimeUnit + cap["time_unit_number"] = v.TimeUnitNumber + cap["to"] = v.To + + tmsList = append(tmsList, cap) + } + return tmsList + } + return nil +} + +func flattenContinousRegion(pr *era.ContinuousRegion) []map[string]interface{} { + if pr != nil { + continousRegion := make([]map[string]interface{}, 0) + conReg := map[string]interface{}{} + + conReg["from_time"] = pr.FromTime + conReg["to_time"] = pr.ToTime + conReg["sub_range"] = pr.SubRange + conReg["message"] = pr.Message + conReg["snapshot_ids"] = utils.StringSlice(pr.SnapshotIds) + conReg["unknown_time_ranges"] = pr.UnknownTimeRanges + conReg["processed_ranges"] = flattenProcessedRanges(pr.ProcessedRanges) + conReg["unprocessed_ranges"] = flattenProcessedRanges(pr.UnprocessedRanges) + conReg["partial_ranges"] = pr.PartialRanges + conReg["time_range_and_databases"] = pr.TimeRangeAndDatabases + conReg["snapshots"] = pr.Snapshots + conReg["db_logs"] = flattenDBLogs(pr.DBLogs) + conReg["timezone"] = pr.TimeZone + + continousRegion = append(continousRegion, conReg) + return continousRegion + } + return nil +} + +func flattenDBLogs(pr []*era.DBLogs) []map[string]interface{} { + if len(pr) > 0 { + res := make([]map[string]interface{}, len(pr)) + + for _, v := range pr { + val := map[string]interface{}{} + + val["id"] = v.ID + val["name"] = v.Name + val["era_log_drive_id"] = v.EraLogDriveID + val["database_node_id"] = v.DatabaseNodeID + val["from_time"] = v.FromTime + val["to_time"] = v.ToTime + val["status"] = v.Status + val["size"] = v.Size + val["metadata"] = flattenDBLogMetadata(v.Metadata) + val["date_created"] = v.DateCreated + val["date_modified"] = v.DateModified + val["owner_id"] = v.OwnerID + val["database_id"] = v.DatabaseID + val["message"] = v.Message + val["unprocessed"] = v.Unprocessed + val["log_copy_operation_id"] = v.LogCopyOperationID + + res = append(res, val) + } + return res 
+ } + return nil +} + +func flattenDBLogMetadata(pr *era.DBLogsMetadata) []map[string]interface{} { + if pr != nil { + logsMeta := make([]map[string]interface{}, 0) + log := map[string]interface{}{} + + log["secure_info"] = pr.SecureInfo + log["info"] = pr.Info + log["deregister_info"] = flattenDeRegiserInfo(pr.DeregisterInfo) + log["curation_retry_count"] = pr.CurationRetryCount + log["created_directly"] = pr.CreatedDirectly + log["updated_directly"] = pr.UpdatedDirectly + + logsMeta = append(logsMeta, log) + return logsMeta + } + return nil +} + +func flattenLastDBLog(pr *era.DBLogs) []map[string]interface{} { + if pr != nil { + res := make([]map[string]interface{}, 0) + val := map[string]interface{}{} + + val["id"] = pr.ID + val["name"] = pr.Name + val["era_log_drive_id"] = pr.EraLogDriveID + val["database_node_id"] = pr.DatabaseNodeID + val["from_time"] = pr.FromTime + val["to_time"] = pr.ToTime + val["status"] = pr.Status + val["size"] = pr.Size + val["metadata"] = flattenDBLogMetadata(pr.Metadata) + val["date_created"] = pr.DateCreated + val["date_modified"] = pr.DateModified + val["owner_id"] = pr.OwnerID + val["database_id"] = pr.DatabaseID + val["message"] = pr.Message + val["unprocessed"] = pr.Unprocessed + val["log_copy_operation_id"] = pr.LogCopyOperationID + + res = append(res, val) + return res + } + return nil +} + +func flattenLastContinousSnapshot(pr *era.LastContinuousSnapshot) []map[string]interface{} { + if pr != nil { + snpList := make([]map[string]interface{}, 0) + snap := map[string]interface{}{} + + snap["id"] = pr.ID + snap["name"] = pr.Name + snap["description"] = pr.Description + snap["owner_id"] = pr.OwnerID + snap["date_created"] = pr.DateCreated + snap["date_modified"] = pr.DateModified + snap["properties"] = flattenDBInstanceProperties(pr.Properties) + snap["tags"] = flattenDBTags(pr.Tags) + snap["snapshot_uuid"] = pr.SnapshotUUID + snap["nx_cluster_id"] = pr.NxClusterID + snap["protection_domain_id"] = pr.ProtectionDomainID + 
snap["parent_snapshot_id"] = pr.ParentSnapshotID + snap["time_machine_id"] = pr.TimeMachineID + snap["database_node_id"] = pr.DatabaseNodeID + snap["app_info_version"] = pr.AppInfoVersion + snap["status"] = pr.Status + snap["type"] = pr.Type + snap["applicable_types"] = pr.ApplicableTypes + snap["snapshot_timestamp"] = pr.SnapshotTimeStamp + snap["metadata"] = flattenLastContinousSnapshotMetadata(pr.Metadata) + snap["software_snapshot_id"] = pr.SoftwareSnapshotID + snap["software_database_snapshot"] = pr.SoftwareDatabaseSnapshot + snap["santised_from_snapshot_id"] = pr.SanitisedFromSnapshotID + snap["processed"] = pr.Processed + snap["database_snapshot"] = pr.DatabaseSnapshot + snap["from_timestamp"] = pr.FromTimeStamp + snap["to_timestamp"] = pr.ToTimeStamp + snap["dbserver_id"] = pr.DBserverID + snap["dbserver_name"] = pr.DBserverName + snap["dbserver_ip"] = pr.DBserverIP + snap["replicated_snapshots"] = pr.ReplicatedSnapshots + snap["software_snapshot"] = pr.SoftwareSnapshot + snap["santised_snapshots"] = pr.SanitisedSnapshots + snap["snapshot_family"] = pr.SnapshotFamily + snap["snapshot_timestamp_date"] = pr.SnapshotTimeStampDate + snap["lcm_config"] = flattenDBLcmConfig(pr.LcmConfig) + snap["parent_snapshot"] = pr.ParentSnapshot + snap["snapshot_size"] = pr.SnapshotSize + + snpList = append(snpList, snap) + return snpList + } + return nil +} + +func flattenLastContinousSnapshotMetadata(pr *era.LastContinuousSnapshotMetadata) []map[string]interface{} { + if pr != nil { + res := make([]map[string]interface{}, 0) + + meta := map[string]interface{}{} + + meta["secure_info"] = pr.SecureInfo + meta["info"] = pr.Info + meta["deregister_info"] = pr.DeregisterInfo + meta["from_timestamp"] = pr.FromTimeStamp + meta["to_timestamp"] = pr.ToTimeStamp + meta["replication_retry_count"] = pr.ReplicationRetryCount + meta["last_replication_retry_timestamp"] = pr.LastReplicationRetryTimestamp + meta["last_replication_retry_source_snapshot_id"] = 
pr.LastReplicationRetrySourceSnapshotID + meta["async"] = pr.Async + meta["stand_by"] = pr.Standby + meta["curation_retry_count"] = pr.CurationRetryCount + meta["operations_using_snapshot"] = pr.OperationsUsingSnapshot + + res = append(res, meta) + return res + } + return nil +} + +func flattenProcessedRanges(pr []*era.ProcessedRanges) []interface{} { + if len(pr) > 0 { + res := make([]interface{}, len(pr)) + + for _, v := range pr { + proRanges := map[string]interface{}{} + + proRanges["first"] = v.First + proRanges["second"] = v.Second + + res = append(res, proRanges) + } + return res + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_time_machine_capability_test.go b/nutanix/data_source_nutanix_ndb_time_machine_capability_test.go new file mode 100644 index 000000000..b0bab83ac --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_time_machine_capability_test.go @@ -0,0 +1,39 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const dataSourceNDBTmsCapability = "data.nutanix_ndb_tms_capability.test" + +func TestAccEraTmsCapabilityDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraTmsCapabilityDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceNDBTmsCapability, "output_time_zone"), + resource.TestCheckResourceAttrSet(dataSourceNDBTmsCapability, "type"), + resource.TestCheckResourceAttrSet(dataSourceNDBTmsCapability, "nx_cluster_id"), + resource.TestCheckResourceAttrSet(dataSourceNDBTmsCapability, "sla_id"), + resource.TestCheckResourceAttrSet(dataSourceNDBTmsCapability, "capability.#"), + resource.TestCheckResourceAttrSet(dataSourceNDBTmsCapability, "capability.0.mode"), + ), + }, + }, + }) +} + +func testAccEraTmsCapabilityDataSourceConfig() string { + return ` + data 
"nutanix_ndb_time_machines" "test1" {} + + data "nutanix_ndb_tms_capability" "test"{ + time_machine_id = data.nutanix_ndb_time_machines.test1.time_machines.0.id + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_time_machine_test.go b/nutanix/data_source_nutanix_ndb_time_machine_test.go new file mode 100644 index 000000000..60242bc29 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_time_machine_test.go @@ -0,0 +1,69 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const dataSourceTMName = "data.nutanix_ndb_time_machine.test" + +func TestAccEraTimeMachineDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraTimeMachineDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceTMName, "name"), + resource.TestCheckResourceAttrSet(dataSourceTMName, "description"), + resource.TestCheckResourceAttr(dataSourceTMName, "metadata.#", "1"), + resource.TestCheckResourceAttr(dataSourceTMName, "clone", "false"), + resource.TestCheckResourceAttr(dataSourceTMName, "sla.#", "1"), + resource.TestCheckResourceAttr(dataSourceTMName, "schedule.#", "1"), + ), + }, + }, + }) +} + +func TestAccEraTimeMachineDataSource_basicWithID(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraTimeMachineDataSourceConfigWithID(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceTMName, "name"), + resource.TestCheckResourceAttrSet(dataSourceTMName, "description"), + resource.TestCheckResourceAttr(dataSourceTMName, "metadata.#", "1"), + resource.TestCheckResourceAttr(dataSourceTMName, "clone", "false"), + resource.TestCheckResourceAttr(dataSourceTMName, "sla.#", "1"), + 
resource.TestCheckResourceAttr(dataSourceTMName, "schedule.#", "1"), + ), + }, + }, + }) +} + +func testAccEraTimeMachineDataSourceConfig() string { + return ` + data "nutanix_ndb_time_machines" "test1" {} + + data "nutanix_ndb_time_machine" "test"{ + time_machine_name = data.nutanix_ndb_time_machines.test1.time_machines.0.name + } + ` +} + +func testAccEraTimeMachineDataSourceConfigWithID() string { + return ` + data "nutanix_ndb_time_machines" "test1" {} + + data "nutanix_ndb_time_machine" "test"{ + time_machine_id = data.nutanix_ndb_time_machines.test1.time_machines.0.id + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_time_machines.go b/nutanix/data_source_nutanix_ndb_time_machines.go new file mode 100644 index 000000000..3365492bc --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_time_machines.go @@ -0,0 +1,86 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixNDBTimeMachines() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBTimeMachinesRead, + Schema: map[string]*schema.Schema{ + "time_machines": dataSourceEraTimeMachine(), + }, + } +} + +func dataSourceNutanixNDBTimeMachinesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + // call tms API + resp, err := conn.Service.ListTimeMachines(ctx) + + if err != nil { + return diag.FromErr(err) + } + + if e := d.Set("time_machines", flattenTimeMachines(resp)); e != nil { + return diag.FromErr(e) + } + + uuid, er := uuid.GenerateUUID() + + if er != nil { + return diag.Errorf("Error generating UUID for era time machines: %+v", err) + } + d.SetId(uuid) + return nil +} + +func flattenTimeMachines(tms *era.ListTimeMachines) []map[string]interface{} { + if tms != nil { 
+ lst := []map[string]interface{}{} + + for _, pr := range *tms { + tmac := map[string]interface{}{} + + tmac["id"] = pr.ID + tmac["name"] = pr.Name + tmac["description"] = pr.Description + tmac["owner_id"] = pr.OwnerID + tmac["date_created"] = pr.DateCreated + tmac["date_modified"] = pr.DateModified + tmac["access_level"] = pr.AccessLevel + tmac["properties"] = flattenDBInstanceProperties(pr.Properties) + tmac["tags"] = flattenDBTags(pr.Tags) + tmac["clustered"] = pr.Clustered + tmac["clone"] = pr.Clone + tmac["internal"] = pr.Internal + tmac["database_id"] = pr.DatabaseID + tmac["type"] = pr.Type + tmac["category"] = pr.Category + tmac["status"] = pr.Status + tmac["ea_status"] = pr.EaStatus + tmac["scope"] = pr.Scope + tmac["sla_id"] = pr.SLAID + tmac["schedule_id"] = pr.ScheduleID + tmac["metric"] = pr.Metric + // tmac["sla_update_metadata"] = pr.SLAUpdateMetadata + tmac["database"] = pr.Database + tmac["clones"] = pr.Clones + tmac["source_nx_clusters"] = pr.SourceNxClusters + tmac["sla_update_in_progress"] = pr.SLAUpdateInProgress + tmac["sla"] = flattenDBSLA(pr.SLA) + tmac["schedule"] = flattenSchedule(pr.Schedule) + tmac["metadata"] = flattenTimeMachineMetadata(pr.Metadata) + + lst = append(lst, tmac) + } + return lst + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_time_machines_test.go b/nutanix/data_source_nutanix_ndb_time_machines_test.go new file mode 100644 index 000000000..f3f91e74c --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_time_machines_test.go @@ -0,0 +1,35 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const dataSourceTMsName = "data.nutanix_ndb_time_machines.test" + +func TestAccEraTimeMachinesDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraTimeMachinesDataSourceConfig(), + Check: 
resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceTMsName, "time_machines.0.name"), + resource.TestCheckResourceAttrSet(dataSourceTMsName, "time_machines.0.description"), + resource.TestCheckResourceAttr(dataSourceTMsName, "time_machines.0.metadata.#", "1"), + resource.TestCheckResourceAttr(dataSourceTMsName, "time_machines.0.clone", "false"), + resource.TestCheckResourceAttr(dataSourceTMsName, "time_machines.0.sla.#", "1"), + resource.TestCheckResourceAttr(dataSourceTMsName, "time_machines.0.schedule.#", "1"), + ), + }, + }, + }) +} + +func testAccEraTimeMachinesDataSourceConfig() string { + return ` + data "nutanix_ndb_time_machines" "test" {} + ` +} diff --git a/nutanix/provider.go b/nutanix/provider.go index 7214657da..ad894a93a 100644 --- a/nutanix/provider.go +++ b/nutanix/provider.go @@ -196,6 +196,13 @@ func Provider() *schema.Provider { "nutanix_ndb_clusters": dataSourceNutanixEraClusters(), "nutanix_ndb_database": dataSourceNutanixEraDatabase(), "nutanix_ndb_databases": dataSourceNutanixEraDatabases(), + "nutanix_ndb_time_machine": dataSourceNutanixNDBTimeMachine(), + "nutanix_ndb_time_machines": dataSourceNutanixNDBTimeMachines(), + "nutanix_ndb_clone": dataSourceNutanixNDBClone(), + "nutanix_ndb_clones": dataSourceNutanixNDBClones(), + "nutanix_ndb_snapshot": dataSourceNutanixNDBSnapshot(), + "nutanix_ndb_snapshots": dataSourceNutanixNDBSnapshots(), + "nutanix_ndb_tms_capability": dataSourceNutanixNDBTmsCapability(), }, ResourcesMap: map[string]*schema.Resource{ "nutanix_virtual_machine": resourceNutanixVirtualMachine(), @@ -233,6 +240,9 @@ func Provider() *schema.Provider { "nutanix_ndb_scale_database": resourceNutanixNDBScaleDatabase(), "nutanix_ndb_database_scale": resourceNutanixNDBScaleDatabase(), "nutanix_ndb_register_database": resourceNutanixNDBRegisterDatabase(), + "nutanix_ndb_database_snapshot": resourceNutanixNDBDatabaseSnapshot(), + "nutanix_ndb_clone": resourceNutanixNDBClone(), + 
"nutanix_ndb_authorize_dbserver": resourceNutanixNDBAuthorizeDBServer(), }, ConfigureContextFunc: providerConfigure, } diff --git a/nutanix/resource_nutanix_ndb_authorize_dbservers.go b/nutanix/resource_nutanix_ndb_authorize_dbservers.go new file mode 100644 index 000000000..87736d6f4 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_authorize_dbservers.go @@ -0,0 +1,125 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBAuthorizeDBServer() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBAuthorizeDBServerCreate, + ReadContext: resourceNutanixNDBAuthorizeDBServerRead, + UpdateContext: resourceNutanixNDBAuthorizeDBServerUpdate, + DeleteContext: resourceNutanixNDBAuthorizeDBServerDelete, + Schema: map[string]*schema.Schema{ + "time_machine_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"time_machine_name"}, + }, + "time_machine_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"time_machine_id"}, + }, + "dbservers_id": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func resourceNutanixNDBAuthorizeDBServerCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + req := make([]*string, 0) + + tmsID, tok := d.GetOk("time_machine_id") + tmsName, tnOk := d.GetOk("time_machine_name") + + if !tok && !tnOk { + return diag.Errorf("Atleast one of time_machine_id or time_machine_name is required to perform clone") + } + + if len(tmsName.(string)) > 0 { + // call time machine API with value-type name + res, er := conn.Service.GetTimeMachine(ctx, "", tmsName.(string)) + if er != nil { + return diag.FromErr(er) + } + + tmsID = *res.ID + } + + if 
dbserversID, ok := d.GetOk("dbservers_id"); ok { + dbser := dbserversID.([]interface{}) + + for _, v := range dbser { + req = append(req, utils.StringPtr(v.(string))) + } + } + // call for Authorize API + + resp, err := conn.Service.AuthorizeDBServer(ctx, tmsID.(string), req) + if err != nil { + return diag.FromErr(err) + } + + if resp.Status == utils.StringPtr("success") { + d.SetId(tmsID.(string)) + } + + return nil +} + +func resourceNutanixNDBAuthorizeDBServerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} + +func resourceNutanixNDBAuthorizeDBServerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} + +func resourceNutanixNDBAuthorizeDBServerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + tmsID, tok := d.GetOk("time_machine_id") + tmsName, tnOk := d.GetOk("time_machine_name") + + if !tok && !tnOk { + return diag.Errorf("Atleast one of time_machine_id or time_machine_name is required to perform clone") + } + + if len(tmsName.(string)) > 0 { + // call time machine API with value-type name + res, er := conn.Service.GetTimeMachine(ctx, "", tmsName.(string)) + if er != nil { + return diag.FromErr(er) + } + + tmsID = *res.ID + } + + deauthorizeDBs := make([]*string, 0) + + if dbserversID, ok := d.GetOk("dbservers_id"); ok { + dbser := dbserversID.([]interface{}) + + for _, v := range dbser { + deauthorizeDBs = append(deauthorizeDBs, utils.StringPtr(v.(string))) + } + } + + _, err := conn.Service.DeAuthorizeDBServer(ctx, tmsID.(string), deauthorizeDBs) + if err != nil { + return diag.FromErr(err) + } + d.SetId("") + return nil +} diff --git a/nutanix/resource_nutanix_ndb_clone.go b/nutanix/resource_nutanix_ndb_clone.go new file mode 100644 index 000000000..31ad0ff43 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_clone.go @@ -0,0 +1,833 @@ +package nutanix + +import ( + "context" + "log" 
+ + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBClone() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBCloneCreate, + ReadContext: resourceNutanixNDBCloneRead, + UpdateContext: resourceNutanixNDBCloneUpdate, + DeleteContext: resourceNutanixNDBCloneDelete, + + Schema: map[string]*schema.Schema{ + "time_machine_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"time_machine_name"}, + }, + "time_machine_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"time_machine_id"}, + }, + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"user_pitr_timestamp"}, + }, + "user_pitr_timestamp": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"snapshot_id"}, + }, + "time_zone": { + Type: schema.TypeString, + Optional: true, + }, + "node_count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + "nodes": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vm_name": { + Type: schema.TypeString, + Optional: true, + }, + "compute_profile_id": { + Type: schema.TypeString, + Optional: true, + }, + "network_profile_id": { + Type: schema.TypeString, + Optional: true, + }, + "new_db_server_time_zone": { + Type: schema.TypeString, + Optional: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Optional: true, + }, + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "value": { + 
Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "dbserver_id": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "lcm_config": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "database_lcm_config": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expiry_details": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expire_in_days": { + Type: schema.TypeInt, + Optional: true, + }, + "expiry_date_timezone": { + Type: schema.TypeString, + Required: true, + }, + "delete_database": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "refresh_details": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "refresh_in_days": { + Type: schema.TypeInt, + Optional: true, + }, + "refresh_time": { + Type: schema.TypeString, + Optional: true, + }, + "refresh_date_timezone": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Required: true, + }, + "ssh_public_key": { + Type: schema.TypeString, + Optional: true, + }, + "compute_profile_id": { + Type: schema.TypeString, + Optional: true, + }, + "network_profile_id": { + Type: schema.TypeString, + Optional: true, + }, + "database_parameter_profile_id": { + Type: schema.TypeString, + Optional: true, + }, + "vm_password": { + Type: schema.TypeString, + Optional: true, + }, + "create_dbserver": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "clustered": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "dbserver_id": { + Type: schema.TypeString, + Optional: true, + }, + 
"dbserver_cluster_id": { + Type: schema.TypeString, + Optional: true, + }, + "dbserver_logical_cluster_id": { + Type: schema.TypeString, + Optional: true, + }, + "latest_snapshot": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "postgresql_info": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vm_name": { + Type: schema.TypeString, + Required: true, + }, + "dbserver_description": { + Type: schema.TypeString, + Optional: true, + }, + "db_password": { + Type: schema.TypeString, + Required: true, + }, + "pre_clone_cmd": { + Type: schema.TypeString, + Optional: true, + }, + "post_clone_cmd": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "actionarguments": actionArgumentsSchema(), + // Computed values + + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "tags": dataSourceEraDBInstanceTags(), + "clone": { + Type: schema.TypeBool, + Computed: true, + }, + "era_created": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "placeholder": { + Type: schema.TypeBool, + Computed: true, + }, + "database_name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "database_cluster_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "database_status": { + Type: schema.TypeString, + Computed: true, + }, + "info": dataSourceEraDatabaseInfo(), + "group_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "metadata": dataSourceEraDBInstanceMetadata(), + "metric": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + 
}, + "category": { + Type: schema.TypeString, + Computed: true, + }, + "parent_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_source_database_id": { + Type: schema.TypeString, + Computed: true, + }, + "time_machine": dataSourceEraTimeMachine(), + "dbserver_logical_cluster": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "database_nodes": dataSourceEraDatabaseNodes(), + "linked_databases": dataSourceEraLinkedDatabases(), + }, + } +} + +func resourceNutanixNDBCloneCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + req := &era.CloneRequest{} + + tmsID, tok := d.GetOk("time_machine_id") + tmsName, tnOk := d.GetOk("time_machine_name") + + if !tok && !tnOk { + return diag.Errorf("Atleast one of time_machine_id or time_machine_name is required to perform clone") + } + + if len(tmsName.(string)) > 0 { + // call time machine API with value-type name + res, err := conn.Service.GetTimeMachine(ctx, "", tmsName.(string)) + if err != nil { + return diag.FromErr(err) + } + + tmsID = *res.ID + } + + req.TimeMachineID = utils.StringPtr(tmsID.(string)) + + // build request for clone + if err := buildCloneRequest(d, req); err != nil { + return diag.FromErr(err) + } + + // call clone API + + resp, err := conn.Service.CreateClone(ctx, tmsID.(string), req) + if err != nil { + return diag.FromErr(err) + } + d.SetId(resp.Entityid) + + // Get Operation ID from response of Clone and poll for the operation to get completed. 
+ opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for time machine clone (%s) to create: %s", resp.Entityid, errWaitTask) + } + return nil +} + +func resourceNutanixNDBCloneRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.GetClone(ctx, d.Id(), "", nil) + if err != nil { + return diag.FromErr(err) + } + if resp != nil { + if err = d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err = d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + props := []interface{}{} + for _, prop := range resp.Properties { + props = append(props, map[string]interface{}{ + "name": prop.Name, + "value": prop.Value, + }) + } + if err := d.Set("properties", props); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_created", resp.Datecreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.Datemodified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clone", resp.Clone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("internal", resp.Internal); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("placeholder", resp.Placeholder); err != nil { + return diag.FromErr(err) 
+ } + + if err := d.Set("database_name", resp.Databasename); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_cluster_type", resp.Databaseclustertype); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_status", resp.Databasestatus); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_logical_cluster_id", resp.Dbserverlogicalclusterid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine_id", resp.Timemachineid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_time_machine_id", resp.Parenttimemachineid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_zone", resp.Timezone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("info", flattenDBInfo(resp.Info)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("group_info", resp.GroupInfo); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metadata", flattenDBInstanceMetadata(resp.Metadata)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metric", resp.Metric); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("category", resp.Category); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_database_id", resp.ParentDatabaseID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_source_database_id", resp.ParentSourceDatabaseID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("lcm_config", flattenDBLcmConfig(resp.Lcmconfig)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine", flattenDBTimeMachine(resp.TimeMachine)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_logical_cluster", resp.Dbserverlogicalcluster); err != nil { + 
return diag.FromErr(err) + } + + if err := d.Set("database_nodes", flattenDBNodes(resp.Databasenodes)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("linked_databases", flattenDBLinkedDbs(resp.Linkeddatabases)); err != nil { + return diag.FromErr(err) + } + } + + return nil +} + +func resourceNutanixNDBCloneUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + dbID := d.Id() + + name := "" + description := "" + + if d.HasChange("name") { + name = d.Get("name").(string) + } + + if d.HasChange("description") { + description = d.Get("description").(string) + } + + tags := make([]*era.Tags, 0) + if d.HasChange("tags") { + tags = expandTags(d.Get("tags").([]interface{})) + } + + updateReq := era.UpdateDatabaseRequest{ + Name: name, + Description: description, + Tags: tags, + Resetname: true, + Resetdescription: true, + Resettags: true, + } + + res, err := conn.Service.UpdateCloneDatabase(ctx, dbID, &updateReq) + if err != nil { + return diag.FromErr(err) + } + + if res != nil { + if err = d.Set("description", res.Description); err != nil { + return diag.FromErr(err) + } + + if err = d.Set("name", res.Name); err != nil { + return diag.FromErr(err) + } + } + return nil +} + +func resourceNutanixNDBCloneDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + if conn == nil { + return diag.Errorf("era is nil") + } + + dbID := d.Id() + + req := era.DeleteDatabaseRequest{ + Delete: true, + Remove: false, + Softremove: false, + Forced: false, + Deletetimemachine: true, + Deletelogicalcluster: true, + } + res, err := conn.Service.DeleteClone(ctx, dbID, &req) + if err != nil { + return diag.FromErr(err) + } + + log.Printf("Operation to unregister clone instance with id %s has started, operation id: %s", dbID, res.Operationid) + opID := res.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + 
} + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Cluster GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for clone Instance (%s) to unregister: %s", res.Entityid, errWaitTask) + } + return nil +} + +func buildCloneRequest(d *schema.ResourceData, res *era.CloneRequest) error { + if name, ok := d.GetOk("name"); ok { + res.Name = utils.StringPtr(name.(string)) + } + + if des, ok := d.GetOk("description"); ok { + res.Description = utils.StringPtr(des.(string)) + } + + if nxcls, ok := d.GetOk("nx_cluster_id"); ok { + res.NxClusterID = utils.StringPtr(nxcls.(string)) + } + + if ssh, ok := d.GetOk("ssh_public_key"); ok { + res.SSHPublicKey = utils.StringPtr(ssh.(string)) + } + if userPitrTimestamp, ok := d.GetOk("user_pitr_timestamp"); ok { + res.UserPitrTimestamp = utils.StringPtr(userPitrTimestamp.(string)) + } + if timeZone, ok := d.GetOk("time_zone"); ok && len(timeZone.(string)) > 0 { + res.TimeZone = utils.StringPtr(timeZone.(string)) + } + if computeProfileID, ok := d.GetOk("compute_profile_id"); ok { + res.ComputeProfileID = utils.StringPtr(computeProfileID.(string)) + } + if networkProfileID, ok := d.GetOk("network_profile_id"); ok { + res.NetworkProfileID = utils.StringPtr(networkProfileID.(string)) + } + if databaseParameterProfileID, ok := d.GetOk("database_parameter_profile_id"); ok { + res.DatabaseParameterProfileID = utils.StringPtr(databaseParameterProfileID.(string)) + } + if snapshotID, ok := d.GetOk("snapshot_id"); ok { + res.SnapshotID = utils.StringPtr(snapshotID.(string)) + } + + if dbserverID, ok := d.GetOk("dbserver_id"); ok { + res.DbserverID = 
utils.StringPtr(dbserverID.(string)) + } + if dbserverClusterID, ok := d.GetOk("dbserver_cluster_id"); ok { + res.DbserverClusterID = utils.StringPtr(dbserverClusterID.(string)) + } + if dbserverLogicalClusterID, ok := d.GetOk("dbserver_logical_cluster_id"); ok { + res.DbserverLogicalClusterID = utils.StringPtr(dbserverLogicalClusterID.(string)) + } + if createDbserver, ok := d.GetOk("create_dbserver"); ok { + res.CreateDbserver = createDbserver.(bool) + } + if clustered, ok := d.GetOk("clustered"); ok { + res.Clustered = clustered.(bool) + } + if nodeCount, ok := d.GetOk("node_count"); ok { + res.NodeCount = utils.IntPtr(nodeCount.(int)) + } + + if nodes, ok := d.GetOk("nodes"); ok { + res.Nodes = expandClonesNodes(nodes.([]interface{})) + } + + if lcmConfig, ok := d.GetOk("lcm_config"); ok { + res.LcmConfig = expandLCMConfig(lcmConfig.([]interface{})) + } + + if postgres, ok := d.GetOk("postgresql_info"); ok && len(postgres.([]interface{})) > 0 { + res.ActionArguments = expandPostgreSQLCloneActionArgs(d, postgres.([]interface{})) + } + + if tags, ok := d.GetOk("tags"); ok && len(tags.([]interface{})) > 0 { + res.Tags = expandTags(tags.([]interface{})) + } + return nil +} + +func expandClonesNodes(pr []interface{}) []*era.Nodes { + nodes := make([]*era.Nodes, len(pr)) + if len(pr) > 0 { + for k, v := range pr { + val := v.(map[string]interface{}) + node := &era.Nodes{} + + if v1, ok1 := val["network_profile_id"]; ok1 && len(v1.(string)) > 0 { + node.Networkprofileid = utils.StringPtr(v1.(string)) + } + + if v1, ok1 := val["compute_profile_id"]; ok1 && len(v1.(string)) > 0 { + node.ComputeProfileID = utils.StringPtr(v1.(string)) + } + + if v1, ok1 := val["vm_name"]; ok1 && len(v1.(string)) > 0 { + node.Vmname = utils.StringPtr(v1.(string)) + } + + if v1, ok1 := val["nx_cluster_id"]; ok1 && len(v1.(string)) > 0 { + node.NxClusterID = utils.StringPtr(v1.(string)) + } + + if v1, ok1 := val["new_db_server_time_zone"]; ok1 && len(v1.(string)) > 0 { + 
node.NewDBServerTimeZone = utils.StringPtr(v1.(string)) + } + if v1, ok1 := val["properties"]; ok1 && len(v1.([]interface{})) > 0 { + node.Properties = v1.([]*era.NodesProperties) + } + + if v1, ok1 := val["dbserver_id"]; ok1 && len(v1.(string)) > 0 { + node.DatabaseServerID = utils.StringPtr(v1.(string)) + } + nodes[k] = node + } + return nodes + } + return nil +} + +func expandPostgreSQLCloneActionArgs(d *schema.ResourceData, pr []interface{}) []*era.Actionarguments { + if len(pr) > 0 { + args := []*era.Actionarguments{} + + postgresProp := pr[0].(map[string]interface{}) + for key, value := range postgresProp { + args = append(args, &era.Actionarguments{ + Name: key, + Value: utils.StringPtr(value.(string)), + }) + } + resp := buildActionArgumentsFromResourceData(d.Get("actionarguments").(*schema.Set), args) + return resp + } + return nil +} + +func expandLCMConfig(pr []interface{}) *era.CloneLCMConfig { + if len(pr) > 0 { + cloneLcm := &era.CloneLCMConfig{} + for _, v := range pr { + val := v.(map[string]interface{}) + + if v1, ok1 := val["database_lcm_config"]; ok1 && len(v1.([]interface{})) > 0 { + dbLcm := v1.([]interface{}) + dbLcmConfig := &era.DatabaseLCMConfig{} + for _, v := range dbLcm { + val := v.(map[string]interface{}) + + if exp, ok1 := val["expiry_details"]; ok1 { + dbLcmConfig.ExpiryDetails = expandDBExpiryDetails(exp.([]interface{})) + } + + if ref, ok1 := val["refresh_details"]; ok1 { + dbLcmConfig.RefreshDetails = expandDBRefreshDetails(ref.([]interface{})) + } + } + cloneLcm.DatabaseLCMConfig = dbLcmConfig + } + } + return cloneLcm + } + return nil +} + +func expandDBExpiryDetails(pr []interface{}) *era.DBExpiryDetails { + if len(pr) > 0 { + expDetails := &era.DBExpiryDetails{} + + for _, v := range pr { + val := v.(map[string]interface{}) + + if v1, ok1 := val["expire_in_days"]; ok1 { + expDetails.ExpireInDays = utils.IntPtr(v1.(int)) + } + if v1, ok1 := val["expiry_date_timezone"]; ok1 && len(v1.(string)) > 0 { + 
expDetails.ExpiryDateTimezone = utils.StringPtr(v1.(string)) + } + if v1, ok1 := val["delete_database"]; ok1 { + expDetails.DeleteDatabase = v1.(bool) + } + } + return expDetails + } + return nil +} + +func expandDBRefreshDetails(pr []interface{}) *era.DBRefreshDetails { + if len(pr) > 0 { + refDetails := &era.DBRefreshDetails{} + + for _, v := range pr { + val := v.(map[string]interface{}) + + if v1, ok1 := val["refresh_in_days"]; ok1 { + refDetails.RefreshInDays = v1.(int) + } + if v1, ok1 := val["refresh_time"]; ok1 && len(v1.(string)) > 0 { + refDetails.RefreshTime = v1.(string) + } + if v1, ok1 := val["refresh_date_timezone"]; ok1 && len(v1.(string)) > 0 { + refDetails.RefreshDateTimezone = v1.(string) + } + } + return refDetails + } + return nil +} diff --git a/nutanix/resource_nutanix_ndb_database_scale_test.go b/nutanix/resource_nutanix_ndb_database_scale_test.go new file mode 100644 index 000000000..3a32ce64f --- /dev/null +++ b/nutanix/resource_nutanix_ndb_database_scale_test.go @@ -0,0 +1,44 @@ +package nutanix + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceNameScaleDB = "nutanix_ndb_database_scale.acctest-managed" + +func TestAccEra_Scalebasic(t *testing.T) { + storageSize := "4" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabaseScaleConfig(storageSize), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameScaleDB, "application_type", "postgres_database"), + resource.TestCheckResourceAttr(resourceNameScaleDB, "data_storage_size", storageSize), + resource.TestCheckResourceAttr(resourceNameScaleDB, "metadata.#", "1"), + resource.TestCheckResourceAttrSet(resourceNameScaleDB, "name"), + resource.TestCheckResourceAttrSet(resourceNameScaleDB, "description"), + ), + }, + }, + }) +} + +func testAccEraDatabaseScaleConfig(size string) 
string { + return fmt.Sprintf(` + data "nutanix_ndb_databases" "test" { + database_type = "postgres_database" + } + + resource "nutanix_ndb_database_scale" "acctest-managed" { + application_type = "postgres_database" + database_uuid = data.nutanix_ndb_databases.test.database_instances.1.id + data_storage_size = %[1]s + } + `, size) +} diff --git a/nutanix/resource_nutanix_ndb_database_snapshot.go b/nutanix/resource_nutanix_ndb_database_snapshot.go new file mode 100644 index 000000000..cdd680433 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_database_snapshot.go @@ -0,0 +1,596 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBDatabaseSnapshot() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBDatabaseSnapshotCreate, + ReadContext: resourceNutanixNDBDatabaseSnapshotRead, + UpdateContext: resourceNutanixNDBDatabaseSnapshotUpdate, + DeleteContext: resourceNutanixNDBDatabaseSnapshotDelete, + Schema: map[string]*schema.Schema{ + "time_machine_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"time_machine_name"}, + }, + "time_machine_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"time_machine_id"}, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "remove_schedule_in_days": { + Type: schema.TypeInt, + Optional: true, + }, + "expiry_date_timezone": { + Type: schema.TypeString, + Optional: true, + Default: "Asia/Calcutta", + }, + "replicate_to_clusters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + // computed + "id": { + Type: schema.TypeString, + 
Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "properties": dataSourceEraDatabaseProperties(), + "tags": dataSourceEraDBInstanceTags(), + "snapshot_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "protection_domain_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "database_node_id": { + Type: schema.TypeString, + Computed: true, + }, + "app_info_version": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "applicable_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "snapshot_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "metadata": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": dataSourceEraDatabaseInfo(), + "deregister_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "replication_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "last_replication_retyr_source_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "async": { + Type: schema.TypeBool, + Computed: true, + }, + "stand_by": { + Type: schema.TypeBool, + 
Computed: true, + }, + "curation_retry_count": { + Type: schema.TypeInt, + Computed: true, + }, + "operations_using_snapshot": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "software_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "software_database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "dbserver_storage_metadata_version": { + Type: schema.TypeInt, + Computed: true, + }, + "santised": { + Type: schema.TypeBool, + Computed: true, + }, + "santised_from_snapshot_id": { + Type: schema.TypeString, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + "processed": { + Type: schema.TypeBool, + Computed: true, + }, + "database_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "from_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "to_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_id": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_name": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_ip": { + Type: schema.TypeString, + Computed: true, + }, + "replicated_snapshots": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "software_snapshot": { + Type: schema.TypeString, + Computed: true, + }, + "santised_snapshots": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_family": { + Type: schema.TypeString, + Computed: true, + }, + "snapshot_timestamp_date": { + Type: schema.TypeInt, + Computed: true, + }, + "lcm_config": dataSourceEraLCMConfig(), + "parent_snapshot": { + Type: schema.TypeBool, + Computed: true, + }, + "snapshot_size": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + } +} + +func resourceNutanixNDBDatabaseSnapshotCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := 
&era.DatabaseSnapshotRequest{} + snapshotName := "" + tmsID, tok := d.GetOk("time_machine_id") + tmsName, tnOk := d.GetOk("time_machine_name") + + if !tok && !tnOk { + return diag.Errorf("Atleast one of time_machine_id or time_machine_name is required to perform clone") + } + + if len(tmsName.(string)) > 0 { + // call time machine API with value-type name + res, err := conn.Service.GetTimeMachine(ctx, tmsID.(string), tmsName.(string)) + if err != nil { + return diag.FromErr(err) + } + + tmsID = *res.ID + } + + if name, ok := d.GetOk("name"); ok { + req.Name = utils.StringPtr(name.(string)) + snapshotName = utils.StringValue(req.Name) + } + + if rm, ok := d.GetOk("remove_schedule_in_days"); ok { + lcmConfig := &era.LCMConfigSnapshot{} + snapshotLCM := &era.SnapshotLCMConfig{} + expDetails := &era.DBExpiryDetails{} + + expDetails.ExpireInDays = utils.IntPtr(rm.(int)) + + if tmzone, pk := d.GetOk("expiry_date_timezone"); pk { + expDetails.ExpiryDateTimezone = utils.StringPtr(tmzone.(string)) + } + + snapshotLCM.ExpiryDetails = expDetails + lcmConfig.SnapshotLCMConfig = snapshotLCM + req.LcmConfig = lcmConfig + } + + if rep, ok := d.GetOk("replicate_to_clusters"); ok && len(rep.([]interface{})) > 0 { + repList := rep.([]interface{}) + + for _, v := range repList { + req.ReplicateToClusters = append(req.ReplicateToClusters, utils.StringPtr(v.(string))) + } + } + + // call the snapshot API + + resp, err := conn.Service.DatabaseSnapshot(ctx, tmsID.(string), req) + if err != nil { + return diag.FromErr(err) + } + + // d.SetId(resp.Entityid) + + // Get Operation ID from response of snapshot and poll for the operation to get completed. 
+ opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for snapshot (%s) to create: %s", resp.Entityid, errWaitTask) + } + + // Get all the Snapshots based on tms + + uniqueID := "" + timeStamp := 0 + tmsResp, ter := conn.Service.ListSnapshots(ctx, resp.Entityid) + if ter != nil { + return diag.FromErr(ter) + } + for _, val := range *tmsResp { + if snapshotName == utils.StringValue(val.Name) { + if (int(*val.SnapshotTimeStampDate)) > timeStamp { + uniqueID = utils.StringValue(val.ID) + timeStamp = int(utils.Int64Value(val.SnapshotTimeStampDate)) + } + } + } + d.SetId(uniqueID) + return resourceNutanixNDBDatabaseSnapshotRead(ctx, d, meta) +} + +func resourceNutanixNDBDatabaseSnapshotRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + // setting the default values for Get snapshot + filterParams := &era.FilterParams{} + filterParams.LoadReplicatedChildSnapshots = "false" + filterParams.TimeZone = "UTC" + + resp, err := conn.Service.GetSnapshot(ctx, d.Id(), filterParams) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.OwnerID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_created", resp.DateCreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", 
resp.DateModified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("properties", flattenDBInstanceProperties(resp.Properties)); err != nil { + return diag.FromErr(err) + } + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_uuid", resp.SnapshotUUID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("nx_cluster_id", resp.NxClusterID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("protection_domain_id", resp.ProtectionDomainID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_snapshot_id", resp.ParentSnapshotID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("time_machine_id", resp.TimeMachineID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_node_id", resp.DatabaseNodeID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("app_info_version", resp.AppInfoVersion); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("applicable_types", resp.ApplicableTypes); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_timestamp", resp.SnapshotTimeStamp); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("software_snapshot_id", resp.SoftwareSnapshotID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("software_database_snapshot", resp.SoftwareDatabaseSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_storage_metadata_version", resp.DBServerStorageMetadataVersion); err != nil { + return diag.FromErr(err) + } + + // if err := d.Set("santised", resp.Sanitized); err != nil { + // return diag.FromErr(err) + // } + + if err := d.Set("santised_from_snapshot_id", resp.SanitisedFromSnapshotID); err != nil { + return 
diag.FromErr(err) + } + + if err := d.Set("timezone", resp.TimeZone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("processed", resp.Processed); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("database_snapshot", resp.DatabaseSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("from_timestamp", resp.FromTimeStamp); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("to_timestamp", resp.ToTimeStamp); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_id", resp.DbserverID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_name", resp.DbserverName); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_ip", resp.DbserverIP); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("replicated_snapshots", resp.ReplicatedSnapshots); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("software_snapshot", resp.SoftwareSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("santised_snapshots", resp.SanitisedSnapshots); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_family", resp.SnapshotFamily); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_timestamp_date", resp.SnapshotTimeStampDate); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_snapshot", resp.ParentSnapshot); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("snapshot_size", resp.SnapshotSize); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("lcm_config", flattenDBLcmConfig(resp.LcmConfig)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("metadata", flattenClonedMetadata(resp.Metadata)); err != nil { + return diag.FromErr(err) + } + return nil +} + +func resourceNutanixNDBDatabaseSnapshotUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + 
updateReq := &era.UpdateSnapshotRequest{} + + if d.HasChange("name") { + updateReq.Name = utils.StringPtr(d.Get("name").(string)) + } + + // reset the name is by default value provided + updateReq.ResetName = true + + // API to update database snapshot + + resp, err := conn.Service.UpdateSnapshot(ctx, d.Id(), updateReq) + if err != nil { + return diag.FromErr(err) + } + + if resp != nil { + if err = d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + } + + return resourceNutanixNDBDatabaseSnapshotRead(ctx, d, meta) +} + +func resourceNutanixNDBDatabaseSnapshotDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.DeleteSnapshot(ctx, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + opID := resp.Operationid + + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for snapshot (%s) to delete: %s", resp.Entityid, errWaitTask) + } + + d.SetId("") + return nil +} diff --git a/nutanix/resource_nutanix_ndb_database_snapshot_test.go b/nutanix/resource_nutanix_ndb_database_snapshot_test.go new file mode 100644 index 000000000..e804719b7 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_database_snapshot_test.go @@ -0,0 +1,88 @@ +package nutanix + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceNameSnapshotDB = "nutanix_ndb_database_snapshot.acctest-managed" + +func TestAccEra_Snapshotbasic(t *testing.T) { + name := 
"test-acc-snapshot" + removalIndays := "2" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabaseSnapshotConfig(name, removalIndays), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "name", name), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "remove_schedule_in_days", removalIndays), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "metadata.#", "1"), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "database_snapshot", "false"), + ), + }, + }, + }) +} + +func TestAccEra_Snapshot_ReplicateToClusters(t *testing.T) { + name := "test-acc-snapshot" + removalIndays := "2" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabaseSnapshotConfigReplicateToClusters(name, removalIndays), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "name", name), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "remove_schedule_in_days", removalIndays), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "metadata.#", "1"), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "database_snapshot", "false"), + resource.TestCheckResourceAttr(resourceNameSnapshotDB, "replicate_to_clusters.#", "2"), + ), + }, + }, + }) +} + +func testAccEraDatabaseSnapshotConfig(name, removalIndays string) string { + return fmt.Sprintf(` + data "nutanix_ndb_time_machines" "test1" {} + + data "nutanix_ndb_time_machine" "test"{ + time_machine_name = data.nutanix_ndb_time_machines.test1.time_machines.0.name + } + + resource "nutanix_ndb_database_snapshot" "acctest-managed" { + time_machine_id = data.nutanix_ndb_time_machine.test.id + name = "%[1]s" + remove_schedule_in_days = "%[2]s" + } + `, name, removalIndays) +} + +func 
testAccEraDatabaseSnapshotConfigReplicateToClusters(name, removalIndays string) string { + return fmt.Sprintf(` + data "nutanix_ndb_time_machines" "test1" {} + + data "nutanix_ndb_time_machine" "test"{ + time_machine_name = data.nutanix_ndb_time_machines.test1.time_machines.0.name + } + + data "nutanix_ndb_clusters" "test" { } + + resource "nutanix_ndb_database_snapshot" "acctest-managed" { + time_machine_id = data.nutanix_ndb_time_machine.test.id + name = "%[1]s" + remove_schedule_in_days = "%[2]s" + replicate_to_clusters = [ + data.nutanix_ndb_clusters.test.clusters.0.id, data.nutanix_ndb_clusters.test.clusters.1.id + ] + } + `, name, removalIndays) +} diff --git a/nutanix/resource_nutanix_ndb_log_catchups_test.go b/nutanix/resource_nutanix_ndb_log_catchups_test.go new file mode 100644 index 000000000..9ee93b7d9 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_log_catchups_test.go @@ -0,0 +1,36 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceNameLogCatchDB = "nutanix_ndb_database_log_catchup.acctest-managed" + +func TestAccEra_LogCatchUpbasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabaseLogCatchUpConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameLogCatchDB, "log_catchup_version", ""), + resource.TestCheckResourceAttr(resourceNameLogCatchDB, "database_id", ""), + resource.TestCheckResourceAttrSet(resourceNameLogCatchDB, "time_machine_id"), + ), + }, + }, + }) +} + +func testAccEraDatabaseLogCatchUpConfig() string { + return (` + data "nutanix_ndb_time_machines" "test1" {} + + resource "nutanix_ndb_log_catchups" "name" { + time_machine_id = data.nutanix_ndb_time_machines.test1.time_machines.0.id + } + `) +}