diff --git a/CHANGELOG.md b/CHANGELOG.md index ad6c8311..c5706101 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,32 @@ +## 1.8.0 (Feb 23, 2023) +[Full Changelog](https://github.com/nutanix/terraform-provider-nutanix/compare/feat/v1.8.0-beta-2...feat/1.8.0-ga) + +**New Features:** +- Feat/1.8.0-ga Release with Nutanix Database Service based resource and datasources [\#553](https://github.com/nutanix/terraform-provider-nutanix/pull/553) + + New Resources: + - nutanix_ndb_maintenance_window + - nutanix_ndb_maintenance_task + - nutanix_ndb_tms_cluster + - nutanix_ndb_tag + - nutanix_ndb_network + - nutanix_ndb_dbserver_vm + - nutanix_ndb_register_dbserver + - nutanix_ndb_stretched_vlan + - nutanix_ndb_clone_refresh + - nutanix_ndb_cluster + + New Data Sources: + - nutanix_ndb_maintenance_window + - nutanix_ndb_maintenance_windows + - nutanix_ndb_tag + - nutanix_ndb_tags + - nutanix_ndb_network + - nutanix_ndb_networks + - nutanix_ndb_dbserver + - nutanix_ndb_dbservers + + ## 1.8.0-beta-2 (Jan 20, 2023) [Full Changelog](https://github.com/nutanix/terraform-provider-nutanix/compare/v1.8.0-beta.1...v1.8.0-beta.2) diff --git a/README.md b/README.md index fb71c569..88f3d23c 100644 --- a/README.md +++ b/README.md @@ -77,9 +77,11 @@ Foundation Central based modules and examples : Foundation based modules & examp > For the 1.8.0-beta.2 release of the provider, it will have N-2 compatibilty with the Nutanix Database Service. This release was tested with v2.5.1.1 , v2.5.0.2 and v2.4.1 -Note: For 1.8.0-beta.2 release, only postgress database type is qualified and officially supported. +> For the 1.8.0 release of the provider, it will have N-2 compatibility with the Nutanix database service. This release was tested with v2.5.1.1, v2.5.1 and v2.5 versions. -Checkout example : https://github.com/nutanix/terraform-provider-nutanix/blob/master/examples/ndb/database_instance +Note: For 1.8.0 release, only postgress database type is qualified and officially supported. 
Older versions of NDB may not support some resources. + +Checkout example : https://github.com/nutanix/terraform-provider-nutanix/blob/master/examples/ndb/ ## Example Usage @@ -196,6 +198,16 @@ From foundation getting released in 1.5.0-beta, provider configuration will acco * nutanix_ndb_register_database * nutanix_ndb_sla * nutanix_ndb_software_version_profile +* nutanix_ndb_tms_cluster +* nutanix_ndb_tag +* nutanix_ndb_dbserver_vm +* nutanix_ndb_register_dbserver +* nutanix_ndb_clone_refresh +* nutanix_ndb_network +* nutanix_ndb_stretched_vlan +* nutanix_ndb_cluster +* nutanix_ndb_maintenance_task +* nutanix_ndb_maintenance_window ## Data Sources @@ -265,6 +277,14 @@ From foundation getting released in 1.5.0-beta, provider configuration will acco * nutanix_ndb_tms_capability * nutanix_ndb_time_machine * nutanix_ndb_time_machines +* nutanix_ndb_dbserver +* nutanix_ndb_dbservers +* nutanix_ndb_tag +* nutanix_ndb_tags +* nutanix_ndb_network +* nutanix_ndb_networks +* nutanix_ndb_maintenance_window +* nutanix_ndb_maintenance_windows ## Quick Install diff --git a/client/era/era_service.go b/client/era/era_service.go index 821c092b..483ea513 100644 --- a/client/era/era_service.go +++ b/client/era/era_service.go @@ -56,6 +56,41 @@ type Service interface { TimeMachineCapability(ctx context.Context, tmsID string) (*TimeMachineCapability, error) CreateLinkedDatabase(ctx context.Context, id string, req *CreateLinkedDatabasesRequest) (*ProvisionDatabaseResponse, error) DeleteLinkedDatabase(ctx context.Context, DBID string, linkedDBID string, req *DeleteLinkedDatabaseRequest) (*ProvisionDatabaseResponse, error) + CreateMaintenanceWindow(ctx context.Context, body *MaintenanceWindowInput) (*MaintenaceWindowResponse, error) + ReadMaintenanceWindow(ctx context.Context, id string) (*MaintenaceWindowResponse, error) + UpdateMaintenaceWindow(ctx context.Context, body *MaintenanceWindowInput, id string) (*MaintenaceWindowResponse, error) + DeleteMaintenanceWindow(ctx context.Context, 
id string) (*AuthorizeDBServerResponse, error) + ListMaintenanceWindow(ctx context.Context) (*ListMaintenanceWindowResponse, error) + CreateMaintenanceTask(ctx context.Context, body *MaintenanceTasksInput) (*ListMaintenanceTasksResponse, error) + CreateTimeMachineCluster(ctx context.Context, tmsID string, body *TmsClusterIntentInput) (*TmsClusterResponse, error) + ReadTimeMachineCluster(ctx context.Context, tmsID string, clsID string) (*TmsClusterResponse, error) + UpdateTimeMachineCluster(ctx context.Context, tmsID string, clsID string, body *TmsClusterIntentInput) (*TmsClusterResponse, error) + DeleteTimeMachineCluster(ctx context.Context, tmsID string, clsID string, body *DeleteTmsClusterInput) (*ProvisionDatabaseResponse, error) + CreateTags(ctx context.Context, body *CreateTagsInput) (*TagsIntentResponse, error) + ReadTags(ctx context.Context, id string) (*GetTagsResponse, error) + UpdateTags(ctx context.Context, body *GetTagsResponse, id string) (*GetTagsResponse, error) + DeleteTags(ctx context.Context, id string) (*string, error) + ListTags(ctx context.Context) (*ListTagsResponse, error) + CreateNetwork(ctx context.Context, body *NetworkIntentInput) (*NetworkIntentResponse, error) + GetNetwork(ctx context.Context, id string, name string) (*NetworkIntentResponse, error) + UpdateNetwork(ctx context.Context, body *NetworkIntentInput, id string) (*NetworkIntentResponse, error) + DeleteNetwork(ctx context.Context, id string) (*string, error) + ListNetwork(ctx context.Context) (*ListNetworkResponse, error) + CreateDBServerVM(ctx context.Context, body *DBServerInputRequest) (*ProvisionDatabaseResponse, error) + ReadDBServerVM(ctx context.Context, id string) (*DBServerVMResponse, error) + UpdateDBServerVM(ctx context.Context, body *UpdateDBServerVMRequest, dbserverid string) (*DBServerVMResponse, error) + DeleteDBServerVM(ctx context.Context, req *DeleteDBServerVMRequest, dbserverid string) (*DeleteDatabaseResponse, error) + RegisterDBServerVM(ctx context.Context, 
body *DBServerRegisterInput) (*ProvisionDatabaseResponse, error) + GetDBServerVM(ctx context.Context, filter *DBServerFilterRequest) (*DBServerVMResponse, error) + ListDBServerVM(ctx context.Context) (*ListDBServerVMResponse, error) + CreateStretchedVlan(ctx context.Context, req *StretchedVlansInput) (*StretchedVlanResponse, error) + GetStretchedVlan(ctx context.Context, id string) (*StretchedVlanResponse, error) + UpdateStretchedVlan(ctx context.Context, id string, req *StretchedVlansInput) (*StretchedVlanResponse, error) + DeleteStretchedVlan(ctx context.Context, id string) (*string, error) + RefreshClone(ctx context.Context, body *CloneRefreshInput, id string) (*ProvisionDatabaseResponse, error) + CreateCluster(ctx context.Context, body *ClusterIntentInput) (*ProvisionDatabaseResponse, error) + UpdateCluster(ctx context.Context, req *ClusterUpdateInput, id string) (*ListClusterResponse, error) + DeleteCluster(ctx context.Context, req *DeleteClusterInput, id string) (*ProvisionDatabaseResponse, error) } type ServiceClient struct { @@ -96,10 +131,10 @@ func (sc ServiceClient) GetProfile(ctx context.Context, filter *ProfileFilter) ( func (sc ServiceClient) GetCluster(ctx context.Context, id string, name string) (*ListClusterResponse, error) { var path string if id != "" { - path = fmt.Sprintf("/clusters/%s", id) + path = fmt.Sprintf("/clusters/%s?count_entities=true", id) } if name != "" { - path = fmt.Sprintf("/clusters/name/%s", name) + path = fmt.Sprintf("/clusters/name/%s?count_entities=true", name) } httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, path, nil) if err != nil { @@ -111,7 +146,7 @@ func (sc ServiceClient) GetCluster(ctx context.Context, id string, name string) } func (sc ServiceClient) ListClusters(ctx context.Context) (*ClusterListResponse, error) { - httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, "/clusters", nil) + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, "/clusters?count_entities=true", nil) if err != nil { return nil, 
err } @@ -650,3 +685,375 @@ func (sc ServiceClient) DeleteLinkedDatabase(ctx context.Context, id string, lin res := new(ProvisionDatabaseResponse) return res, sc.c.Do(ctx, httpReq, res) } + +func (sc ServiceClient) CreateMaintenanceWindow(ctx context.Context, body *MaintenanceWindowInput) (*MaintenaceWindowResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/maintenance", body) + if err != nil { + return nil, err + } + res := new(MaintenaceWindowResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ReadMaintenanceWindow(ctx context.Context, id string) (*MaintenaceWindowResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/maintenance/%s?load-task-associations=true", id), nil) + if err != nil { + return nil, err + } + res := new(MaintenaceWindowResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) UpdateMaintenaceWindow(ctx context.Context, body *MaintenanceWindowInput, id string) (*MaintenaceWindowResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPatch, fmt.Sprintf("/maintenance/%s", id), body) + if err != nil { + return nil, err + } + res := new(MaintenaceWindowResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteMaintenanceWindow(ctx context.Context, id string) (*AuthorizeDBServerResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/maintenance/%s", id), nil) + if err != nil { + return nil, err + } + res := new(AuthorizeDBServerResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListMaintenanceWindow(ctx context.Context) (*ListMaintenanceWindowResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, "/maintenance?load-task-associations=true", nil) + if err != nil { + return nil, err + } + res := new(ListMaintenanceWindowResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) 
CreateMaintenanceTask(ctx context.Context, req *MaintenanceTasksInput) (*ListMaintenanceTasksResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/maintenance/tasks", req) + if err != nil { + return nil, err + } + + res := new(ListMaintenanceTasksResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) CreateTags(ctx context.Context, body *CreateTagsInput) (*TagsIntentResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/tags", body) + if err != nil { + return nil, err + } + res := new(TagsIntentResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) CreateTimeMachineCluster(ctx context.Context, tmsID string, body *TmsClusterIntentInput) (*TmsClusterResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/tms/%s/clusters", tmsID), body) + if err != nil { + return nil, err + } + res := new(TmsClusterResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ReadTags(ctx context.Context, id string) (*GetTagsResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/tags?id=%s", id), nil) + if err != nil { + return nil, err + } + res := new(GetTagsResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ReadTimeMachineCluster(ctx context.Context, tmsID string, clsID string) (*TmsClusterResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/tms/%s/clusters/%s", tmsID, clsID), nil) + if err != nil { + return nil, err + } + + res := new(TmsClusterResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) UpdateTimeMachineCluster(ctx context.Context, tmsID string, clsID string, body *TmsClusterIntentInput) (*TmsClusterResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPatch, fmt.Sprintf("/tms/%s/clusters/%s", tmsID, clsID), body) + if err != nil { + return nil, err + } + + res := 
new(TmsClusterResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) RefreshClone(ctx context.Context, body *CloneRefreshInput, id string) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, fmt.Sprintf("/clones/%s/refresh", id), body) + if err != nil { + return nil, err + } + + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteTimeMachineCluster(ctx context.Context, tmsID string, clsID string, body *DeleteTmsClusterInput) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/tms/%s/clusters/%s", tmsID, clsID), body) + if err != nil { + return nil, err + } + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) CreateCluster(ctx context.Context, body *ClusterIntentInput) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/clusters", body) + if err != nil { + return nil, err + } + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) CreateDBServerVM(ctx context.Context, body *DBServerInputRequest) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/dbservers/provision", body) + if err != nil { + return nil, err + } + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteCluster(ctx context.Context, body *DeleteClusterInput, id string) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/clusters/%s", id), body) + if err != nil { + return nil, err + } + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) UpdateTags(ctx context.Context, body *GetTagsResponse, id string) (*GetTagsResponse, error) { + httpReq, err := 
sc.c.NewRequest(ctx, http.MethodPut, fmt.Sprintf("/tags/%s", id), body) + if err != nil { + return nil, err + } + res := new(GetTagsResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteTags(ctx context.Context, id string) (*string, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/tags/%s", id), nil) + if err != nil { + return nil, err + } + res := new(string) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) CreateNetwork(ctx context.Context, body *NetworkIntentInput) (*NetworkIntentResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/resources/networks", body) + if err != nil { + return nil, err + } + + res := new(NetworkIntentResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetNetwork(ctx context.Context, id, name string) (*NetworkIntentResponse, error) { + path := "/resources/networks?detailed=true&" + if name != "" { + path += fmt.Sprintf("name=%s", name) + } else { + path += fmt.Sprintf("id=%s", id) + } + + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + res := new(NetworkIntentResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) UpdateNetwork(ctx context.Context, body *NetworkIntentInput, id string) (*NetworkIntentResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPut, fmt.Sprintf("/resources/networks/%s", id), body) + if err != nil { + return nil, err + } + res := new(NetworkIntentResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteNetwork(ctx context.Context, id string) (*string, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/resources/networks/%s", id), nil) + if err != nil { + return nil, err + } + res := new(string) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListTags(ctx context.Context) (*ListTagsResponse, error) 
{ + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, "/tags", nil) + if err != nil { + return nil, err + } + + res := new(ListTagsResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ListNetwork(ctx context.Context) (*ListNetworkResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, "/resources/networks", nil) + if err != nil { + return nil, err + } + + res := new(ListNetworkResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) ReadDBServerVM(ctx context.Context, id string) (*DBServerVMResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/dbservers/%s", id), nil) + if err != nil { + return nil, err + } + res := new(DBServerVMResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) UpdateDBServerVM(ctx context.Context, body *UpdateDBServerVMRequest, dbServerID string) (*DBServerVMResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPatch, fmt.Sprintf("/dbservers/%s", dbServerID), body) + if err != nil { + return nil, err + } + + res := new(DBServerVMResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteDBServerVM(ctx context.Context, req *DeleteDBServerVMRequest, dbServerVMID string) (*DeleteDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/dbservers/%s", dbServerVMID), req) + + if err != nil { + return nil, err + } + + res := new(DeleteDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) RegisterDBServerVM(ctx context.Context, body *DBServerRegisterInput) (*ProvisionDatabaseResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/dbservers/register", body) + if err != nil { + return nil, err + } + res := new(ProvisionDatabaseResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetDBServerVM(ctx context.Context, filter *DBServerFilterRequest) 
(*DBServerVMResponse, error) { + var httpReq *http.Request + var err error + + path := makeDBServerPath(filter.DBServerClusterID, filter.ID, filter.IP, filter.Name, filter.NxClusterID, filter.VMClusterID, filter.VMClusterName) + + httpReq, err = sc.c.NewRequest(ctx, http.MethodGet, path, nil) + + if err != nil { + return nil, err + } + res := new(DBServerVMResponse) + + return res, sc.c.Do(ctx, httpReq, res) +} + +func makeDBServerPath(dbserverClsid, id, ip, name, nxClsid, vmclsid, vmclsName *string) string { + path := "/dbservers" + + if dbserverClsid != nil { + path += fmt.Sprintf("/%s?value-type=dbserver-cluster-id", *dbserverClsid) + } + if id != nil { + path += fmt.Sprintf("/%s?value-type=id", *id) + } + if ip != nil { + path += fmt.Sprintf("/%s?value-type=ip", *ip) + } + if name != nil { + path += fmt.Sprintf("/%s?value-type=name", *name) + } + if nxClsid != nil { + path += fmt.Sprintf("/%s?value-type=nx-cluster-id", *nxClsid) + } + if vmclsid != nil { + path += fmt.Sprintf("/%s?value-type=vm-cluster-id", *vmclsid) + } + if vmclsName != nil { + path += fmt.Sprintf("/%s?value-type=vm-cluster-name", *vmclsName) + } + + path += "&load-dbserver-cluster=false&load-databases=false&load-clones=false&load-metrics=false&detailed=false&curator=false&time-zone=UTC" + + return path +} + +func (sc ServiceClient) ListDBServerVM(ctx context.Context) (*ListDBServerVMResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, "/dbservers?load-dbserver-cluster=false&load-databases=false&load-clones=false&detailed=false&load-metrics=false&time-zone=UTC", nil) + + if err != nil { + return nil, err + } + res := new(ListDBServerVMResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) CreateStretchedVlan(ctx context.Context, req *StretchedVlansInput) (*StretchedVlanResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPost, "/resources/networks/stretched-vlan", req) + if err != nil { + return nil, err + } + res := 
new(StretchedVlanResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) GetStretchedVlan(ctx context.Context, id string) (*StretchedVlanResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodGet, fmt.Sprintf("/resources/networks/stretched-vlan/%s", id), nil) + if err != nil { + return nil, err + } + res := new(StretchedVlanResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) UpdateStretchedVlan(ctx context.Context, id string, req *StretchedVlansInput) (*StretchedVlanResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPut, fmt.Sprintf("/resources/networks/stretched-vlan/%s", id), req) + if err != nil { + return nil, err + } + res := new(StretchedVlanResponse) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) DeleteStretchedVlan(ctx context.Context, id string) (*string, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodDelete, fmt.Sprintf("/resources/networks/stretched-vlan/%s", id), nil) + if err != nil { + return nil, err + } + res := new(string) + return res, sc.c.Do(ctx, httpReq, res) +} + +func (sc ServiceClient) UpdateCluster(ctx context.Context, req *ClusterUpdateInput, id string) (*ListClusterResponse, error) { + httpReq, err := sc.c.NewRequest(ctx, http.MethodPatch, fmt.Sprintf("/clusters/%s", id), req) + if err != nil { + return nil, err + } + res := new(ListClusterResponse) + return res, sc.c.Do(ctx, httpReq, res) +} diff --git a/client/era/era_structs.go b/client/era/era_structs.go index ec5d2075..b9ef0433 100644 --- a/client/era/era_structs.go +++ b/client/era/era_structs.go @@ -80,6 +80,35 @@ type Versions struct { VersionClusterAssociation []*VersionClusterAssociation `json:"versionClusterAssociation,omitempty"` } +type ProfilesEntity struct { + WindowsDomain *int `json:"WindowsDomain,omitempty"` + Software *int `json:"Software,omitempty"` + Compute *int `json:"Compute,omitempty"` + Network *int `json:"Network,omitempty"` + Storage *int 
`json:"Storage,omitempty"` + DatabaseParameter *int `json:"Database_Parameter,omitempty"` +} + +type ProfileTimeMachinesCount struct { + Profiles *ProfilesEntity `json:"profiles,omitempty"` + TimeMachines *int `json:"timeMachines,omitempty"` +} + +type EngineCounts struct { + OracleDatabase *ProfileTimeMachinesCount `json:"oracle_database,omitempty"` + PostgresDatabase *ProfileTimeMachinesCount `json:"postgres_database,omitempty"` + MongodbDatabase *ProfileTimeMachinesCount `json:"mongodb_database,omitempty"` + SqlserverDatabase *ProfileTimeMachinesCount `json:"sqlserver_database,omitempty"` + SaphanaDatabase *ProfileTimeMachinesCount `json:"saphana_database,omitempty"` + MariadbDatabase *ProfileTimeMachinesCount `json:"mariadb_database,omitempty"` + MySQLDatabase *ProfileTimeMachinesCount `json:"mysql_database,omitempty"` +} + +type EntityCounts struct { + DBServers *int `json:"dbServers,omitempty"` + EngineCounts *EngineCounts `json:"engineCounts,omitempty"` +} + // ListClustersResponse structs type ListClusterResponse struct { ID *string `json:"id,omitempty"` @@ -104,7 +133,7 @@ type ListClusterResponse struct { Cloudinfo interface{} `json:"cloudInfo,omitempty"` Resourceconfig *Resourceconfig `json:"resourceConfig,omitempty"` Managementserverinfo interface{} `json:"managementServerInfo,omitempty"` - Entitycounts interface{} `json:"entityCounts,omitempty"` + EntityCounts *EntityCounts `json:"entityCounts,omitempty"` Healthy bool `json:"healthy,omitempty"` } @@ -167,6 +196,15 @@ type MaintenanceTasks struct { Tasks []*Tasks `json:"tasks,omitempty"` } +type ClusterIPInfos struct { + NxClusterID *string `json:"nxClusterId,omitempty"` + IPInfos []*IPInfos `json:"ipInfos,omitempty"` +} + +type ClusterInfo struct { + ClusterIPInfos []*ClusterIPInfos `json:"clusterIpInfos,omitempty"` +} + // ProvisionDatabaseRequestStructs type ProvisionDatabaseRequest struct { Createdbserver bool `json:"createDbserver,omitempty"` @@ -191,6 +229,7 @@ type ProvisionDatabaseRequest struct 
{ Nodes []*Nodes `json:"nodes,omitempty"` Tags []*Tags `json:"tags,omitempty"` MaintenanceTasks *MaintenanceTasks `json:"maintenanceTasks,omitempty"` + ClusterInfo *ClusterInfo `json:"clusterInfo,omitempty"` } type Snapshottimeofday struct { @@ -363,7 +402,7 @@ type DatabaseServerProperties struct { Secure bool `json:"secure"` Description interface{} `json:"description"` } -type Metadata struct { +type DBServerMetadata struct { Physicaleradrive bool `json:"physicalEraDrive"` Clustered bool `json:"clustered"` Singleinstance bool `json:"singleInstance"` @@ -417,7 +456,7 @@ type Dbservers struct { Tags []*Tags `json:"tags"` Vminfo *VMInfo `json:"vmInfo"` Info *Info `json:"info"` - Metadata *Metadata `json:"metadata"` + Metadata *DBServerMetadata `json:"metadata"` Metric *Metric `json:"metric"` Lcmconfig *LcmConfig `json:"lcmConfig"` TimeMachineInfo []*Properties `json:"time_machine_info"` @@ -745,11 +784,11 @@ type DbserverMetadata struct { // Lastclocksyncalerttime interface{} `json:"lastClockSyncAlertTime"` } -type VMInfo struct { - OsType *string `json:"osType,omitempty"` - OsVersion *string `json:"osVersion,omitempty"` - Distribution *string `json:"distribution,omitempty"` -} +// type VMInfo struct { +// OsType *string `json:"osType,omitempty"` +// OsVersion *string `json:"osVersion,omitempty"` +// Distribution *string `json:"distribution,omitempty"` +// } type MetricVMInfo struct { NumVCPUs *int `json:"numVCPUs,omitempty"` @@ -1194,20 +1233,21 @@ type SnapshotResponse struct { SoftwareDatabaseSnapshot bool `json:"softwareDatabaseSnapshot,omitempty"` Processed bool `json:"processed,omitempty"` DatabaseSnapshot bool `json:"databaseSnapshot,omitempty"` + Sanitized bool `json:"sanitised,omitempty"` //nolint:all Properties []*DBInstanceProperties `json:"properties"` Tags []*Tags `json:"tags"` Info *CloneInfo `json:"info,omitempty"` Metadata *ClonedMetadata `json:"metadata,omitempty"` Metric *Metric `json:"metric,omitempty"` LcmConfig *LcmConfig 
`json:"lcmConfig,omitempty"` - SanitisedFromSnapshotID interface{} `json:"sanitisedFromSnapshotId,omitempty"` + SanitizedFromSnapshotID interface{} `json:"sanitisedFromSnapshotId,omitempty"` AccessLevel interface{} `json:"accessLevel"` DbserverID interface{} `json:"dbserverId,omitempty"` DbserverName interface{} `json:"dbserverName,omitempty"` DbserverIP interface{} `json:"dbserverIp,omitempty"` ReplicatedSnapshots interface{} `json:"replicatedSnapshots,omitempty"` SoftwareSnapshot interface{} `json:"softwareSnapshot,omitempty"` - SanitisedSnapshots interface{} `json:"sanitisedSnapshots,omitempty"` + SanitizedSnapshots interface{} `json:"sanitisedSnapshots,omitempty"` SnapshotFamily interface{} `json:"snapshotFamily,omitempty"` } @@ -1383,13 +1423,13 @@ type LastContinuousSnapshot struct { SnapshotSize float64 `json:"snapshotSize,omitempty"` AccessLevel interface{} `json:"accessLevel,omitempty"` Metric interface{} `json:"metric,omitempty"` - SanitisedFromSnapshotID interface{} `json:"sanitisedFromSnapshotId,omitempty"` + SanitizedFromSnapshotID interface{} `json:"sanitisedFromSnapshotId,omitempty"` DBserverID interface{} `json:"dbserverId,omitempty"` DBserverName interface{} `json:"dbserverName,omitempty"` DBserverIP interface{} `json:"dbserverIp,omitempty"` ReplicatedSnapshots interface{} `json:"replicatedSnapshots,omitempty"` SoftwareSnapshot interface{} `json:"softwareSnapshot,omitempty"` - SanitisedSnapshots interface{} `json:"sanitisedSnapshots,omitempty"` + SanitizedSnapshots interface{} `json:"sanitisedSnapshots,omitempty"` Description interface{} `json:"description,omitempty"` SnapshotFamily interface{} `json:"snapshotFamily,omitempty"` ParentSnapshotID interface{} `json:"parentSnapshotId,omitempty"` @@ -1412,3 +1452,520 @@ type DeleteLinkedDatabaseRequest struct { Delete bool `json:"delete,omitempty"` Forced bool `json:"forced,omitempty"` } + +type MaintenaceSchedule struct { + Recurrence *string `json:"recurrence,omitempty"` + StartTime *string 
`json:"startTime,omitempty"` + DayOfWeek *string `json:"dayOfWeek,omitempty"` + WeekOfMonth *int `json:"weekOfMonth,omitempty"` + Duration *int `json:"duration,omitempty"` + Threshold interface{} `json:"threshold,omitempty"` + Hour *int `json:"hour,omitempty"` + Minute *int `json:"minute,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` +} + +type MaintenanceWindowInput struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Timezone *string `json:"timezone,omitempty"` + Schedule *MaintenaceSchedule `json:"schedule,omitempty"` + ResetSchedule *bool `json:"resetSchedule,omitempty"` + ResetDescription *bool `json:"resetDescription,omitempty"` + ResetName *bool `json:"resetName,omitempty"` +} + +type MaintenaceWindowResponse struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + DateCreated *string `json:"dateCreated,omitempty"` + DateModified *string `json:"dateModified,omitempty"` + AccessLevel interface{} `json:"accessLevel,omitempty"` + Properties []*Properties `json:"properties,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + Schedule *MaintenaceSchedule `json:"schedule,omitempty"` + Status *string `json:"status,omitempty"` + NextRunTime *string `json:"nextRunTime,omitempty"` + EntityTaskAssoc []*MaintenanceTasksResponse `json:"entityTaskAssoc,omitempty"` + Timezone *string `json:"timezone,omitempty"` +} + +type ListMaintenanceWindowResponse []MaintenaceWindowResponse +type MaintenanceEntities struct { + EraDBServer []*string `json:"ERA_DBSERVER,omitempty"` + EraDBServerCluster []*string `json:"ERA_DBSERVER_CLUSTER,omitempty"` +} + +type MaintenanceTasksInput struct { + Entities *MaintenanceEntities `json:"entities,omitempty"` + MaintenanceWindowID *string `json:"maintenanceWindowId,omitempty"` + Tasks []*Tasks `json:"tasks"` +} + +type MaintenanceTasksResponse struct { + ID 
*string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + DateCreated *string `json:"dateCreated,omitempty"` + DateModified *string `json:"dateModified,omitempty"` + AccessLevel *string `json:"accessLevel,omitempty"` + Properties []*Properties `json:"properties,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + MaintenanceWindowID *string `json:"maintenanceWindowId,omitempty"` + MaintenanceWindowOwnerID *string `json:"maintenanceWindowOwnerId,omitempty"` + EntityID *string `json:"entityId,omitempty"` + EntityType *string `json:"entityType,omitempty"` + Status *string `json:"status,omitempty"` + TaskType *string `json:"taskType,omitempty"` + Payload *Payload `json:"payload,omitempty"` + Entity interface{} `json:"entity,omitempty"` +} + +type ListMaintenanceTasksResponse []MaintenanceTasksResponse +type TmsClusterIntentInput struct { + NxClusterID *string `json:"nxClusterId,omitempty"` + Type *string `json:"type,omitempty"` + SLAID *string `json:"slaId,omitempty"` + ResetSLAID *bool `json:"resetSlaId,omitempty"` +} + +type TmsClusterResponse struct { + TimeMachineID *string `json:"timeMachineId,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + LogDriveID *string `json:"logDriveId,omitempty"` + LogDriveStatus *string `json:"logDriveStatus,omitempty"` + Type *string `json:"type,omitempty"` + Description *string `json:"description,omitempty"` + Status *string `json:"status,omitempty"` + SLAID *string `json:"slaId,omitempty"` + ScheduleID *string `json:"scheduleId,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + DateCreated *string `json:"dateCreated,omitempty"` + DateModified *string `json:"dateModified,omitempty"` + Info interface{} `json:"info,omitempty"` + Metadata interface{} `json:"metadata,omitempty"` + NxCluster interface{} `json:"nxCluster,omitempty"` + LogDrive interface{} `json:"logDrive,omitempty"` + SLA interface{} 
`json:"sla,omitempty"` + Schedule interface{} `json:"schedule,omitempty"` + SourceClusters []*string `json:"sourceClusters,omitempty"` + ResetSLAID *bool `json:"resetSlaId,omitempty"` + ResetDescription *bool `json:"resetDescription,omitempty"` + ResetType *bool `json:"resetType,omitempty"` + SubmitActivateTimeMachineOp *bool `json:"submitActivateTimeMachineOp,omitempty"` + UpdateOperationSummary interface{} `json:"updateOperationSummary,omitempty"` + StorageResourceID *string `json:"storageResourceId,omitempty"` + ForceVGBasedLogDrive *bool `json:"forceVGBasedLogDrive,omitempty"` + Source *bool `json:"source,omitempty"` +} + +type DeleteTmsClusterInput struct { + DeleteReplicatedSnapshots *bool `json:"deleteReplicatedSnapshots,omitempty"` + DeleteReplicatedProtectionDomains *bool `json:"deleteReplicatedProtectionDomains,omitempty"` +} +type CreateTagsInput struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + EntityType *string `json:"entityType,omitempty"` + Required *bool `json:"required,omitempty"` +} + +type TagsIntentResponse struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Owner *string `json:"owner,omitempty"` + Required bool `json:"required,omitempty"` + Status *string `json:"status,omitempty"` + EntityType *string `json:"entityType,omitempty"` + Values int `json:"values,omitempty"` +} + +type GetTagsResponse struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + DateCreated *string `json:"dateCreated,omitempty"` + DateModified *string `json:"dateModified,omitempty"` + Owner *string `json:"owner,omitempty"` + Status *string `json:"status,omitempty"` + EntityType *string `json:"entityType,omitempty"` + Required *bool `json:"required,omitempty"` + Values *int `json:"values,omitempty"` +} + +type ListTagsResponse []*GetTagsResponse +type IPAddresses struct { + IP *string 
`json:"ip,omitempty"` + Status *string `json:"status,omitempty"` +} + +type IPPools struct { + StartIP *string `json:"startIP,omitempty"` + EndIP *string `json:"endIP,omitempty"` + ID *string `json:"id,omitempty"` + ModifiedBy *string `json:"modifiedBy,omitempty"` + IPAddresses []*IPAddresses `json:"ipAddresses,omitempty"` +} + +type NetworkIntentInput struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Properties []*Properties `json:"properties,omitempty"` + ClusterID *string `json:"clusterId,omitempty"` + IPPools []*IPPools `json:"ipPools,omitempty"` +} + +type NetworkPropertiesmap struct { + VLANSecondaryDNS *string `json:"VLAN_SECONDARY_DNS,omitempty"` + VLANSubnetMask *string `json:"VLAN_SUBNET_MASK,omitempty"` + VLANPrimaryDNS *string `json:"VLAN_PRIMARY_DNS,omitempty"` + VLANGateway *string `json:"VLAN_GATEWAY,omitempty"` +} + +type NetworkIntentResponse struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + ClusterID *string `json:"clusterId,omitempty"` + Managed *bool `json:"managed,omitempty"` + Properties []*Properties `json:"properties,omitempty"` + PropertiesMap *NetworkPropertiesmap `json:"propertiesMap,omitempty"` + StretchedVlanID *string `json:"stretchedVlanId,omitempty"` + IPPools []*IPPools `json:"ipPools,omitempty"` +} + +type ListNetworkResponse []*NetworkIntentResponse + +type DBServerInputRequest struct { + DatabaseType *string `json:"databaseType,omitempty"` + SoftwareProfileID *string `json:"softwareProfileId,omitempty"` + SoftwareProfileVersionID *string `json:"softwareProfileVersionId,omitempty"` + NetworkProfileID *string `json:"networkProfileId,omitempty"` + ComputeProfileID *string `json:"computeProfileId,omitempty"` + VMPassword *string `json:"vmPassword,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + LatestSnapshot bool `json:"latestSnapshot,omitempty"` + ActionArguments []*Actionarguments 
`json:"actionArguments,omitempty"` + Description *string `json:"description,omitempty"` + TimeMachineID *string `json:"timeMachineId,omitempty"` + SnapshotID *string `json:"snapshotId,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` + MaintenanceTasks *MaintenanceTasks `json:"maintenanceTasks,omitempty"` +} + +type DeleteDBServerVMRequest struct { + SoftRemove bool `json:"softRemove,omitempty"` + Remove bool `json:"remove,omitempty"` + Delete bool `json:"delete,omitempty"` + DeleteVgs bool `json:"deleteVgs,omitempty"` + DeleteVMSnapshots bool `json:"deleteVmSnapshots,omitempty"` +} + +type VMCredentials struct { + Username *string `json:"username,omitempty"` + Password *string `json:"password,omitempty"` + Label interface{} `json:"label,omitempty"` +} + +type UpdateDBServerVMRequest struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + ResetNameInNxCluster *bool `json:"resetNameInNxCluster,omitempty"` + ResetDescriptionInNxCluster *bool `json:"resetDescriptionInNxCluster,omitempty"` + ResetCredential *bool `json:"resetCredential,omitempty"` + ResetTags *bool `json:"resetTags,omitempty"` + ResetName *bool `json:"resetName,omitempty"` + ResetDescription *bool `json:"resetDescription,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + Credentials []*VMCredentials `json:"credentials,omitempty"` +} + +type DiskList struct { + DeviceName *string `json:"device_name,omitempty"` + LocalMapping *string `json:"local_mapping,omitempty"` + DiskIndex *string `json:"disk_index,omitempty"` + Path *string `json:"path,omitempty"` + DiskID *string `json:"disk_id,omitempty"` + Hypervisor *string `json:"hypervisor,omitempty"` + MountPoint *string `json:"mount_point,omitempty"` + DiskUUID *string `json:"disk_uuid,omitempty"` + DiskUser *string `json:"disk_user,omitempty"` + DiskGroup *string `json:"disk_group,omitempty"` + DiskMode *string `json:"disk_mode,omitempty"` + DiskType *string `json:"disk_type,omitempty"` + 
VirtualDiskID *string `json:"virtual_disk_id,omitempty"` + FsType *string `json:"fs_type,omitempty"` + Size *string `json:"size,omitempty"` + DateCreated *string `json:"date_created,omitempty"` + IsEncrypted bool `json:"is_encrypted,omitempty"` +} + +type StorageProfileVGList struct { + Name *string `json:"name,omitempty"` + VgID *string `json:"vg_id,omitempty"` + VgType *string `json:"vg_type,omitempty"` + VgIscsiTarget *string `json:"vg_iscsi_target,omitempty"` + DiskList []*DiskList `json:"disk_list,omitempty"` +} + +type StorageProfile struct { + HostOsType *string `json:"host_os_type,omitempty"` + Hypervisor *string `json:"hypervisor,omitempty"` + IsEraDriveOnEsx *bool `json:"is_era_drive_on_esx,omitempty"` + LvmBased *bool `json:"lvm_based,omitempty"` + LvPath []*string `json:"lv_path,omitempty"` + DiskList []interface{} `json:"disk_list,omitempty"` + VgList []*StorageProfileVGList `json:"vg_list,omitempty"` +} + +type DriveSoftware struct { + StorageProfile *StorageProfile `json:"storage_profile,omitempty"` +} + +type DriveStorageInfo struct { + AttachedVM *string `json:"attachedVm,omitempty"` + VgName *string `json:"vgName,omitempty"` + VgUUID *string `json:"vgUuid,omitempty"` + PdName *string `json:"pdName,omitempty"` + Software *DriveSoftware `json:"software,omitempty"` +} + +type DriveInfo struct { + StorageInfo *DriveStorageInfo `json:"storage_info,omitempty"` + SourceEraPath *string `json:"source_era_path,omitempty"` +} + +type Disks struct { + ID *string `json:"id,omitempty"` + VdiskUUID *string `json:"vdiskUuid,omitempty"` + TimeMachineID interface{} `json:"timeMachineId,omitempty"` + EraDriveID *string `json:"eraDriveId,omitempty"` + EraCreated *string `json:"eraCreated,omitempty"` + Status *string `json:"status,omitempty"` + Type *string `json:"type,omitempty"` + TotalSize int `json:"totalSize,omitempty"` + UsedSize int `json:"usedSize,omitempty"` + Info interface{} `json:"info,omitempty"` + DateCreated *string `json:"dateCreated,omitempty"` + 
DateModified *string `json:"dateModified,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + Message interface{} `json:"message,omitempty"` +} + +type Drive struct { + ID *string `json:"id,omitempty"` + Path *string `json:"path,omitempty"` + HostID *string `json:"hostId,omitempty"` + VgUUID *string `json:"vgUuid,omitempty"` + ClusterID string `json:"clusterId,omitempty"` + ProtectionDomainID *string `json:"protectionDomainId,omitempty"` + EraCreated bool `json:"eraCreated,omitempty"` + Status *string `json:"status,omitempty"` + TotalSize int `json:"totalSize,omitempty"` + UsedSize int `json:"usedSize,omitempty"` + Info *DriveInfo `json:"info,omitempty"` + DateCreated *string `json:"dateCreated,omitempty"` + DateModified *string `json:"dateModified,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + Metadata interface{} `json:"metadata,omitempty"` + EraDisks []*Disks `json:"eraDisks,omitempty"` + ProtectionDomain interface{} `json:"protectionDomain,omitempty"` + Message interface{} `json:"message,omitempty"` +} + +type SoftwareInstallationsInfo struct { + Owner *string `json:"owner,omitempty"` +} +type SoftwareInstallations struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + EraCreated bool `json:"eraCreated,omitempty"` + Type *string `json:"type,omitempty"` + DbserverID *string `json:"dbserverId,omitempty"` + SoftwareProfileID *string `json:"softwareProfileId,omitempty"` + SoftwareProfileVersionID *string `json:"softwareProfileVersionId,omitempty"` + Version *string `json:"version,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + Info *SoftwareInstallationsInfo `json:"info,omitempty"` + Metadata interface{} `json:"metadata,omitempty"` + DateCreated *string `json:"dateCreated,omitempty"` + DateModified *string `json:"dateModified,omitempty"` +} + +type DBServerVMResponse struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + 
OwnerID *string `json:"ownerId,omitempty"` + DateCreated *string `json:"dateCreated,omitempty"` + DateModified *string `json:"dateModified,omitempty"` + DbserverClusterID *string `json:"dbserverClusterId,omitempty"` + VMClusterName *string `json:"vmClusterName,omitempty"` + VMClusterUUID *string `json:"vmClusterUuid,omitempty"` + Status *string `json:"status,omitempty"` + ClientID *string `json:"clientId,omitempty"` + NxClusterID *string `json:"nxClusterId,omitempty"` + EraDriveID *string `json:"eraDriveId,omitempty"` + EraVersion *string `json:"eraVersion,omitempty"` + VMTimeZone *string `json:"vmTimeZone,omitempty"` + WorkingDirectory *string `json:"workingDirectory,omitempty"` + Type *string `json:"type,omitempty"` + DatabaseType *string `json:"databaseType,omitempty"` + AccessKeyID *string `json:"accessKeyId,omitempty"` + DbserverInValidEaState *bool `json:"dbserverInValidEaState,omitempty"` + ValidDiagnosticBundleState *bool `json:"validDiagnosticBundleState,omitempty"` + WindowsDBServer *bool `json:"windowsDBServer,omitempty"` + EraCreated *bool `json:"eraCreated,omitempty"` + Internal *bool `json:"internal,omitempty"` + Placeholder *bool `json:"placeholder,omitempty"` + Clustered *bool `json:"clustered,omitempty"` + IsServerDriven *bool `json:"is_server_driven,omitempty"` + QueryCount *int `json:"queryCount,omitempty"` + IPAddresses []*string `json:"ipAddresses,omitempty"` + AssociatedTimeMachineIds []*string `json:"associatedTimeMachineIds,omitempty"` + MacAddresses []*string `json:"macAddresses,omitempty"` + VMInfo *VMInfo `json:"vmInfo,omitempty"` + Metadata *DBServerMetadata `json:"metadata,omitempty"` + Metric *Metric `json:"metric,omitempty"` + LcmConfig *LcmConfig `json:"lcmConfig,omitempty"` + EraDrive *Drive `json:"eraDrive,omitempty"` + SoftwareInstallations []*SoftwareInstallations `json:"softwareInstallations,omitempty"` + Properties []*DatabaseServerProperties `json:"properties,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + Databases 
interface{} `json:"databases,omitempty"` + Clones interface{} `json:"clones,omitempty"` + AccessKey interface{} `json:"accessKey,omitempty"` + ProtectionDomainID interface{} `json:"protectionDomainId,omitempty"` + AccessLevel interface{} `json:"accessLevel,omitempty"` + Fqdns interface{} `json:"fqdns,omitempty"` + Info interface{} `json:"info,omitempty"` + RequestedVersion interface{} `json:"requestedVersion,omitempty"` + AssociatedTimeMachineID interface{} `json:"associated_time_machine_id,omitempty"` + TimeMachineInfo interface{} `json:"time_machine_info,omitempty"` + ProtectionDomain interface{} `json:"protectionDomain,omitempty"` +} + +type ListDBServerVMResponse []DBServerVMResponse + +type AccessInfo struct { + AccessType *string `json:"accessType,omitempty"` + DestinationSubnet *string `json:"destinationSubnet,omitempty"` +} +type NetworkInfo struct { + VlanName *string `json:"vlanName,omitempty"` + VlanUUID *string `json:"vlanUuid,omitempty"` + VlanType *string `json:"vlanType,omitempty"` + Gateway *string `json:"gateway,omitempty"` + SubnetMask *string `json:"subnetMask,omitempty"` + Hostname *string `json:"hostname,omitempty"` + DeviceName *string `json:"deviceName,omitempty"` + MacAddress *string `json:"macAddress,omitempty"` + Flags *string `json:"flags,omitempty"` + Mtu *string `json:"mtu,omitempty"` + DefaultGatewayDevice *bool `json:"defaultGatewayDevice,omitempty"` + EraConfigured *bool `json:"eraConfigured,omitempty"` + IPAddresses []*string `json:"ipAddresses,omitempty"` + AccessInfo []*AccessInfo `json:"accessInfo,omitempty"` +} + +type VMInfo struct { + OsType *string `json:"osType,omitempty"` + OsVersion *string `json:"osVersion,omitempty"` + Distribution *string `json:"distribution,omitempty"` + SecureInfo interface{} `json:"secureInfo,omitempty"` + Info interface{} `json:"info,omitempty"` + DeregisterInfo *DeregisterInfo `json:"deregisterInfo,omitempty"` + NetworkInfo []*NetworkInfo `json:"networkInfo,omitempty"` +} + +type 
DBServerRegisterInput struct { + DatabaseType *string `json:"databaseType,omitempty"` + VMIP *string `json:"vmIp,omitempty"` + NxClusterUUID *string `json:"nxClusterUuid,omitempty"` + ForcedInstall *bool `json:"forcedInstall,omitempty"` + WorkingDirectory *string `json:"workingDirectory,omitempty"` + Username *string `json:"username,omitempty"` + Password *string `json:"password,omitempty"` + SSHPrivateKey *string `json:"sshPrivateKey,omitempty"` + ActionArguments []*Actionarguments `json:"actionArguments,omitempty"` +} + +type DBServerFilterRequest struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + IP *string `json:"ip,omitempty"` + VMClusterName *string `json:"vm-cluster-name,omitempty"` + VMClusterID *string `json:"vm-cluster-uuid,omitempty"` + DBServerClusterID *string `json:"dbserver-cluster-id,omitempty"` + NxClusterID *string `json:"nx-cluster-id,omitempty"` +} + +type StretchedVlanMetadata struct { + Gateway *string `json:"gateway,omitempty"` + SubnetMask *string `json:"subnetMask,omitempty"` +} + +type StretchedVlansInput struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Type *string `json:"type,omitempty"` + Metadata *StretchedVlanMetadata `json:"metadata,omitempty"` + VlanIDs []*string `json:"vlanIds,omitempty"` +} + +type StretchedVlanResponse struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Description *string `json:"description,omitempty"` + Metadata *StretchedVlanMetadata `json:"metadata,omitempty"` + Vlans []*NetworkIntentResponse `json:"vlans,omitempty"` + VlanIDs []*string `json:"vlanIds,omitempty"` +} + +type CloneRefreshInput struct { + SnapshotID *string `json:"snapshotId,omitempty"` + UserPitrTimestamp *string `json:"userPitrTimestamp,omitempty"` + Timezone *string `json:"timeZone,omitempty"` +} + +type NameValueParams struct { + Name *string `json:"name,omitempty"` + Value *string 
`json:"value,omitempty"` +} + +type NetworksInfo struct { + Type *string `json:"type,omitempty"` + NetworkInfo []*NameValueParams `json:"networkInfo,omitempty"` + AccessType []*string `json:"accessType,omitempty"` +} + +type ClusterIntentInput struct { + ClusterName *string `json:"clusterName,omitempty"` + ClusterDescription *string `json:"clusterDescription,omitempty"` + ClusterIP *string `json:"clusterIP,omitempty"` + StorageContainer *string `json:"storageContainer,omitempty"` + AgentVMPrefix *string `json:"agentVMPrefix,omitempty"` + Port *int `json:"port,omitempty"` + Protocol *string `json:"protocol,omitempty"` + ClusterType *string `json:"clusterType,omitempty"` + Version *string `json:"version,omitempty"` + CredentialsInfo []*NameValueParams `json:"credentialsInfo,omitempty"` + AgentNetworkInfo []*NameValueParams `json:"agentNetworkInfo,omitempty"` + NetworksInfo []*NetworksInfo `json:"networksInfo,omitempty"` +} + +type DeleteClusterInput struct { + DeleteRemoteSites bool `json:"deleteRemoteSites,omitempty"` +} + +type ClusterUpdateInput struct { + Username *string `json:"username,omitempty"` + Password *string `json:"password,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + IPAddresses []*string `json:"ipAddresses,omitempty"` +} diff --git a/examples/ndb/clone_refresh/main.tf b/examples/ndb/clone_refresh/main.tf new file mode 100644 index 00000000..7bbd731e --- /dev/null +++ b/examples/ndb/clone_refresh/main.tf @@ -0,0 +1,32 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + +## resource to refresh clone with snapshot id + +resource "nutanix_ndb_clone_refresh" "acctest-managed"{ + clone_id = "{{ clone_id }}" + snapshot_id = "{{ snapshot_id }}" + timezone = 
"Asia/Calcutta" +} + +## resource to refresh clone with userpitr timestamp + +resource "nutanix_ndb_clone_refresh" "acctest-managed"{ + clone_id = "{{ clone_id }}" + user_pitr_stamp = "{{ timestamp }}" + timezone = "Asia/Calcutta" +} diff --git a/examples/ndb/clone_refresh/terraform.tfvars b/examples/ndb/clone_refresh/terraform.tfvars new file mode 100644 index 00000000..4f5de990 --- /dev/null +++ b/examples/ndb/clone_refresh/terraform.tfvars @@ -0,0 +1,4 @@ +#define values to the variables to be used in terraform file_username = "admin" +ndb_password = "password" +ndb_endpoint = "10.xx.xx.xx" +ndb_username = "username" diff --git a/examples/ndb/clone_refresh/variables.tf b/examples/ndb/clone_refresh/variables.tf new file mode 100644 index 00000000..1a0cb89b --- /dev/null +++ b/examples/ndb/clone_refresh/variables.tf @@ -0,0 +1,10 @@ +#define the type of variables to be used in terraform file +variable "ndb_username" { + type = string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} diff --git a/examples/ndb/cluster/main.tf b/examples/ndb/cluster/main.tf new file mode 100644 index 00000000..f7503f09 --- /dev/null +++ b/examples/ndb/cluster/main.tf @@ -0,0 +1,45 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + +## resource to register cluster in ndb + +resource "nutanix_ndb_cluster" "clsname" { + name= "cls-name" + description = "cluster description" + cluster_ip = "{{ clusterIP }}" + username= "{{ cluster username }}" + password = "{{ cluster password }}" + storage_container = "{{ storage container}}" + agent_network_info{ + dns = "{{ dns }}" + ntp = "{{ ntp }}" + } + networks_info{ + type = "DHCP" + network_info{ + vlan_name = "vlan_static" + static_ip = "{{ staticIP }}" + gateway 
= "{{ Gateway }}" + subnet_mask="{{ subnetMask }}" + } + access_type = [ + "PRISM", + "DSIP", + "DBSERVER" + ] + } +} \ No newline at end of file diff --git a/examples/ndb/cluster/terraform.tfvars b/examples/ndb/cluster/terraform.tfvars new file mode 100644 index 00000000..4f5de990 --- /dev/null +++ b/examples/ndb/cluster/terraform.tfvars @@ -0,0 +1,4 @@ +#define values to the variables to be used in terraform file_username = "admin" +ndb_password = "password" +ndb_endpoint = "10.xx.xx.xx" +ndb_username = "username" diff --git a/examples/ndb/cluster/variables.tf b/examples/ndb/cluster/variables.tf new file mode 100644 index 00000000..1a0cb89b --- /dev/null +++ b/examples/ndb/cluster/variables.tf @@ -0,0 +1,10 @@ +#define the type of variables to be used in terraform file +variable "ndb_username" { + type = string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} diff --git a/examples/ndb/dbservervm_register/main.tf b/examples/ndb/dbservervm_register/main.tf new file mode 100644 index 00000000..1abd167c --- /dev/null +++ b/examples/ndb/dbservervm_register/main.tf @@ -0,0 +1,31 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + + +## resource to register dbserver vm +resource "nutanix_ndb_register_dbserver" "name" { + vm_ip= "{{ vmip }}" + nxcluster_id = "{{ cluster_id }}" + username= "{{ era_driver_user}}" + password="{{ password }}" + database_type = "postgres_database" + postgres_database{ + listener_port = 5432 + // directory where the PostgreSQL database software is installed + postgres_software_home= "{{ directory }}" + } +} \ No newline at end of file diff --git a/examples/ndb/dbservervm_register/terraform.tfvars b/examples/ndb/dbservervm_register/terraform.tfvars new file mode 
100644 index 00000000..4f5de990 --- /dev/null +++ b/examples/ndb/dbservervm_register/terraform.tfvars @@ -0,0 +1,4 @@ +#define values to the variables to be used in terraform file_username = "admin" +ndb_password = "password" +ndb_endpoint = "10.xx.xx.xx" +ndb_username = "username" diff --git a/examples/ndb/dbservervm_register/variables.tf b/examples/ndb/dbservervm_register/variables.tf new file mode 100644 index 00000000..1a0cb89b --- /dev/null +++ b/examples/ndb/dbservervm_register/variables.tf @@ -0,0 +1,10 @@ +#define the type of variables to be used in terraform file +variable "ndb_username" { + type = string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} diff --git a/examples/ndb/maintenance_window/main.tf b/examples/ndb/maintenance_window/main.tf new file mode 100644 index 00000000..1f2a20b5 --- /dev/null +++ b/examples/ndb/maintenance_window/main.tf @@ -0,0 +1,41 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + + +## resource to create maintenance window with Weekly as recurrence + +resource nutanix_ndb_maintenance_window minWin{ + name = "test" + description = "this is desc" + recurrence = "WEEKLY" + duration = 2 + day_of_week = "TUESDAY" + start_time = "17:04:47" +} + + +## resource to create maintenance window with Monthly as recurrence + +resource nutanix_ndb_maintenance_window acctest-managed{ + name = "test" + description = "this is desc" + recurrence = "MONTHLY" + duration = 2 + day_of_week = "TUESDAY" + start_time = "17:04:47" + week_of_month= 4 +} \ No newline at end of file diff --git a/examples/ndb/maintenance_window/terraform.tfvars b/examples/ndb/maintenance_window/terraform.tfvars new file mode 100644 index 00000000..4f5de990 --- /dev/null +++ 
b/examples/ndb/maintenance_window/terraform.tfvars @@ -0,0 +1,4 @@ +#define values to the variables to be used in terraform file_username = "admin" +ndb_password = "password" +ndb_endpoint = "10.xx.xx.xx" +ndb_username = "username" diff --git a/examples/ndb/maintenance_window/variables.tf b/examples/ndb/maintenance_window/variables.tf new file mode 100644 index 00000000..1a0cb89b --- /dev/null +++ b/examples/ndb/maintenance_window/variables.tf @@ -0,0 +1,10 @@ +#define the type of variables to be used in terraform file +variable "ndb_username" { + type = string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} diff --git a/examples/ndb/network/main.tf b/examples/ndb/network/main.tf new file mode 100644 index 00000000..66b0a7df --- /dev/null +++ b/examples/ndb/network/main.tf @@ -0,0 +1,44 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + + +#resource to create network +resource "nutanix_ndb_network" "name" { + name= "test-sub" + type="Static" + cluster_id = "{{ cluster_id }}" + gateway= "{{ gatway for the vlan }}" + subnet_mask = "{{ subnet mask for the vlan}}" + primary_dns = " {{ primary dns for the vlan }}" + secondary_dns= "{{secondary dns for the vlan }}" + ip_pools{ + start_ip = "{{ starting address range}}" + end_ip = "{{ ending address range }}" + } +} + +#data source to get network +data "nutanix_ndb_network" "net"{ + name = "{{ name of network }}" +} + +data "nutanix_ndb_network" "net"{ + id = "{{ id of network }}" +} + +#data source to get List of networks +data "nutanix_ndb_networks" "nets"{ } \ No newline at end of file diff --git a/examples/ndb/network/terraform.tfvars b/examples/ndb/network/terraform.tfvars new file mode 100644 index 00000000..4f5de990 --- /dev/null 
+++ b/examples/ndb/network/terraform.tfvars @@ -0,0 +1,4 @@ +#define values to the variables to be used in terraform file_username = "admin" +ndb_password = "password" +ndb_endpoint = "10.xx.xx.xx" +ndb_username = "username" diff --git a/examples/ndb/network/variables.tf b/examples/ndb/network/variables.tf new file mode 100644 index 00000000..1a0cb89b --- /dev/null +++ b/examples/ndb/network/variables.tf @@ -0,0 +1,10 @@ +#define the type of variables to be used in terraform file +variable "ndb_username" { + type = string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} diff --git a/examples/ndb/tags/main.tf b/examples/ndb/tags/main.tf new file mode 100644 index 00000000..83cb6534 --- /dev/null +++ b/examples/ndb/tags/main.tf @@ -0,0 +1,34 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + +## resource to create tag in ndb + +resource "nutanix_ndb_tag" "name" { + name= "testst-up" + description = "this is desc ok" + entity_type = "DATABASE" + required=true +} + +## resource to deprecate the tag +resource "nutanix_ndb_tag" "name" { + name= "testst-up" + description = "this is desc ok" + entity_type = "DATABASE" + required=true + status = "DEPRECATED" +} \ No newline at end of file diff --git a/examples/ndb/tags/terraform.tfvars b/examples/ndb/tags/terraform.tfvars new file mode 100644 index 00000000..3bf972a7 --- /dev/null +++ b/examples/ndb/tags/terraform.tfvars @@ -0,0 +1,3 @@ +ndb_password = "password" +ndb_endpoint = "10.xx.xx.xx" +ndb_username = "username" diff --git a/examples/ndb/tags/variables.tf b/examples/ndb/tags/variables.tf new file mode 100644 index 00000000..32cdf2d4 --- /dev/null +++ b/examples/ndb/tags/variables.tf @@ -0,0 +1,9 @@ +variable "ndb_username" { + type = 
string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} \ No newline at end of file diff --git a/examples/ndb/time_machine_clusters/main.tf b/examples/ndb/time_machine_clusters/main.tf new file mode 100644 index 00000000..561f8dfc --- /dev/null +++ b/examples/ndb/time_machine_clusters/main.tf @@ -0,0 +1,24 @@ +terraform{ + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = "1.8.0" + } + } +} + +#definig nutanix configuration +provider "nutanix"{ + ndb_username = var.ndb_username + ndb_password = var.ndb_password + ndb_endpoint = var.ndb_endpoint + insecure = true +} + +## resource to add cluster in time machines for snapshot destination with SLAs + +resource "nutanix_ndb_tms_cluster" "cls" { + time_machine_id = "{{ tms_id }}" + nx_cluster_id = "{{ cluster_id }}" + sla_id = "{{ sla_id }}" +} \ No newline at end of file diff --git a/examples/ndb/time_machine_clusters/terraform.tfvars b/examples/ndb/time_machine_clusters/terraform.tfvars new file mode 100644 index 00000000..3bf972a7 --- /dev/null +++ b/examples/ndb/time_machine_clusters/terraform.tfvars @@ -0,0 +1,3 @@ +ndb_password = "password" +ndb_endpoint = "10.xx.xx.xx" +ndb_username = "username" diff --git a/examples/ndb/time_machine_clusters/variables.tf b/examples/ndb/time_machine_clusters/variables.tf new file mode 100644 index 00000000..32cdf2d4 --- /dev/null +++ b/examples/ndb/time_machine_clusters/variables.tf @@ -0,0 +1,9 @@ +variable "ndb_username" { + type = string +} +variable "ndb_password" { + type = string +} +variable "ndb_endpoint" { + type = string +} \ No newline at end of file diff --git a/nutanix/common_era_schema.go b/nutanix/common_era_schema.go index 5e7b1ca6..f8c82659 100644 --- a/nutanix/common_era_schema.go +++ b/nutanix/common_era_schema.go @@ -601,7 +601,13 @@ func expandIPInfos(pr []interface{}) []*era.IPInfos { } if addr, ok := val["ip_addresses"]; ok { - IPInfo.IPAddresses = utils.StringSlice(addr.([]string)) + 
res := make([]*string, 0) + ips := addr.([]interface{}) + + for _, v := range ips { + res = append(res, utils.StringPtr(v.(string))) + } + IPInfo.IPAddresses = res } IPInfos = append(IPInfos, IPInfo) diff --git a/nutanix/data_source_nutanix_ndb_clone.go b/nutanix/data_source_nutanix_ndb_clone.go index ec4f6148..cf9f0ce0 100644 --- a/nutanix/data_source_nutanix_ndb_clone.go +++ b/nutanix/data_source_nutanix_ndb_clone.go @@ -65,10 +65,6 @@ func dataSourceNutanixNDBClone() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -87,18 +83,6 @@ func dataSourceNutanixNDBClone() *schema.Resource { Type: schema.TypeBool, Computed: true, }, - "era_created": { - Type: schema.TypeBool, - Computed: true, - }, - "internal": { - Type: schema.TypeBool, - Computed: true, - }, - "placeholder": { - Type: schema.TypeBool, - Computed: true, - }, "database_name": { Type: schema.TypeString, Computed: true, @@ -136,14 +120,6 @@ func dataSourceNutanixNDBClone() *schema.Resource { Computed: true, }, "info": dataSourceEraDatabaseInfo(), - "group_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "metadata": dataSourceEraDBInstanceMetadata(), "metric": { Type: schema.TypeMap, Computed: true, @@ -151,10 +127,6 @@ func dataSourceNutanixNDBClone() *schema.Resource { Type: schema.TypeString, }, }, - "category": { - Type: schema.TypeString, - Computed: true, - }, "parent_database_id": { Type: schema.TypeString, Computed: true, @@ -181,13 +153,6 @@ func dataSourceNutanixNDBClone() *schema.Resource { Type: schema.TypeString, }, }, - "database_group_state_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, }, } } @@ -244,10 +209,6 @@ func dataSourceNutanixNDBCloneRead(ctx context.Context, d *schema.ResourceData, return diag.FromErr(err) } - if 
err := d.Set("owner_id", resp.Ownerid); err != nil { - return diag.FromErr(err) - } - if err := d.Set("description", resp.Description); err != nil { return diag.FromErr(err) } @@ -275,14 +236,6 @@ func dataSourceNutanixNDBCloneRead(ctx context.Context, d *schema.ResourceData, return diag.FromErr(err) } - if err := d.Set("internal", resp.Internal); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("placeholder", resp.Placeholder); err != nil { - return diag.FromErr(err) - } - if err := d.Set("database_name", resp.Databasename); err != nil { return diag.FromErr(err) } @@ -323,22 +276,10 @@ func dataSourceNutanixNDBCloneRead(ctx context.Context, d *schema.ResourceData, return diag.FromErr(err) } - if err := d.Set("group_info", resp.GroupInfo); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("metadata", flattenDBInstanceMetadata(resp.Metadata)); err != nil { - return diag.FromErr(err) - } - if err := d.Set("metric", resp.Metric); err != nil { return diag.FromErr(err) } - if err := d.Set("category", resp.Category); err != nil { - return diag.FromErr(err) - } - if err := d.Set("parent_database_id", resp.ParentDatabaseID); err != nil { return diag.FromErr(err) } @@ -370,10 +311,6 @@ func dataSourceNutanixNDBCloneRead(ctx context.Context, d *schema.ResourceData, if err := d.Set("databases", resp.Databases); err != nil { return diag.FromErr(err) } - if err := d.Set("database_group_state_info", resp.DatabaseGroupStateInfo); err != nil { - return diag.FromErr(err) - } - d.SetId(resp.ID) return nil diff --git a/nutanix/data_source_nutanix_ndb_clones.go b/nutanix/data_source_nutanix_ndb_clones.go index 1696c57e..572ccaf4 100644 --- a/nutanix/data_source_nutanix_ndb_clones.go +++ b/nutanix/data_source_nutanix_ndb_clones.go @@ -68,10 +68,6 @@ func dataSourceNutanixNDBClones() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, 
Computed: true, @@ -90,18 +86,6 @@ func dataSourceNutanixNDBClones() *schema.Resource { Type: schema.TypeBool, Computed: true, }, - "era_created": { - Type: schema.TypeBool, - Computed: true, - }, - "internal": { - Type: schema.TypeBool, - Computed: true, - }, - "placeholder": { - Type: schema.TypeBool, - Computed: true, - }, "database_name": { Type: schema.TypeString, Computed: true, @@ -139,14 +123,6 @@ func dataSourceNutanixNDBClones() *schema.Resource { Computed: true, }, "info": dataSourceEraDatabaseInfo(), - "group_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "metadata": dataSourceEraDBInstanceMetadata(), "metric": { Type: schema.TypeMap, Computed: true, @@ -154,10 +130,6 @@ func dataSourceNutanixNDBClones() *schema.Resource { Type: schema.TypeString, }, }, - "category": { - Type: schema.TypeString, - Computed: true, - }, "parent_database_id": { Type: schema.TypeString, Computed: true, @@ -184,13 +156,6 @@ func dataSourceNutanixNDBClones() *schema.Resource { Type: schema.TypeString, }, }, - "database_group_state_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, }, }, }, diff --git a/nutanix/data_source_nutanix_ndb_cluster.go b/nutanix/data_source_nutanix_ndb_cluster.go index 4006b064..c4a55b46 100644 --- a/nutanix/data_source_nutanix_ndb_cluster.go +++ b/nutanix/data_source_nutanix_ndb_cluster.go @@ -5,6 +5,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" ) func dataSourceNutanixEraCluster() *schema.Resource { @@ -149,8 +150,17 @@ func dataSourceNutanixEraCluster() *schema.Resource { Computed: true, }, "entity_counts": { - Type: schema.TypeString, + Type: schema.TypeList, Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "db_servers": { + Type: 
schema.TypeInt, + Computed: true, + }, + "engine_counts": engineCountSchema(), + }, + }, }, "healthy": { Type: schema.TypeBool, @@ -260,7 +270,7 @@ func dataSourceNutanixEraClusterRead(ctx context.Context, d *schema.ResourceData return diag.FromErr(err) } - if err := d.Set("entity_counts", resp.Entitycounts); err != nil { + if err := d.Set("entity_counts", flattenEntityCounts(resp.EntityCounts)); err != nil { return diag.FromErr(err) } if err := d.Set("healthy", resp.Healthy); err != nil { @@ -270,3 +280,137 @@ func dataSourceNutanixEraClusterRead(ctx context.Context, d *schema.ResourceData d.SetId(*resp.ID) return nil } + +func flattenEntityCounts(pr *era.EntityCounts) []interface{} { + if pr != nil { + res := make([]interface{}, 0) + + entity := map[string]interface{}{} + + entity["db_servers"] = pr.DBServers + entity["engine_counts"] = flattenEngineCounts(pr.EngineCounts) + + res = append(res, entity) + return res + } + return nil +} + +func flattenEngineCounts(pr *era.EngineCounts) []interface{} { + if pr != nil { + engineCounts := make([]interface{}, 0) + engine := map[string]interface{}{} + + engine["mariadb_database"] = flattenProfileTmsCount(pr.MariadbDatabase) + engine["mongodb_database"] = flattenProfileTmsCount(pr.MongodbDatabase) + engine["mysql_database"] = flattenProfileTmsCount(pr.MySQLDatabase) + engine["oracle_database"] = flattenProfileTmsCount(pr.OracleDatabase) + engine["postgres_database"] = flattenProfileTmsCount(pr.PostgresDatabase) + engine["saphana_database"] = flattenProfileTmsCount(pr.SaphanaDatabase) + engine["sqlserver_database"] = flattenProfileTmsCount(pr.SqlserverDatabase) + + engineCounts = append(engineCounts, engine) + return engineCounts + } + return nil +} + +func flattenProfileTmsCount(pr *era.ProfileTimeMachinesCount) []interface{} { + if pr != nil { + engineCounts := make([]interface{}, 0) + count := map[string]interface{}{} + + count["profiles"] = flattenProfilesCount(pr.Profiles) + count["time_machines"] = pr.TimeMachines + 
engineCounts = append(engineCounts, count) + return engineCounts + } + return nil +} + +func flattenProfilesCount(pr *era.ProfilesEntity) []interface{} { + if pr != nil { + profileCounts := make([]interface{}, 0) + count := map[string]interface{}{} + + count["compute"] = pr.Compute + count["database_parameter"] = pr.DatabaseParameter + count["software"] = pr.Software + count["network"] = pr.Network + count["storage"] = pr.Storage + count["windows_domain"] = pr.WindowsDomain + + profileCounts = append(profileCounts, count) + return profileCounts + } + return nil +} + +func engineCountSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "oracle_database": profileTimeMachineCountSchema(), + "postgres_database": profileTimeMachineCountSchema(), + "mongodb_database": profileTimeMachineCountSchema(), + "sqlserver_database": profileTimeMachineCountSchema(), + "saphana_database": profileTimeMachineCountSchema(), + "mariadb_database": profileTimeMachineCountSchema(), + "mysql_database": profileTimeMachineCountSchema(), + }, + }, + } +} + +func profileTimeMachineCountSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "profiles": profilesCountSchema(), + "time_machines": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + } +} + +func profilesCountSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "windows_domain": { + Type: schema.TypeInt, + Computed: true, + }, + "software": { + Type: schema.TypeInt, + Computed: true, + }, + "compute": { + Type: schema.TypeInt, + Computed: true, + }, + "network": { + Type: schema.TypeInt, + Computed: true, + }, + "storage": { + Type: schema.TypeInt, + Computed: true, + }, + "database_parameter": { + Type: 
schema.TypeInt, + Computed: true, + }, + }, + }, + } +} diff --git a/nutanix/data_source_nutanix_ndb_clusters.go b/nutanix/data_source_nutanix_ndb_clusters.go index 10dd9293..30f14603 100644 --- a/nutanix/data_source_nutanix_ndb_clusters.go +++ b/nutanix/data_source_nutanix_ndb_clusters.go @@ -147,8 +147,17 @@ func dataSourceNutanixEraClusters() *schema.Resource { Computed: true, }, "entity_counts": { - Type: schema.TypeString, + Type: schema.TypeList, Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "db_servers": { + Type: schema.TypeInt, + Computed: true, + }, + "engine_counts": engineCountSchema(), + }, + }, }, "healthy": { Type: schema.TypeBool, @@ -209,7 +218,7 @@ func flattenClustersResponse(crsp *Era.ClusterListResponse) []map[string]interfa d["cloud_info"] = v.Cloudinfo d["resource_config"] = flattenResourceConfig(v.Resourceconfig) d["management_server_info"] = v.Managementserverinfo - d["entity_counts"] = v.Entitycounts + d["entity_counts"] = flattenEntityCounts(v.EntityCounts) d["healthy"] = v.Healthy lst = append(lst, d) } diff --git a/nutanix/data_source_nutanix_ndb_database.go b/nutanix/data_source_nutanix_ndb_database.go index 6ba87668..a4d4853b 100644 --- a/nutanix/data_source_nutanix_ndb_database.go +++ b/nutanix/data_source_nutanix_ndb_database.go @@ -29,10 +29,6 @@ func dataSourceNutanixEraDatabase() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -55,14 +51,6 @@ func dataSourceNutanixEraDatabase() *schema.Resource { Type: schema.TypeBool, Computed: true, }, - "internal": { - Type: schema.TypeBool, - Computed: true, - }, - "placeholder": { - Type: schema.TypeBool, - Computed: true, - }, "database_name": { Type: schema.TypeString, Computed: true, @@ -79,10 +67,6 @@ func dataSourceNutanixEraDatabase() *schema.Resource { Type: schema.TypeString, Computed: true, }, - 
"database_status": { - Type: schema.TypeString, - Computed: true, - }, "dbserver_logical_cluster_id": { Type: schema.TypeString, Computed: true, @@ -91,23 +75,11 @@ func dataSourceNutanixEraDatabase() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "parent_time_machine_id": { - Type: schema.TypeString, - Computed: true, - }, "time_zone": { Type: schema.TypeString, Computed: true, }, "info": dataSourceEraDatabaseInfo(), - "group_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "metadata": dataSourceEraDBInstanceMetadata(), "metric": { Type: schema.TypeMap, Computed: true, @@ -115,18 +87,10 @@ func dataSourceNutanixEraDatabase() *schema.Resource { Type: schema.TypeString, }, }, - "category": { - Type: schema.TypeString, - Computed: true, - }, "parent_database_id": { Type: schema.TypeString, Computed: true, }, - "parent_source_database_id": { - Type: schema.TypeString, - Computed: true, - }, "lcm_config": dataSourceEraLCMConfig(), "time_machine": dataSourceEraTimeMachine(), "dbserver_logical_cluster": { @@ -145,13 +109,6 @@ func dataSourceNutanixEraDatabase() *schema.Resource { Type: schema.TypeString, }, }, - "database_group_state_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, }, } } @@ -176,10 +133,6 @@ func dataSourceNutanixEraDatabaseRead(ctx context.Context, d *schema.ResourceDat return diag.FromErr(err) } - if err := d.Set("owner_id", resp.Ownerid); err != nil { - return diag.FromErr(err) - } - if err := d.Set("description", resp.Description); err != nil { return diag.FromErr(err) } @@ -207,14 +160,6 @@ func dataSourceNutanixEraDatabaseRead(ctx context.Context, d *schema.ResourceDat return diag.FromErr(err) } - if err := d.Set("internal", resp.Internal); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("placeholder", resp.Placeholder); err != nil { - return diag.FromErr(err) - } - if err := 
d.Set("database_name", resp.Databasename); err != nil { return diag.FromErr(err) } @@ -231,10 +176,6 @@ func dataSourceNutanixEraDatabaseRead(ctx context.Context, d *schema.ResourceDat return diag.FromErr(err) } - if err := d.Set("database_status", resp.Databasestatus); err != nil { - return diag.FromErr(err) - } - if err := d.Set("dbserver_logical_cluster_id", resp.Dbserverlogicalclusterid); err != nil { return diag.FromErr(err) } @@ -243,10 +184,6 @@ func dataSourceNutanixEraDatabaseRead(ctx context.Context, d *schema.ResourceDat return diag.FromErr(err) } - if err := d.Set("parent_time_machine_id", resp.Parenttimemachineid); err != nil { - return diag.FromErr(err) - } - if err := d.Set("time_zone", resp.Timezone); err != nil { return diag.FromErr(err) } @@ -255,30 +192,14 @@ func dataSourceNutanixEraDatabaseRead(ctx context.Context, d *schema.ResourceDat return diag.FromErr(err) } - if err := d.Set("group_info", resp.GroupInfo); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("metadata", flattenDBInstanceMetadata(resp.Metadata)); err != nil { - return diag.FromErr(err) - } - if err := d.Set("metric", resp.Metric); err != nil { return diag.FromErr(err) } - if err := d.Set("category", resp.Category); err != nil { - return diag.FromErr(err) - } - if err := d.Set("parent_database_id", resp.ParentDatabaseID); err != nil { return diag.FromErr(err) } - if err := d.Set("parent_source_database_id", resp.ParentSourceDatabaseID); err != nil { - return diag.FromErr(err) - } - if err := d.Set("lcm_config", flattenDBLcmConfig(resp.Lcmconfig)); err != nil { return diag.FromErr(err) } @@ -304,9 +225,6 @@ func dataSourceNutanixEraDatabaseRead(ctx context.Context, d *schema.ResourceDat if err := d.Set("databases", resp.Databases); err != nil { return diag.FromErr(err) } - if err := d.Set("database_group_state_info", resp.DatabaseGroupStateInfo); err != nil { - return diag.FromErr(err) - } d.SetId(resp.ID) return nil @@ -379,9 +297,7 @@ func flattenDBNodes(pr 
[]Era.Databasenodes) []map[string]interface{} { db["dbserver_id"] = v.Dbserverid db["description"] = v.Description db["id"] = v.ID - db["metadata"] = v.Metadata db["name"] = v.Name - db["owner_id"] = v.Ownerid db["primary"] = v.Primary db["properties"] = flattenDBInstanceProperties(v.Properties) db["protection_domain"] = flattenDBProtectionDomain(v.Protectiondomain) @@ -410,10 +326,8 @@ func flattenDBLinkedDbs(pr []Era.Linkeddatabases) []map[string]interface{} { ld["date_modified"] = v.Datemodified ld["description"] = v.Description ld["id"] = v.ID - ld["metadata"] = v.Metadata ld["metric"] = v.Metric ld["name"] = v.Name - ld["owner_id"] = v.Ownerid ld["parent_database_id"] = v.ParentDatabaseID ld["parent_linked_database_id"] = v.ParentLinkedDatabaseID ld["snapshot_id"] = v.SnapshotID @@ -649,7 +563,6 @@ func flattenDBTimeMachine(pr *Era.TimeMachine) []map[string]interface{} { tmac["id"] = pr.ID tmac["name"] = pr.Name tmac["description"] = pr.Description - tmac["owner_id"] = pr.OwnerID tmac["date_created"] = pr.DateCreated tmac["date_modified"] = pr.DateModified tmac["access_level"] = pr.AccessLevel @@ -657,10 +570,8 @@ func flattenDBTimeMachine(pr *Era.TimeMachine) []map[string]interface{} { tmac["tags"] = flattenDBTags(pr.Tags) tmac["clustered"] = pr.Clustered tmac["clone"] = pr.Clone - tmac["internal"] = pr.Internal tmac["database_id"] = pr.DatabaseID tmac["type"] = pr.Type - tmac["category"] = pr.Category tmac["status"] = pr.Status tmac["ea_status"] = pr.EaStatus tmac["scope"] = pr.Scope @@ -674,7 +585,6 @@ func flattenDBTimeMachine(pr *Era.TimeMachine) []map[string]interface{} { tmac["sla_update_in_progress"] = pr.SLAUpdateInProgress tmac["sla"] = flattenDBSLA(pr.SLA) tmac["schedule"] = flattenSchedule(pr.Schedule) - tmac["metadata"] = flattenTimeMachineMetadata(pr.Metadata) res = append(res, tmac) return res @@ -1205,10 +1115,6 @@ func dataSourceEraTimeMachine() *schema.Schema { Type: schema.TypeString, Computed: true, }, - "owner_id": { - Type: 
schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -1231,10 +1137,6 @@ func dataSourceEraTimeMachine() *schema.Schema { Type: schema.TypeBool, Computed: true, }, - "internal": { - Type: schema.TypeBool, - Computed: true, - }, "database_id": { Type: schema.TypeString, Computed: true, @@ -1243,10 +1145,6 @@ func dataSourceEraTimeMachine() *schema.Schema { Type: schema.TypeString, Computed: true, }, - "category": { - Type: schema.TypeString, - Computed: true, - }, "status": { Type: schema.TypeString, Computed: true, @@ -1567,158 +1465,6 @@ func dataSourceEraTimeMachine() *schema.Schema { }, }, }, - "metadata": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secure_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "deregister_info": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "message": { - Type: schema.TypeString, - Computed: true, - }, - "operations": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "capability_reset_time": { - Type: schema.TypeString, - Computed: true, - }, - "auto_heal": { - Type: schema.TypeBool, - Computed: true, - }, - "auto_heal_snapshot_count": { - Type: schema.TypeInt, - Computed: true, - }, - "auto_heal_log_catchup_count": { - Type: schema.TypeInt, - Computed: true, - }, - "first_snapshot_captured": { - Type: schema.TypeBool, - Computed: true, - }, - "first_snapshot_dispatched": { - Type: schema.TypeBool, - Computed: true, - }, - "last_snapshot_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_auto_snapshot_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_snapshot_operation_id": { 
- Type: schema.TypeString, - Computed: true, - }, - "last_auto_snapshot_operation_id": { - Type: schema.TypeString, - Computed: true, - }, - "last_successful_snapshot_operation_id": { - Type: schema.TypeString, - Computed: true, - }, - "snapshot_successive_failure_count": { - Type: schema.TypeInt, - Computed: true, - }, - "last_heal_snapshot_operation": { - Type: schema.TypeString, - Computed: true, - }, - "last_log_catchup_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_successful_log_catchup_operation_id": { - Type: schema.TypeString, - Computed: true, - }, - "last_log_catchup_operation_id": { - Type: schema.TypeString, - Computed: true, - }, - "log_catchup_successive_failure_count": { - Type: schema.TypeInt, - Computed: true, - }, - "last_pause_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_pause_by_force": { - Type: schema.TypeBool, - Computed: true, - }, - "last_resume_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_pause_reason": { - Type: schema.TypeString, - Computed: true, - }, - "state_before_restore": { - Type: schema.TypeString, - Computed: true, - }, - "last_health_alerted_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_ea_breakdown_time": { - Type: schema.TypeString, - Computed: true, - }, - "authorized_dbservers": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "last_heal_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_heal_system_triggered": { - Type: schema.TypeBool, - Computed: true, - }, - }, - }, - }, }, }, } @@ -1742,10 +1488,6 @@ func dataSourceEraDatabaseNodes() *schema.Schema { Type: schema.TypeString, Computed: true, }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -1791,13 +1533,6 @@ func dataSourceEraDatabaseNodes() *schema.Schema { Type: schema.TypeString, Computed: true, }, - "metadata": { - Type: 
schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, "info": { Type: schema.TypeList, Computed: true, @@ -1958,10 +1693,6 @@ func dataSourceEraLinkedDatabases() *schema.Schema { Type: schema.TypeString, Computed: true, }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -1996,13 +1727,6 @@ func dataSourceEraLinkedDatabases() *schema.Schema { }, }, }, - "metadata": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, "metric": { Type: schema.TypeMap, Computed: true, diff --git a/nutanix/data_source_nutanix_ndb_database_test.go b/nutanix/data_source_nutanix_ndb_database_test.go index 8e268df0..dec840d0 100644 --- a/nutanix/data_source_nutanix_ndb_database_test.go +++ b/nutanix/data_source_nutanix_ndb_database_test.go @@ -14,9 +14,7 @@ func TestAccEraDatabaseDataSource_basic(t *testing.T) { { Config: testAccEraDatabaseDataSourceConfig(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.nutanix_ndb_database.test", "metadata.#", "1"), resource.TestCheckResourceAttr("data.nutanix_ndb_database.test", "time_zone", "UTC"), - resource.TestCheckResourceAttrSet("data.nutanix_ndb_database.test", "placeholder"), resource.TestCheckResourceAttrSet("data.nutanix_ndb_database.test", "name"), resource.TestCheckResourceAttrSet("data.nutanix_ndb_database.test", "linked_databases.#"), ), diff --git a/nutanix/data_source_nutanix_ndb_databases.go b/nutanix/data_source_nutanix_ndb_databases.go index 108bfa17..82f2aae9 100644 --- a/nutanix/data_source_nutanix_ndb_databases.go +++ b/nutanix/data_source_nutanix_ndb_databases.go @@ -37,10 +37,6 @@ func dataSourceNutanixEraDatabases() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -59,18 +55,6 @@ func 
dataSourceNutanixEraDatabases() *schema.Resource { Type: schema.TypeBool, Computed: true, }, - "era_created": { - Type: schema.TypeBool, - Computed: true, - }, - "internal": { - Type: schema.TypeBool, - Computed: true, - }, - "placeholder": { - Type: schema.TypeBool, - Computed: true, - }, "database_name": { Type: schema.TypeString, Computed: true, @@ -87,10 +71,6 @@ func dataSourceNutanixEraDatabases() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "database_status": { - Type: schema.TypeString, - Computed: true, - }, "dbserver_logical_cluster_id": { Type: schema.TypeString, Computed: true, @@ -99,22 +79,11 @@ func dataSourceNutanixEraDatabases() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "parent_time_machine_id": { - Type: schema.TypeString, - Computed: true, - }, "time_zone": { Type: schema.TypeString, Computed: true, }, - "info": dataSourceEraDatabaseInfo(), - "group_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, + "info": dataSourceEraDatabaseInfo(), "metadata": dataSourceEraDBInstanceMetadata(), "metric": { Type: schema.TypeMap, @@ -123,18 +92,10 @@ func dataSourceNutanixEraDatabases() *schema.Resource { Type: schema.TypeString, }, }, - "category": { - Type: schema.TypeString, - Computed: true, - }, "parent_database_id": { Type: schema.TypeString, Computed: true, }, - "parent_source_database_id": { - Type: schema.TypeString, - Computed: true, - }, "lcm_config": dataSourceEraLCMConfig(), "time_machine": dataSourceEraTimeMachine(), "dbserver_logical_cluster": { @@ -153,13 +114,6 @@ func dataSourceNutanixEraDatabases() *schema.Resource { Type: schema.TypeString, }, }, - "database_group_state_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, }, }, }, @@ -205,34 +159,24 @@ func flattenDatabaseIntancesList(db *era.ListDatabaseInstance) []map[string]inte for _, data := range *db { d := 
map[string]interface{}{} - d["category"] = data.Category d["clone"] = data.Clone d["clustered"] = data.Clustered - d["database_group_state_info"] = data.DatabaseGroupStateInfo d["database_cluster_type"] = data.Databaseclustertype d["database_name"] = data.Databasename d["database_nodes"] = flattenDBNodes(data.Databasenodes) d["databases"] = data.Databases - d["database_status"] = data.Databasestatus d["date_created"] = data.Datecreated d["date_modified"] = data.Datemodified d["dbserver_logical_cluster"] = data.Dbserverlogicalcluster d["dbserver_logical_cluster_id"] = data.Dbserverlogicalclusterid d["description"] = data.Description - d["group_info"] = data.GroupInfo d["id"] = data.ID d["info"] = flattenDBInfo(data.Info) - d["internal"] = data.Internal d["lcm_config"] = flattenDBLcmConfig(data.Lcmconfig) d["linked_databases"] = flattenDBLinkedDbs(data.Linkeddatabases) - d["metadata"] = flattenDBInstanceMetadata(data.Metadata) d["metric"] = data.Metric d["name"] = data.Name - d["owner_id"] = data.Ownerid d["parent_database_id"] = data.ParentDatabaseID - d["parent_source_database_id"] = data.ParentSourceDatabaseID - d["parent_time_machine_id"] = data.Parenttimemachineid - d["placeholder"] = data.Placeholder d["properties"] = flattenDBInstanceProperties(data.Properties) d["status"] = data.Status d["tags"] = flattenDBTags(data.Tags) diff --git a/nutanix/data_source_nutanix_ndb_databases_test.go b/nutanix/data_source_nutanix_ndb_databases_test.go index 911be937..c7f7abe0 100644 --- a/nutanix/data_source_nutanix_ndb_databases_test.go +++ b/nutanix/data_source_nutanix_ndb_databases_test.go @@ -14,7 +14,6 @@ func TestAccEraDatabasesDataSource_basic(t *testing.T) { { Config: testAccEraDatabasesDataSourceConfig(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.metadata.#"), resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.time_zone"), 
resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.#"), resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.id"), @@ -33,7 +32,6 @@ func TestAccEraDatabasesDataSource_ByFilters(t *testing.T) { { Config: testAccEraDatabasesDataSourceConfigByFilters(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.metadata.#"), resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.time_zone"), resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.#"), resource.TestCheckResourceAttrSet("data.nutanix_ndb_databases.test", "database_instances.0.id"), diff --git a/nutanix/data_source_nutanix_ndb_dbserver.go b/nutanix/data_source_nutanix_ndb_dbserver.go new file mode 100644 index 00000000..b08e4cb9 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_dbserver.go @@ -0,0 +1,545 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func dataSourceNutanixNDBDBServer() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBDBServerRead, + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "ip": { + Type: schema.TypeString, + Optional: true, + }, + "vm_cluster_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "vm_cluster_id": { + Type: schema.TypeString, + Optional: true, + }, + "dbserver_cluster_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + 
Optional: true, + }, + // computed + "description": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "access_level": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + + "value": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + }, + }, + }, + "tags": dataSourceEraDBInstanceTags(), + "vm_cluster_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "fqdns": { + Type: schema.TypeString, + Computed: true, + }, + "mac_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "client_id": { + Type: schema.TypeString, + Computed: true, + }, + "era_drive_id": { + Type: schema.TypeString, + Computed: true, + }, + "era_version": { + Type: schema.TypeString, + Computed: true, + }, + "vm_timezone": { + Type: schema.TypeString, + Computed: true, + }, + "vm_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": dataSourceEraDatabaseInfo(), + "deregister_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + }, + "operations": { + Type: 
schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "os_type": { + Type: schema.TypeString, + Computed: true, + }, + "os_version": { + Type: schema.TypeString, + Computed: true, + }, + "distribution": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }}, + "network_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vlan_name": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_type": { + Type: schema.TypeString, + Computed: true, + }, + "era_configured": { + Type: schema.TypeBool, + Computed: true, + }, + "gateway": { + Type: schema.TypeString, + Computed: true, + }, + "subnet_mask": { + Type: schema.TypeString, + Computed: true, + }, + "hostname": { + Type: schema.TypeString, + Computed: true, + }, + "device_name": { + Type: schema.TypeString, + Computed: true, + }, + "mac_address": { + Type: schema.TypeString, + Computed: true, + }, + "flags": { + Type: schema.TypeString, + Computed: true, + }, + "mtu": { + Type: schema.TypeString, + Computed: true, + }, + "ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "default_gateway_device": { + Type: schema.TypeBool, + Computed: true, + }, + "access_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_type": { + Type: schema.TypeString, + Computed: true, + }, + "destination_subnet": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "clustered": { + Type: schema.TypeBool, + Computed: true, + }, + "is_server_driven": { + Type: schema.TypeBool, + Computed: true, + }, + "protection_domain_id": { + Type: schema.TypeString, + Computed: true, + }, + "query_count": { + Type: schema.TypeInt, + 
Computed: true, + }, + "database_type": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_invalid_ea_state": { + Type: schema.TypeBool, + Computed: true, + }, + "working_directory": { + Type: schema.TypeString, + Computed: true, + }, + "valid_diagnostic_bundle_state": { + Type: schema.TypeBool, + Computed: true, + }, + "windows_db_server": { + Type: schema.TypeBool, + Computed: true, + }, + "associated_time_machine_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "access_key_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceNutanixNDBDBServerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + filterReq := &era.DBServerFilterRequest{} + + if name, ok := d.GetOk("name"); ok { + filterReq.Name = utils.StringPtr(name.(string)) + } + if id, ok := d.GetOk("id"); ok { + filterReq.ID = utils.StringPtr(id.(string)) + } + if ip, ok := d.GetOk("ip"); ok { + filterReq.IP = utils.StringPtr(ip.(string)) + } + if vmClsName, ok := d.GetOk("vm_cluster_name"); ok { + filterReq.VMClusterName = utils.StringPtr(vmClsName.(string)) + } + if vmClsid, ok := d.GetOk("vm_cluster_id"); ok { + filterReq.VMClusterID = utils.StringPtr(vmClsid.(string)) + } + if nxclsID, ok := d.GetOk("nx_cluster_id"); ok { + filterReq.NxClusterID = utils.StringPtr(nxclsID.(string)) + } + if dbserver, ok := d.GetOk("dbserver_cluster_id"); ok { + filterReq.DBServerClusterID = utils.StringPtr(dbserver.(string)) + } + + resp, err := conn.Service.GetDBServerVM(ctx, filterReq) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("id", resp.ID); err != nil { + return diag.FromErr(err) + } + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + if err := d.Set("date_created", resp.DateCreated); err != 
nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.DateModified); err != nil { + return diag.FromErr(err) + } + if err := d.Set("access_level", resp.AccessLevel); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("vm_cluster_uuid", resp.VMClusterUUID); err != nil { + return diag.FromErr(err) + } + if err := d.Set("ip_addresses", utils.StringValueSlice(resp.IPAddresses)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("fqdns", resp.Fqdns); err != nil { + return diag.FromErr(err) + } + if err := d.Set("mac_addresses", utils.StringValueSlice(resp.MacAddresses)); err != nil { + return diag.FromErr(err) + } + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + if err := d.Set("client_id", resp.ClientID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("era_drive_id", resp.EraDriveID); err != nil { + return diag.FromErr(err) + } + if err := d.Set("era_version", resp.EraVersion); err != nil { + return diag.FromErr(err) + } + if err := d.Set("vm_timezone", resp.VMTimeZone); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("clustered", resp.Clustered); err != nil { + return diag.FromErr(err) + } + if err := d.Set("is_server_driven", resp.IsServerDriven); err != nil { + return diag.FromErr(err) + } + if err := d.Set("protection_domain_id", resp.ProtectionDomainID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("query_count", resp.QueryCount); err != nil { + return diag.FromErr(err) + } + if err := d.Set("database_type", resp.DatabaseType); err != nil { + return diag.FromErr(err) + } + if err := d.Set("dbserver_invalid_ea_state", resp.DbserverInValidEaState); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("working_directory", resp.WorkingDirectory); err != nil { + return diag.FromErr(err) + } + if err := 
d.Set("valid_diagnostic_bundle_state", resp.ValidDiagnosticBundleState); err != nil { + return diag.FromErr(err) + } + if err := d.Set("windows_db_server", resp.WindowsDBServer); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("associated_time_machine_ids", utils.StringValueSlice(resp.AssociatedTimeMachineIds)); err != nil { + return diag.FromErr(err) + } + if err := d.Set("access_key_id", resp.AccessKeyID); err != nil { + return diag.FromErr(err) + } + + props := []interface{}{} + for _, prop := range resp.Properties { + props = append(props, map[string]interface{}{ + "name": prop.Name, + "value": prop.Value, + }) + } + if err := d.Set("properties", props); err != nil { + return diag.FromErr(err) + } + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("vm_info", flattenDBServerVMInfo(resp.VMInfo)); err != nil { + return diag.FromErr(err) + } + + d.SetId(*resp.ID) + return nil +} + +func flattenDBServerMetadata(pr *era.DBServerMetadata) []interface{} { + if pr != nil { + metaList := make([]interface{}, 0) + + meta := map[string]interface{}{} + + meta["secure_info"] = pr.Secureinfo + meta["info"] = pr.Info + meta["deregister_info"] = flattenDeRegiserInfo(pr.Deregisterinfo) + meta["database_type"] = pr.Databasetype + meta["physical_era_drive"] = pr.Physicaleradrive + meta["clustered"] = pr.Clustered + meta["single_instance"] = pr.Singleinstance + meta["era_drive_initialized"] = pr.Eradriveinitialised + meta["provision_operation_id"] = pr.Provisionoperationid + meta["marked_for_deletion"] = pr.Markedfordeletion + if pr.Associatedtimemachines != nil { + meta["associated_time_machines"] = utils.StringValueSlice(pr.Associatedtimemachines) + } + meta["software_snaphot_interval"] = pr.Softwaresnaphotinterval + + metaList = append(metaList, meta) + return metaList + } + return nil +} + +func flattenDBServerVMInfo(pr *era.VMInfo) []interface{} { + if pr != nil { + infoList := make([]interface{}, 
0) + info := map[string]interface{}{} + + info["secure_info"] = pr.SecureInfo + info["info"] = pr.Info + info["deregister_info"] = flattenDeRegiserInfo(pr.DeregisterInfo) + info["os_type"] = pr.OsType + info["os_version"] = pr.OsVersion + info["distribution"] = pr.Distribution + info["network_info"] = flattenVMNetworkInfo(pr.NetworkInfo) + + infoList = append(infoList, info) + return infoList + } + return nil +} + +func flattenVMNetworkInfo(pr []*era.NetworkInfo) []interface{} { + if len(pr) > 0 { + netList := make([]interface{}, len(pr)) + + for k, v := range pr { + nwt := make(map[string]interface{}) + + nwt["vlan_name"] = v.VlanName + nwt["vlan_uuid"] = v.VlanUUID + nwt["vlan_type"] = v.VlanType + nwt["era_configured"] = v.EraConfigured + nwt["gateway"] = v.Gateway + nwt["subnet_mask"] = v.SubnetMask + nwt["hostname"] = v.Hostname + nwt["device_name"] = v.DeviceName + nwt["mac_address"] = v.MacAddress + nwt["flags"] = v.Flags + nwt["mtu"] = v.Mtu + nwt["ip_addresses"] = utils.StringValueSlice(v.IPAddresses) + nwt["default_gateway_device"] = v.DefaultGatewayDevice + nwt["access_info"] = flattenAccessInfo(v.AccessInfo) + + netList[k] = nwt + } + return netList + } + return nil +} + +func flattenAccessInfo(pr []*era.AccessInfo) []interface{} { + if len(pr) > 0 { + accessList := make([]interface{}, len(pr)) + + for k, v := range pr { + access := make(map[string]interface{}) + + access["access_type"] = v.AccessType + access["destination_subnet"] = v.DestinationSubnet + + accessList[k] = access + } + return accessList + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_dbserver_test.go b/nutanix/data_source_nutanix_ndb_dbserver_test.go new file mode 100644 index 00000000..b34f56d1 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_dbserver_test.go @@ -0,0 +1,69 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraDBServerVMDataSource_basic(t *testing.T) { + resource.Test(t, 
resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDBServerVMDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "properties.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "vm_info.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "vm_cluster_uuid"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "status"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "windows_db_server"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "working_directory"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "mac_addresses.#"), + ), + }, + }, + }) +} + +func TestAccEraDBServerVMDataSource_ByName(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDBServerVMDataSourceConfigByName(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "properties.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "vm_info.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "vm_cluster_uuid"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "status"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "windows_db_server"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "working_directory"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbserver.dbserver", "mac_addresses.#"), + ), + }, + }, + }) +} + +func testAccEraDBServerVMDataSourceConfig() string { + return ` + data "nutanix_ndb_dbservers" "dbservers"{} + + data 
"nutanix_ndb_dbserver" "dbserver"{ + id = data.nutanix_ndb_dbservers.dbservers.dbservers.0.id + } + ` +} + +func testAccEraDBServerVMDataSourceConfigByName() string { + return ` + data "nutanix_ndb_dbservers" "dbservers"{} + + data "nutanix_ndb_dbserver" "dbserver"{ + name = data.nutanix_ndb_dbservers.dbservers.dbservers.0.name + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_dbservers.go b/nutanix/data_source_nutanix_ndb_dbservers.go new file mode 100644 index 00000000..2b2c9084 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_dbservers.go @@ -0,0 +1,368 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func dataSourceNutanixNDBDBServers() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBDBServersRead, + Schema: map[string]*schema.Schema{ + "dbservers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "access_level": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + + "value": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + }, + }, + }, + "tags": 
dataSourceEraDBInstanceTags(), + "vm_cluster_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "fqdns": { + Type: schema.TypeString, + Computed: true, + }, + "mac_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "client_id": { + Type: schema.TypeString, + Computed: true, + }, + "era_drive_id": { + Type: schema.TypeString, + Computed: true, + }, + "era_version": { + Type: schema.TypeString, + Computed: true, + }, + "vm_timezone": { + Type: schema.TypeString, + Computed: true, + }, + "vm_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secure_info": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "info": dataSourceEraDatabaseInfo(), + "deregister_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + }, + "operations": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "os_type": { + Type: schema.TypeString, + Computed: true, + }, + "os_version": { + Type: schema.TypeString, + Computed: true, + }, + "distribution": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }}, + "network_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vlan_name": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_type": { + Type: schema.TypeString, + Computed: 
true, + }, + "era_configured": { + Type: schema.TypeBool, + Computed: true, + }, + "gateway": { + Type: schema.TypeString, + Computed: true, + }, + "subnet_mask": { + Type: schema.TypeString, + Computed: true, + }, + "hostname": { + Type: schema.TypeString, + Computed: true, + }, + "device_name": { + Type: schema.TypeString, + Computed: true, + }, + "mac_address": { + Type: schema.TypeString, + Computed: true, + }, + "flags": { + Type: schema.TypeString, + Computed: true, + }, + "mtu": { + Type: schema.TypeString, + Computed: true, + }, + "ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "default_gateway_device": { + Type: schema.TypeBool, + Computed: true, + }, + "access_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_type": { + Type: schema.TypeString, + Computed: true, + }, + "destination_subnet": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "clustered": { + Type: schema.TypeBool, + Computed: true, + }, + "is_server_driven": { + Type: schema.TypeBool, + Computed: true, + }, + "protection_domain_id": { + Type: schema.TypeString, + Computed: true, + }, + "query_count": { + Type: schema.TypeInt, + Computed: true, + }, + "database_type": { + Type: schema.TypeString, + Computed: true, + }, + "dbserver_invalid_ea_state": { + Type: schema.TypeBool, + Computed: true, + }, + "working_directory": { + Type: schema.TypeString, + Computed: true, + }, + "valid_diagnostic_bundle_state": { + Type: schema.TypeBool, + Computed: true, + }, + "windows_db_server": { + Type: schema.TypeBool, + Computed: true, + }, + "associated_time_machine_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "access_key_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func 
dataSourceNutanixNDBDBServersRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.ListDBServerVM(ctx) + if err != nil { + return diag.FromErr(err) + } + if e := d.Set("dbservers", flattenDBServerVMResponse(resp)); e != nil { + return diag.FromErr(e) + } + + uuid, er := uuid.GenerateUUID() + + if er != nil { + return diag.Errorf("Error generating UUID for era dbservers: %+v", er) + } + d.SetId(uuid) + + return nil +} + +func flattenDBServerVMResponse(pr *era.ListDBServerVMResponse) []interface{} { + if pr != nil { + lst := []interface{}{} + + for _, v := range *pr { + vms := map[string]interface{}{} + + vms["id"] = v.ID + vms["name"] = v.Name + vms["description"] = v.Description + vms["date_created"] = v.DateCreated + vms["date_modified"] = v.DateModified + vms["access_level"] = v.AccessLevel + if v.Properties != nil { + props := []interface{}{} + for _, prop := range v.Properties { + props = append(props, map[string]interface{}{ + "name": prop.Name, + "value": prop.Value, + }) + } + vms["properties"] = props + } + vms["tags"] = flattenDBTags(v.Tags) + vms["vm_cluster_uuid"] = v.VMClusterUUID + vms["ip_addresses"] = utils.StringValueSlice(v.IPAddresses) + vms["fqdns"] = v.Fqdns + vms["mac_addresses"] = utils.StringValueSlice(v.MacAddresses) + vms["type"] = v.Type + vms["status"] = v.Status + vms["client_id"] = v.ClientID + vms["era_drive_id"] = v.EraDriveID + vms["era_version"] = v.EraVersion + vms["vm_timezone"] = v.VMTimeZone + vms["vm_info"] = flattenDBServerVMInfo(v.VMInfo) + vms["clustered"] = v.Clustered + vms["is_server_driven"] = v.IsServerDriven + vms["protection_domain_id"] = v.ProtectionDomainID + vms["query_count"] = v.QueryCount + vms["database_type"] = v.DatabaseType + vms["dbserver_invalid_ea_state"] = v.DbserverInValidEaState + vms["working_directory"] = v.WorkingDirectory + vms["valid_diagnostic_bundle_state"] = v.ValidDiagnosticBundleState + 
vms["windows_db_server"] = v.WindowsDBServer + vms["associated_time_machine_ids"] = utils.StringValueSlice(v.AssociatedTimeMachineIds) + vms["access_key_id"] = v.AccessKeyID + + lst = append(lst, vms) + } + return lst + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_dbservers_test.go b/nutanix/data_source_nutanix_ndb_dbservers_test.go new file mode 100644 index 00000000..6a03788a --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_dbservers_test.go @@ -0,0 +1,38 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEradbserversVMDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEradbserversVMDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbservers.dbservers", "dbservers.0.properties.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbservers.dbservers", "dbservers.0.vm_info.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbservers.dbservers", "dbservers.0.vm_info.0.network_info.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbservers.dbservers", "dbservers.0.vm_cluster_uuid"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbservers.dbservers", "dbservers.0.status"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbservers.dbservers", "dbservers.0.windows_db_server"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbservers.dbservers", "dbservers.0.working_directory"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbservers.dbservers", "dbservers.0.mac_addresses.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbservers.dbservers", "dbservers.0.protection_domain_id"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbservers.dbservers", "dbservers.0.ip_addresses.#"), + 
resource.TestCheckResourceAttrSet("data.nutanix_ndb_dbservers.dbservers", "dbservers.0.era_version"), + ), + }, + }, + }) +} + +func testAccEradbserversVMDataSourceConfig() string { + return ` + data "nutanix_ndb_dbservers" "dbservers"{} + ` +} diff --git a/nutanix/data_source_nutanix_ndb_maintenance_window.go b/nutanix/data_source_nutanix_ndb_maintenance_window.go new file mode 100644 index 00000000..84b362b0 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_maintenance_window.go @@ -0,0 +1,373 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixNDBMaintenanceWindow() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBMaintenanceWindowRead, + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "recurrence": { + Type: schema.TypeString, + Computed: true, + }, + "duration": { + Type: schema.TypeInt, + Computed: true, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + }, + "day_of_week": { + Type: schema.TypeString, + Computed: true, + }, + "week_of_month": { + Type: schema.TypeInt, + Computed: true, + }, + "threshold": { + Type: schema.TypeString, + Computed: true, + }, + "hour": { + Type: schema.TypeInt, + Computed: true, + }, + "minute": { + Type: schema.TypeInt, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + 
"date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "access_level": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "tags": dataSourceEraDBInstanceTags(), + "status": { + Type: schema.TypeString, + Computed: true, + }, + "next_run_time": { + Type: schema.TypeString, + Computed: true, + }, + "entity_task_assoc": EntityTaskAssocSchema(), + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceNutanixNDBMaintenanceWindowRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + maintainenanceWindowID := d.Get("id") + + resp, err := conn.Service.ReadMaintenanceWindow(ctx, maintainenanceWindowID.(string)) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.OwnerID); err != nil { + return diag.FromErr(err) + } + if err := d.Set("date_created", resp.DateCreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.DateModified); err != nil { + return diag.FromErr(err) + } + if err := d.Set("access_level", resp.AccessLevel); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + if err := d.Set("next_run_time", resp.NextRunTime); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("entity_task_assoc", flattenEntityTaskAssoc(resp.EntityTaskAssoc)); err != nil { + return diag.FromErr(err) + } + + if err := 
d.Set("timezone", resp.Timezone); err != nil { + return diag.FromErr(err) + } + + props := []interface{}{} + for _, prop := range resp.Properties { + props = append(props, map[string]interface{}{ + "name": prop.Name, + "value": prop.Value, + }) + } + if err := d.Set("properties", props); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("schedule", flattenMaintenanceSchedule(resp.Schedule)); err != nil { + return diag.FromErr(err) + } + + d.SetId(maintainenanceWindowID.(string)) + return nil +} + +func flattenEntityTaskAssoc(pr []*era.MaintenanceTasksResponse) []interface{} { + if len(pr) > 0 { + tasks := make([]interface{}, 0) + + for _, v := range pr { + entity := map[string]interface{}{} + + entity["access_level"] = v.AccessLevel + entity["date_created"] = v.DateCreated + entity["date_modified"] = v.DateModified + entity["description"] = v.Description + entity["entity"] = v.Entity + entity["entity_id"] = v.EntityID + entity["entity_type"] = v.EntityType + entity["id"] = v.ID + entity["maintenance_window_id"] = v.MaintenanceWindowID + entity["maintenance_window_owner_id"] = v.MaintenanceWindowOwnerID + entity["name"] = v.Name + entity["owner_id"] = v.OwnerID + entity["payload"] = flattenEntityTaskPayload(v.Payload) + entity["status"] = v.Status + entity["task_type"] = v.TaskType + + if v.Tags != nil { + entity["tags"] = flattenDBTags(v.Tags) + } + + if v.Properties != nil { + props := []interface{}{} + for _, prop := range v.Properties { + props = append(props, map[string]interface{}{ + "name": prop.Name, + "value": prop.Value, + }) + } + entity["properties"] = props + } + + tasks = append(tasks, entity) + } + return tasks + } + return nil +} + +func flattenEntityTaskPayload(pr *era.Payload) []interface{} { + if pr != nil { + res := make([]interface{}, 0) + + payload := map[string]interface{}{} + + payload["pre_post_command"] = 
flattenPrePostCommand(pr.PrePostCommand) + res = append(res, payload) + + return res + } + return nil +} + +func flattenPrePostCommand(pr *era.PrePostCommand) []interface{} { + if pr != nil { + comms := make([]interface{}, 0) + command := map[string]interface{}{} + + command["post_command"] = pr.PostCommand + command["pre_command"] = pr.PreCommand + + comms = append(comms, command) + return comms + } + return nil +} + +func EntityTaskAssocSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "access_level": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "tags": dataSourceEraDBInstanceTags(), + "maintenance_window_id": { + Type: schema.TypeString, + Computed: true, + }, + "maintenance_window_owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "entity_id": { + Type: schema.TypeString, + Computed: true, + }, + "entity_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "task_type": { + Type: schema.TypeString, + Computed: true, + }, + "payload": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pre_post_command": { + 
Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pre_command": { + Type: schema.TypeString, + Computed: true, + }, + "post_command": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "entity": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + } +} diff --git a/nutanix/data_source_nutanix_ndb_maintenance_window_test.go b/nutanix/data_source_nutanix_ndb_maintenance_window_test.go new file mode 100644 index 00000000..7aab84d2 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_maintenance_window_test.go @@ -0,0 +1,36 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraMaintenanceWindowDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraMaintenanceWindowDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_maintenance_window.test", "name"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_maintenance_window.test", "properties.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_maintenance_window.test", "schedule.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_maintenance_window.test", "description"), + resource.TestCheckResourceAttr("data.nutanix_ndb_maintenance_window.test", "status", "ACTIVE"), + ), + }, + }, + }) +} + +func testAccEraMaintenanceWindowDataSourceConfig() string { + return ` + data "nutanix_ndb_maintenance_windows" "window"{ } + + data "nutanix_ndb_maintenance_window" "test"{ + id = data.nutanix_ndb_maintenance_windows.window.maintenance_windows.0.id + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_maintenance_windows_list.go b/nutanix/data_source_nutanix_ndb_maintenance_windows_list.go new file mode 100644 index 00000000..08f73d7b 
--- /dev/null +++ b/nutanix/data_source_nutanix_ndb_maintenance_windows_list.go @@ -0,0 +1,188 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixNDBMaintenanceWindows() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBMaintenanceWindowsRead, + Schema: map[string]*schema.Schema{ + "maintenance_windows": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "schedule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "recurrence": { + Type: schema.TypeString, + Computed: true, + }, + "duration": { + Type: schema.TypeInt, + Computed: true, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + }, + "day_of_week": { + Type: schema.TypeString, + Computed: true, + }, + "week_of_month": { + Type: schema.TypeInt, + Computed: true, + }, + "threshold": { + Type: schema.TypeString, + Computed: true, + }, + "hour": { + Type: schema.TypeInt, + Computed: true, + }, + "minute": { + Type: schema.TypeInt, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "access_level": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + 
Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "tags": dataSourceEraDBInstanceTags(), + "status": { + Type: schema.TypeString, + Computed: true, + }, + "next_run_time": { + Type: schema.TypeString, + Computed: true, + }, + "entity_task_assoc": EntityTaskAssocSchema(), + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixNDBMaintenanceWindowsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.ListMaintenanceWindow(ctx) + if err != nil { + return diag.FromErr(err) + } + if e := d.Set("maintenance_windows", flattenMaintenanceWindowsResponse(resp)); e != nil { + return diag.FromErr(e) + } + + uuid, er := uuid.GenerateUUID() + + if er != nil { + return diag.Errorf("Error generating UUID for era maintenance windows: %+v", er) + } + d.SetId(uuid) + + return nil +} + +func flattenMaintenanceWindowsResponse(pr *era.ListMaintenanceWindowResponse) []interface{} { + if pr != nil { + windowResp := make([]interface{}, 0) + for _, v := range *pr { + window := map[string]interface{}{} + window["id"] = v.ID + window["name"] = v.Name + window["description"] = v.Description + window["schedule"] = flattenMaintenanceSchedule(v.Schedule) + window["owner_id"] = v.OwnerID + window["date_created"] = v.DateCreated + window["date_modified"] = v.DateModified + window["access_level"] = v.AccessLevel + window["tags"] = flattenDBTags(v.Tags) + window["status"] = v.Status + window["next_run_time"] = v.NextRunTime + window["entity_task_assoc"] = flattenEntityTaskAssoc(v.EntityTaskAssoc) + window["timezone"] = v.Timezone + if v.Properties != nil { + props := []interface{}{} + for _, prop := range v.Properties { + props = append(props, map[string]interface{}{ + "name": prop.Name, + "value": 
prop.Value, + }) + } + window["properties"] = props + } + + windowResp = append(windowResp, window) + } + + return windowResp + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_maintenance_windows_list_test.go b/nutanix/data_source_nutanix_ndb_maintenance_windows_list_test.go new file mode 100644 index 00000000..1e5bfff4 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_maintenance_windows_list_test.go @@ -0,0 +1,33 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraMaintenanceWindowsDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraMaintenanceWindowsDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_maintenance_windows.test", "maintenance_windows.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_maintenance_windows.test", "maintenance_windows.0.name"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_maintenance_windows.test", "maintenance_windows.0.properties.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_maintenance_windows.test", "maintenance_windows.0.schedule.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_maintenance_windows.test", "maintenance_windows.0.description"), + resource.TestCheckResourceAttr("data.nutanix_ndb_maintenance_windows.test", "maintenance_windows.0.status", "ACTIVE"), + ), + }, + }, + }) +} + +func testAccEraMaintenanceWindowsDataSourceConfig() string { + return ` + data "nutanix_ndb_maintenance_windows" "test"{ } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_network.go b/nutanix/data_source_nutanix_ndb_network.go new file mode 100644 index 00000000..a6bd66fd --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_network.go @@ -0,0 +1,140 @@ +package nutanix + +import ( + "context" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceNutanixEraNetwork() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixEraNetworkRead, + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "managed": { + Type: schema.TypeBool, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "stretched_vlan_id": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "properties_map": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vlan_subnet_mask": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_primary_dns": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_secondary_dns": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_gateway": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} +func dataSourceNutanixEraNetworkRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + name, nok := d.GetOk("name") + networkID, iok := d.GetOk("id") + + if !nok && !iok { + return diag.Errorf("either name or id is required to get the network details") + } + + resp, err := conn.Service.GetNetwork(ctx, networkID.(string), name.(string)) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("id", resp.ID); err != nil { + return 
diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + if err := d.Set("managed", resp.Managed); err != nil { + return diag.FromErr(err) + } + if err := d.Set("cluster_id", resp.ClusterID); err != nil { + return diag.FromErr(err) + } + props := []interface{}{} + for _, prop := range resp.Properties { + props = append(props, map[string]interface{}{ + "name": prop.Name, + "value": prop.Value, + "secure": prop.Secure, + }) + } + if err := d.Set("properties", props); err != nil { + return diag.FromErr(err) + } + + if resp.PropertiesMap != nil { + d.Set("properties_map", flattenPropertiesMap(resp.PropertiesMap)) + } + + if resp.StretchedVlanID != nil { + d.Set("stretched_vlan_id", resp.StretchedVlanID) + } + + d.SetId(*resp.ID) + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_network_test.go b/nutanix/data_source_nutanix_ndb_network_test.go new file mode 100644 index 00000000..fb17db76 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_network_test.go @@ -0,0 +1,65 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraNetworkDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraNetworkDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_network.test", "name"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_network.test", "cluster_id"), + resource.TestCheckResourceAttr("data.nutanix_ndb_network.test", "managed", "false"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_network.test", "properties.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_network.test", "type"), + ), + }, + }, + }) +} + +func 
TestAccEraNetworkDataSource_ByName(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraNetworkDataSourceConfigByName(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_network.test", "name"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_network.test", "cluster_id"), + resource.TestCheckResourceAttr("data.nutanix_ndb_network.test", "managed", "false"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_network.test", "properties.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_network.test", "type"), + ), + }, + }, + }) +} + +func testAccEraNetworkDataSourceConfig() string { + return ` + data "nutanix_ndb_networks" "name" { } + + data "nutanix_ndb_network" "test" { + id = data.nutanix_ndb_networks.name.networks.0.id + } + ` +} + +func testAccEraNetworkDataSourceConfigByName() string { + return ` + data "nutanix_ndb_networks" "name" { } + + data "nutanix_ndb_network" "test" { + name = data.nutanix_ndb_networks.name.networks.0.name + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_networks.go b/nutanix/data_source_nutanix_ndb_networks.go new file mode 100644 index 00000000..4e6f367d --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_networks.go @@ -0,0 +1,159 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixEraNetworks() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixEraNetworksRead, + Schema: map[string]*schema.Schema{ + "networks": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + 
Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "managed": { + Type: schema.TypeBool, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "stretched_vlan_id": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "properties_map": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vlan_subnet_mask": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_primary_dns": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_secondary_dns": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_gateway": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixEraNetworksRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.ListNetwork(ctx) + if err != nil { + return diag.FromErr(err) + } + + if e := d.Set("networks", flattenNetworkListResponse(resp)); e != nil { + return diag.FromErr(e) + } + + uuid, er := uuid.GenerateUUID() + + if er != nil { + return diag.Errorf("Error generating UUID for era networks: %+v", er) + } + d.SetId(uuid) + return nil +} + +func flattenNetworkListResponse(ntw *era.ListNetworkResponse) []interface{} { + if ntw != nil { + networkList := make([]interface{}, 0) + + for _, v := range *ntw { + val := map[string]interface{}{} + val["name"] = v.Name + val["id"] = v.ID + val["type"] = v.Type + val["cluster_id"] = v.ClusterID + val["properties"] = 
flattenNetworkProperties(v.Properties) + if v.PropertiesMap != nil { + val["properties_map"] = flattenPropertiesMap(v.PropertiesMap) + } + if v.Managed != nil { + val["managed"] = v.Managed + } + if v.StretchedVlanID != nil { + val["stretched_vlan_id"] = v.StretchedVlanID + } + + networkList = append(networkList, val) + } + return networkList + } + return nil +} + +func flattenNetworkProperties(erp []*era.Properties) []map[string]interface{} { + if len(erp) > 0 { + res := make([]map[string]interface{}, len(erp)) + + for k, v := range erp { + ents := make(map[string]interface{}) + ents["name"] = v.Name + ents["value"] = v.Value + ents["secure"] = v.Secure + res[k] = ents + } + return res + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_networks_test.go b/nutanix/data_source_nutanix_ndb_networks_test.go new file mode 100644 index 00000000..6c666c08 --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_networks_test.go @@ -0,0 +1,32 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraNetworksDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraNetworksDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_networks.test", "networks.0.name"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_networks.test", "networks.0.cluster_id"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_networks.test", "networks.0.managed"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_networks.test", "networks.0.properties.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_networks.test", "networks.0.type"), + ), + }, + }, + }) +} + +func testAccEraNetworksDataSourceConfig() string { + return ` + data "nutanix_ndb_networks" "test" { } + ` +} diff --git 
a/nutanix/data_source_nutanix_ndb_snapshot.go b/nutanix/data_source_nutanix_ndb_snapshot.go index dd45803e..b61a1e15 100644 --- a/nutanix/data_source_nutanix_ndb_snapshot.go +++ b/nutanix/data_source_nutanix_ndb_snapshot.go @@ -48,10 +48,6 @@ func dataSourceNutanixNDBSnapshot() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -109,64 +105,6 @@ func dataSourceNutanixNDBSnapshot() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "metadata": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secure_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "info": dataSourceEraDatabaseInfo(), - "deregister_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "from_timestamp": { - Type: schema.TypeString, - Computed: true, - }, - "to_timestamp": { - Type: schema.TypeString, - Computed: true, - }, - "replication_retry_count": { - Type: schema.TypeInt, - Computed: true, - }, - "last_replication_retyr_source_snapshot_id": { - Type: schema.TypeString, - Computed: true, - }, - "async": { - Type: schema.TypeBool, - Computed: true, - }, - "stand_by": { - Type: schema.TypeBool, - Computed: true, - }, - "curation_retry_count": { - Type: schema.TypeInt, - Computed: true, - }, - "operations_using_snapshot": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, "software_snapshot_id": { Type: schema.TypeString, Computed: true, @@ -179,11 +117,11 @@ func dataSourceNutanixNDBSnapshot() *schema.Resource { Type: schema.TypeInt, Computed: true, }, - "santised": { + "santized": { Type: schema.TypeBool, Computed: true, }, - "santised_from_snapshot_id": { + "santized_from_snapshot_id": { Type: 
schema.TypeString, Computed: true, }, @@ -230,7 +168,7 @@ func dataSourceNutanixNDBSnapshot() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "santised_snapshots": { + "santized_snapshots": { Type: schema.TypeString, Computed: true, }, @@ -296,10 +234,6 @@ func dataSourceNutanixNDBSnapshotRead(ctx context.Context, d *schema.ResourceDat return diag.FromErr(err) } - if err := d.Set("owner_id", resp.OwnerID); err != nil { - return diag.FromErr(err) - } - if err := d.Set("description", resp.Description); err != nil { return diag.FromErr(err) } @@ -375,11 +309,11 @@ func dataSourceNutanixNDBSnapshotRead(ctx context.Context, d *schema.ResourceDat return diag.FromErr(err) } - // if err := d.Set("santised", resp.Sanitized); err != nil { - // return diag.FromErr(err) - // } + if err := d.Set("santized", resp.Sanitized); err != nil { + return diag.FromErr(err) + } - if err := d.Set("santised_from_snapshot_id", resp.SanitisedFromSnapshotID); err != nil { + if err := d.Set("santized_from_snapshot_id", resp.SanitizedFromSnapshotID); err != nil { return diag.FromErr(err) } @@ -423,7 +357,7 @@ func dataSourceNutanixNDBSnapshotRead(ctx context.Context, d *schema.ResourceDat return diag.FromErr(err) } - if err := d.Set("santised_snapshots", resp.SanitisedSnapshots); err != nil { + if err := d.Set("santized_snapshots", resp.SanitizedSnapshots); err != nil { return diag.FromErr(err) } @@ -447,10 +381,6 @@ func dataSourceNutanixNDBSnapshotRead(ctx context.Context, d *schema.ResourceDat return diag.FromErr(err) } - if err := d.Set("metadata", flattenClonedMetadata(resp.Metadata)); err != nil { - return diag.FromErr(err) - } - d.SetId(snapID) return nil } diff --git a/nutanix/data_source_nutanix_ndb_snapshot_test.go b/nutanix/data_source_nutanix_ndb_snapshot_test.go index 0277815c..c18c1aea 100644 --- a/nutanix/data_source_nutanix_ndb_snapshot_test.go +++ b/nutanix/data_source_nutanix_ndb_snapshot_test.go @@ -18,9 +18,7 @@ func TestAccEraSnapshotDataSource_basic(t 
*testing.T) { Config: testAccEraSnapshotDataSourceConfig(), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "name"), - resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "owner_id"), resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "properties.#"), - resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "metadata.#", "1"), resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "snapshot_uuid"), resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "status", "ACTIVE"), ), @@ -38,10 +36,8 @@ func TestAccEraSnapshotDataSource_WithFilters(t *testing.T) { Config: testAccEraSnapshotDataSourceConfigWithFilters(), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "name"), - resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "owner_id"), resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "properties.#"), resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "nx_cluster_id"), - resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "metadata.#", "1"), resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotName, "snapshot_uuid"), resource.TestCheckResourceAttr(dataSourceNDBSnapshotName, "status", "ACTIVE"), ), diff --git a/nutanix/data_source_nutanix_ndb_snapshots.go b/nutanix/data_source_nutanix_ndb_snapshots.go index 888c85e0..c032093f 100644 --- a/nutanix/data_source_nutanix_ndb_snapshots.go +++ b/nutanix/data_source_nutanix_ndb_snapshots.go @@ -42,10 +42,6 @@ func dataSourceNutanixNDBSnapshots() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -103,64 +99,6 @@ func dataSourceNutanixNDBSnapshots() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "metadata": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "secure_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "info": dataSourceEraDatabaseInfo(), - "deregister_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "from_timestamp": { - Type: schema.TypeString, - Computed: true, - }, - "to_timestamp": { - Type: schema.TypeString, - Computed: true, - }, - "replication_retry_count": { - Type: schema.TypeInt, - Computed: true, - }, - "last_replication_retyr_source_snapshot_id": { - Type: schema.TypeString, - Computed: true, - }, - "async": { - Type: schema.TypeBool, - Computed: true, - }, - "stand_by": { - Type: schema.TypeBool, - Computed: true, - }, - "curation_retry_count": { - Type: schema.TypeInt, - Computed: true, - }, - "operations_using_snapshot": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, "software_snapshot_id": { Type: schema.TypeString, Computed: true, @@ -173,11 +111,11 @@ func dataSourceNutanixNDBSnapshots() *schema.Resource { Type: schema.TypeInt, Computed: true, }, - "santised": { + "santized": { Type: schema.TypeBool, Computed: true, }, - "santised_from_snapshot_id": { + "santized_from_snapshot_id": { Type: schema.TypeString, Computed: true, }, @@ -224,7 +162,7 @@ func dataSourceNutanixNDBSnapshots() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "santised_snapshots": { + "santized_snapshots": { Type: schema.TypeString, Computed: true, }, @@ -294,7 +232,6 @@ func flattenSnapshotsList(sn *era.ListSnapshots) []map[string]interface{} { snap["id"] = val.ID snap["name"] = val.Name snap["description"] = val.Description - snap["owner_id"] = val.OwnerID snap["date_created"] = val.DateCreated snap["date_modified"] = val.DateModified snap["properties"] = flattenDBInstanceProperties(val.Properties) @@ -310,12 +247,11 @@ func flattenSnapshotsList(sn *era.ListSnapshots) 
[]map[string]interface{} { snap["type"] = val.Type snap["applicable_types"] = val.ApplicableTypes snap["snapshot_timestamp"] = val.SnapshotTimeStamp - snap["metadata"] = flattenClonedMetadata(val.Metadata) snap["software_snapshot_id"] = val.SoftwareSnapshotID snap["software_database_snapshot"] = val.SoftwareDatabaseSnapshot snap["dbserver_storage_metadata_version"] = val.DBServerStorageMetadataVersion - // snap["santised"] = val.Sanitized - snap["santised_from_snapshot_id"] = val.SanitisedFromSnapshotID + snap["santized_from_snapshot_id"] = val.SanitizedFromSnapshotID + snap["santized"] = val.Sanitized snap["timezone"] = val.TimeZone snap["processed"] = val.Processed snap["database_snapshot"] = val.DatabaseSnapshot @@ -326,7 +262,7 @@ func flattenSnapshotsList(sn *era.ListSnapshots) []map[string]interface{} { snap["dbserver_ip"] = val.DbserverIP snap["replicated_snapshots"] = val.ReplicatedSnapshots snap["software_snapshot"] = val.SoftwareSnapshot - snap["santised_snapshots"] = val.SanitisedSnapshots + snap["santized_snapshots"] = val.SanitizedSnapshots snap["snapshot_family"] = val.SnapshotFamily snap["snapshot_timestamp_date"] = val.SnapshotTimeStampDate snap["lcm_config"] = flattenDBLcmConfig(val.LcmConfig) diff --git a/nutanix/data_source_nutanix_ndb_snapshots_test.go b/nutanix/data_source_nutanix_ndb_snapshots_test.go index 2bfc67a3..5d7a4d8c 100644 --- a/nutanix/data_source_nutanix_ndb_snapshots_test.go +++ b/nutanix/data_source_nutanix_ndb_snapshots_test.go @@ -17,9 +17,7 @@ func TestAccEraSnapshotsDataSource_basic(t *testing.T) { Config: testAccEraSnapshotsDataSourceConfig(), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.name"), - resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.owner_id"), resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.properties.#"), - resource.TestCheckResourceAttr(dataSourceNDBSnapshotsName, "snapshots.0.metadata.#", 
"1"), resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.snapshot_uuid"), resource.TestCheckResourceAttr(dataSourceNDBSnapshotsName, "snapshots.0.status", "ACTIVE"), ), @@ -37,9 +35,7 @@ func TestAccEraSnapshotsDataSource_WithFilters(t *testing.T) { Config: testAccEraSnapshotsDataSourceConfigWithFilters(), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.name"), - resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.owner_id"), resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.properties.#"), - resource.TestCheckResourceAttr(dataSourceNDBSnapshotsName, "snapshots.0.metadata.#", "1"), resource.TestCheckResourceAttrSet(dataSourceNDBSnapshotsName, "snapshots.0.snapshot_uuid"), resource.TestCheckResourceAttr(dataSourceNDBSnapshotsName, "snapshots.0.status", "ACTIVE"), ), diff --git a/nutanix/data_source_nutanix_ndb_tag.go b/nutanix/data_source_nutanix_ndb_tag.go new file mode 100644 index 00000000..744b944e --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_tag.go @@ -0,0 +1,105 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceNutanixNDBTag() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBTagRead, + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "required": { + Type: schema.TypeBool, + Computed: true, + }, + "entity_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "owner": { + Type: schema.TypeString, + Computed: true, + }, + "values": { + Type: schema.TypeInt, + Computed: true, + }, + "date_created": { + Type: 
schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceNutanixNDBTagRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + tagID := d.Get("id") + resp, err := conn.Service.ReadTags(ctx, tagID.(string)) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner", resp.Owner); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_created", resp.DateCreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.DateModified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("entity_type", resp.EntityType); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("values", resp.Values); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("required", resp.Required); err != nil { + return diag.FromErr(err) + } + + d.SetId(tagID.(string)) + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_tag_test.go b/nutanix/data_source_nutanix_ndb_tag_test.go new file mode 100644 index 00000000..764e67ff --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_tag_test.go @@ -0,0 +1,43 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraTagDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraTagDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_tags.tags", "tags.#"), + 
resource.TestCheckResourceAttrSet("data.nutanix_ndb_tags.tags", "tags.0.name"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_tags.tags", "tags.0.status"), + resource.TestCheckResourceAttr("data.nutanix_ndb_tag.tag", "values", "0"), + resource.TestCheckResourceAttr("data.nutanix_ndb_tag.tag", "status", "ENABLED"), + ), + }, + }, + }) +} + +func testAccEraTagDataSourceConfig() string { + return ` + resource "nutanix_ndb_tag" "acctest-managed" { + name= "test-tag" + description = "test tag description" + entity_type = "DATABASE" + required = false + } + + data "nutanix_ndb_tags" "tags"{ } + + data "nutanix_ndb_tag" "tag"{ + id = data.nutanix_ndb_tags.tags.tags.0.id + } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_tags.go b/nutanix/data_source_nutanix_ndb_tags.go new file mode 100644 index 00000000..311b11fd --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_tags.go @@ -0,0 +1,111 @@ +package nutanix + +import ( + "context" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + era "github.com/terraform-providers/terraform-provider-nutanix/client/era" +) + +func dataSourceNutanixNDBTags() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceNutanixNDBTagsRead, + Schema: map[string]*schema.Schema{ + "tags": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "required": { + Type: schema.TypeBool, + Computed: true, + }, + "entity_type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "owner": { + Type: schema.TypeString, + Computed: true, + }, + "values": { + Type: schema.TypeInt, + Computed: true, + }, + "date_created": { + 
Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceNutanixNDBTagsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.ListTags(ctx) + if err != nil { + return diag.FromErr(err) + } + + if e := d.Set("tags", flattenTagsList(resp)); e != nil { + return diag.FromErr(e) + } + + uuid, er := uuid.GenerateUUID() + if er != nil { + return diag.Errorf("Error generating UUID for era tags: %+v", er) + } + d.SetId(uuid) + return nil +} + +func flattenTagsList(pr *era.ListTagsResponse) []interface{} { + if pr != nil { + tagsList := make([]interface{}, 0) + + for _, v := range *pr { + tag := map[string]interface{}{} + + tag["id"] = v.ID + tag["name"] = v.Name + tag["description"] = v.Description + tag["required"] = v.Required + tag["entity_type"] = v.EntityType + tag["status"] = v.Status + tag["owner"] = v.Owner + tag["values"] = v.Values + tag["date_created"] = v.DateCreated + tag["date_modified"] = v.DateModified + + tagsList = append(tagsList, tag) + } + return tagsList + } + return nil +} diff --git a/nutanix/data_source_nutanix_ndb_tags_test.go b/nutanix/data_source_nutanix_ndb_tags_test.go new file mode 100644 index 00000000..43868e2d --- /dev/null +++ b/nutanix/data_source_nutanix_ndb_tags_test.go @@ -0,0 +1,42 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccEraTagsDataSource_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraTagsDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.nutanix_ndb_tags.tags", "tags.#"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_tags.tags", "tags.0.name"), + 
resource.TestCheckResourceAttrSet("data.nutanix_ndb_tags.tags", "tags.0.status"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_tags.tags", "tags.0.values"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_tags.tags", "tags.0.status"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_tags.tags", "tags.0.required"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_tags.tags", "tags.0.status"), + resource.TestCheckResourceAttrSet("data.nutanix_ndb_tags.tags", "tags.0.required"), + ), + }, + }, + }) +} + +func testAccEraTagsDataSourceConfig() string { + return ` + resource "nutanix_ndb_tag" "acctest-managed" { + name= "test-tag" + description = "test tag description" + entity_type = "DATABASE" + required = false + } + + data "nutanix_ndb_tags" "tags"{ } + ` +} diff --git a/nutanix/data_source_nutanix_ndb_time_machine.go b/nutanix/data_source_nutanix_ndb_time_machine.go index 789aac34..3bd52ad0 100644 --- a/nutanix/data_source_nutanix_ndb_time_machine.go +++ b/nutanix/data_source_nutanix_ndb_time_machine.go @@ -395,146 +395,6 @@ func dataSourceNutanixNDBTimeMachine() *schema.Resource { }, }, }, - "metadata": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secure_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "deregister_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "capability_reset_time": { - Type: schema.TypeString, - Computed: true, - }, - "auto_heal": { - Type: schema.TypeBool, - Computed: true, - }, - "auto_heal_snapshot_count": { - Type: schema.TypeInt, - Computed: true, - }, - "auto_heal_log_catchup_count": { - Type: schema.TypeInt, - Computed: true, - }, - "first_snapshot_captured": { - Type: schema.TypeBool, - Computed: true, - 
}, - "first_snapshot_dispatched": { - Type: schema.TypeBool, - Computed: true, - }, - "last_snapshot_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_auto_snapshot_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_snapshot_operation_id": { - Type: schema.TypeString, - Computed: true, - }, - "last_auto_snapshot_operation_id": { - Type: schema.TypeString, - Computed: true, - }, - "last_successful_snapshot_operation_id": { - Type: schema.TypeString, - Computed: true, - }, - "snapshot_successive_failure_count": { - Type: schema.TypeInt, - Computed: true, - }, - "last_heal_snapshot_operation": { - Type: schema.TypeString, - Computed: true, - }, - "last_log_catchup_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_successful_log_catchup_operation_id": { - Type: schema.TypeString, - Computed: true, - }, - "last_log_catchup_operation_id": { - Type: schema.TypeString, - Computed: true, - }, - "log_catchup_successive_failure_count": { - Type: schema.TypeInt, - Computed: true, - }, - "last_pause_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_pause_by_force": { - Type: schema.TypeBool, - Computed: true, - }, - "last_resume_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_pause_reason": { - Type: schema.TypeString, - Computed: true, - }, - "state_before_restore": { - Type: schema.TypeString, - Computed: true, - }, - "last_health_alerted_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_ea_breakdown_time": { - Type: schema.TypeString, - Computed: true, - }, - "authorized_dbservers": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "last_heal_time": { - Type: schema.TypeString, - Computed: true, - }, - "last_heal_system_triggered": { - Type: schema.TypeBool, - Computed: true, - }, - }, - }, - }, }, } } @@ -667,10 +527,6 @@ func dataSourceNutanixNDBTimeMachineRead(ctx context.Context, d *schema.Resource return 
diag.FromErr(err) } - if err := d.Set("metadata", flattenTimeMachineMetadata(resp.Metadata)); err != nil { - return diag.FromErr(err) - } - d.SetId(*resp.ID) return nil } diff --git a/nutanix/data_source_nutanix_ndb_time_machine_capability.go b/nutanix/data_source_nutanix_ndb_time_machine_capability.go index 43d3bc48..dfeb6933 100644 --- a/nutanix/data_source_nutanix_ndb_time_machine_capability.go +++ b/nutanix/data_source_nutanix_ndb_time_machine_capability.go @@ -253,11 +253,11 @@ func dataSourceNutanixNDBTmsCapability() *schema.Resource { Type: schema.TypeInt, Computed: true, }, - "santised": { + "santized": { Type: schema.TypeBool, Computed: true, }, - "santised_from_snapshot_id": { + "santized_from_snapshot_id": { Type: schema.TypeString, Computed: true, }, @@ -304,7 +304,7 @@ func dataSourceNutanixNDBTmsCapability() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "santised_snapshots": { + "santized_snapshots": { Type: schema.TypeString, Computed: true, }, @@ -842,11 +842,11 @@ func dataSourceNutanixNDBTmsCapability() *schema.Resource { Type: schema.TypeInt, Computed: true, }, - "santised": { + "santized": { Type: schema.TypeBool, Computed: true, }, - "santised_from_snapshot_id": { + "santized_from_snapshot_id": { Type: schema.TypeString, Computed: true, }, @@ -893,7 +893,7 @@ func dataSourceNutanixNDBTmsCapability() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "santised_snapshots": { + "santized_snapshots": { Type: schema.TypeString, Computed: true, }, @@ -1151,7 +1151,7 @@ func flattenLastContinousSnapshot(pr *era.LastContinuousSnapshot) []map[string]i snap["metadata"] = flattenLastContinousSnapshotMetadata(pr.Metadata) snap["software_snapshot_id"] = pr.SoftwareSnapshotID snap["software_database_snapshot"] = pr.SoftwareDatabaseSnapshot - snap["santised_from_snapshot_id"] = pr.SanitisedFromSnapshotID + snap["santized_from_snapshot_id"] = pr.SanitizedFromSnapshotID snap["processed"] = pr.Processed snap["database_snapshot"] 
= pr.DatabaseSnapshot snap["from_timestamp"] = pr.FromTimeStamp @@ -1161,7 +1161,7 @@ func flattenLastContinousSnapshot(pr *era.LastContinuousSnapshot) []map[string]i snap["dbserver_ip"] = pr.DBserverIP snap["replicated_snapshots"] = pr.ReplicatedSnapshots snap["software_snapshot"] = pr.SoftwareSnapshot - snap["santised_snapshots"] = pr.SanitisedSnapshots + snap["santized_snapshots"] = pr.SanitizedSnapshots snap["snapshot_family"] = pr.SnapshotFamily snap["snapshot_timestamp_date"] = pr.SnapshotTimeStampDate snap["lcm_config"] = flattenDBLcmConfig(pr.LcmConfig) diff --git a/nutanix/data_source_nutanix_ndb_time_machine_test.go b/nutanix/data_source_nutanix_ndb_time_machine_test.go index 60242bc2..f6c7527e 100644 --- a/nutanix/data_source_nutanix_ndb_time_machine_test.go +++ b/nutanix/data_source_nutanix_ndb_time_machine_test.go @@ -18,7 +18,6 @@ func TestAccEraTimeMachineDataSource_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceTMName, "name"), resource.TestCheckResourceAttrSet(dataSourceTMName, "description"), - resource.TestCheckResourceAttr(dataSourceTMName, "metadata.#", "1"), resource.TestCheckResourceAttr(dataSourceTMName, "clone", "false"), resource.TestCheckResourceAttr(dataSourceTMName, "sla.#", "1"), resource.TestCheckResourceAttr(dataSourceTMName, "schedule.#", "1"), @@ -38,7 +37,6 @@ func TestAccEraTimeMachineDataSource_basicWithID(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceTMName, "name"), resource.TestCheckResourceAttrSet(dataSourceTMName, "description"), - resource.TestCheckResourceAttr(dataSourceTMName, "metadata.#", "1"), resource.TestCheckResourceAttr(dataSourceTMName, "clone", "false"), resource.TestCheckResourceAttr(dataSourceTMName, "sla.#", "1"), resource.TestCheckResourceAttr(dataSourceTMName, "schedule.#", "1"), diff --git a/nutanix/data_source_nutanix_ndb_time_machines.go b/nutanix/data_source_nutanix_ndb_time_machines.go index 
3365492b..ff418334 100644 --- a/nutanix/data_source_nutanix_ndb_time_machines.go +++ b/nutanix/data_source_nutanix_ndb_time_machines.go @@ -51,7 +51,6 @@ func flattenTimeMachines(tms *era.ListTimeMachines) []map[string]interface{} { tmac["id"] = pr.ID tmac["name"] = pr.Name tmac["description"] = pr.Description - tmac["owner_id"] = pr.OwnerID tmac["date_created"] = pr.DateCreated tmac["date_modified"] = pr.DateModified tmac["access_level"] = pr.AccessLevel @@ -59,24 +58,20 @@ func flattenTimeMachines(tms *era.ListTimeMachines) []map[string]interface{} { tmac["tags"] = flattenDBTags(pr.Tags) tmac["clustered"] = pr.Clustered tmac["clone"] = pr.Clone - tmac["internal"] = pr.Internal tmac["database_id"] = pr.DatabaseID tmac["type"] = pr.Type - tmac["category"] = pr.Category tmac["status"] = pr.Status tmac["ea_status"] = pr.EaStatus tmac["scope"] = pr.Scope tmac["sla_id"] = pr.SLAID tmac["schedule_id"] = pr.ScheduleID tmac["metric"] = pr.Metric - // tmac["sla_update_metadata"] = pr.SLAUpdateMetadata tmac["database"] = pr.Database tmac["clones"] = pr.Clones tmac["source_nx_clusters"] = pr.SourceNxClusters tmac["sla_update_in_progress"] = pr.SLAUpdateInProgress tmac["sla"] = flattenDBSLA(pr.SLA) tmac["schedule"] = flattenSchedule(pr.Schedule) - tmac["metadata"] = flattenTimeMachineMetadata(pr.Metadata) lst = append(lst, tmac) } diff --git a/nutanix/data_source_nutanix_ndb_time_machines_test.go b/nutanix/data_source_nutanix_ndb_time_machines_test.go index f3f91e74..e133df38 100644 --- a/nutanix/data_source_nutanix_ndb_time_machines_test.go +++ b/nutanix/data_source_nutanix_ndb_time_machines_test.go @@ -18,7 +18,6 @@ func TestAccEraTimeMachinesDataSource_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceTMsName, "time_machines.0.name"), resource.TestCheckResourceAttrSet(dataSourceTMsName, "time_machines.0.description"), - resource.TestCheckResourceAttr(dataSourceTMsName, "time_machines.0.metadata.#", "1"), 
resource.TestCheckResourceAttr(dataSourceTMsName, "time_machines.0.clone", "false"), resource.TestCheckResourceAttr(dataSourceTMsName, "time_machines.0.sla.#", "1"), resource.TestCheckResourceAttr(dataSourceTMsName, "time_machines.0.schedule.#", "1"), diff --git a/nutanix/provider.go b/nutanix/provider.go index 2588770e..65ff7804 100644 --- a/nutanix/provider.go +++ b/nutanix/provider.go @@ -203,6 +203,14 @@ func Provider() *schema.Provider { "nutanix_ndb_snapshot": dataSourceNutanixNDBSnapshot(), "nutanix_ndb_snapshots": dataSourceNutanixNDBSnapshots(), "nutanix_ndb_tms_capability": dataSourceNutanixNDBTmsCapability(), + "nutanix_ndb_maintenance_window": dataSourceNutanixNDBMaintenanceWindow(), + "nutanix_ndb_maintenance_windows": dataSourceNutanixNDBMaintenanceWindows(), + "nutanix_ndb_tag": dataSourceNutanixNDBTag(), + "nutanix_ndb_tags": dataSourceNutanixNDBTags(), + "nutanix_ndb_network": dataSourceNutanixEraNetwork(), + "nutanix_ndb_networks": dataSourceNutanixEraNetworks(), + "nutanix_ndb_dbserver": dataSourceNutanixNDBDBServer(), + "nutanix_ndb_dbservers": dataSourceNutanixNDBDBServers(), }, ResourcesMap: map[string]*schema.Resource{ "nutanix_virtual_machine": resourceNutanixVirtualMachine(), @@ -244,6 +252,16 @@ func Provider() *schema.Provider { "nutanix_ndb_clone": resourceNutanixNDBClone(), "nutanix_ndb_authorize_dbserver": resourceNutanixNDBAuthorizeDBServer(), "nutanix_ndb_linked_databases": resourceNutanixNDBLinkedDB(), + "nutanix_ndb_maintenance_window": resourceNutanixNDBMaintenanceWindow(), + "nutanix_ndb_maintenance_task": resourceNutanixNDBMaintenanceTask(), + "nutanix_ndb_tms_cluster": resourceNutanixNDBTmsCluster(), + "nutanix_ndb_tag": resourceNutanixNDBTags(), + "nutanix_ndb_network": resourceNutanixNDBNetwork(), + "nutanix_ndb_dbserver_vm": resourceNutanixNDBServerVM(), + "nutanix_ndb_register_dbserver": resourceNutanixNDBRegisterDBServer(), + "nutanix_ndb_stretched_vlan": resourceNutanixNDBStretchedVlan(), + "nutanix_ndb_clone_refresh": 
resourceNutanixNDBCloneRefresh(), + "nutanix_ndb_cluster": resourceNutanixNDBCluster(), }, ConfigureContextFunc: providerConfigure, } diff --git a/nutanix/resource_nutanix_ndb_clone.go b/nutanix/resource_nutanix_ndb_clone.go index 4a0e250a..883b958b 100644 --- a/nutanix/resource_nutanix_ndb_clone.go +++ b/nutanix/resource_nutanix_ndb_clone.go @@ -17,7 +17,13 @@ func resourceNutanixNDBClone() *schema.Resource { ReadContext: resourceNutanixNDBCloneRead, UpdateContext: resourceNutanixNDBCloneUpdate, DeleteContext: resourceNutanixNDBCloneDelete, - + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(EraProvisionTimeout), + Delete: schema.DefaultTimeout(EraProvisionTimeout), + }, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, Schema: map[string]*schema.Schema{ "time_machine_id": { Type: schema.TypeString, @@ -247,6 +253,42 @@ func resourceNutanixNDBClone() *schema.Resource { }, "actionarguments": actionArgumentsSchema(), + // delete arguments for clone resource. 
+ "delete": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "remove": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "soft_remove": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "forced": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "delete_time_machine": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "delete_logical_cluster": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "remove_logical_cluster": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, // Computed values "properties": { @@ -268,11 +310,6 @@ func resourceNutanixNDBClone() *schema.Resource { }, }, }, - - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -286,18 +323,6 @@ func resourceNutanixNDBClone() *schema.Resource { Type: schema.TypeBool, Computed: true, }, - "era_created": { - Type: schema.TypeBool, - Computed: true, - }, - "internal": { - Type: schema.TypeBool, - Computed: true, - }, - "placeholder": { - Type: schema.TypeBool, - Computed: true, - }, "database_name": { Type: schema.TypeString, Computed: true, @@ -314,19 +339,7 @@ func resourceNutanixNDBClone() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "database_status": { - Type: schema.TypeString, - Computed: true, - }, "info": dataSourceEraDatabaseInfo(), - "group_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "metadata": dataSourceEraDBInstanceMetadata(), "metric": { Type: schema.TypeMap, Computed: true, @@ -334,22 +347,10 @@ func resourceNutanixNDBClone() *schema.Resource { Type: schema.TypeString, }, }, - "category": { - Type: schema.TypeString, - Computed: true, - }, - "parent_time_machine_id": { - Type: schema.TypeString, - Computed: true, - }, "parent_database_id": { Type: schema.TypeString, Computed: true, }, - 
"parent_source_database_id": { - Type: schema.TypeString, - Computed: true, - }, "time_machine": dataSourceEraTimeMachine(), "dbserver_logical_cluster": { Type: schema.TypeMap, @@ -424,7 +425,7 @@ func resourceNutanixNDBCloneCreate(ctx context.Context, d *schema.ResourceData, return diag.Errorf("error waiting for time machine clone (%s) to create: %s", resp.Entityid, errWaitTask) } - log.Printf("NDB clone with %s id created successfully", d.Id()) + log.Printf("NDB clone with %s id is created successfully", d.Id()) return resourceNutanixNDBCloneRead(ctx, d, meta) } @@ -477,14 +478,6 @@ func resourceNutanixNDBCloneRead(ctx context.Context, d *schema.ResourceData, me return diag.FromErr(err) } - if err := d.Set("internal", resp.Internal); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("placeholder", resp.Placeholder); err != nil { - return diag.FromErr(err) - } - if err := d.Set("database_name", resp.Databasename); err != nil { return diag.FromErr(err) } @@ -501,22 +494,10 @@ func resourceNutanixNDBCloneRead(ctx context.Context, d *schema.ResourceData, me return diag.FromErr(err) } - if err := d.Set("database_status", resp.Databasestatus); err != nil { - return diag.FromErr(err) - } - if err := d.Set("dbserver_logical_cluster_id", resp.Dbserverlogicalclusterid); err != nil { return diag.FromErr(err) } - if err := d.Set("time_machine_id", resp.Timemachineid); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("parent_time_machine_id", resp.Parenttimemachineid); err != nil { - return diag.FromErr(err) - } - if err := d.Set("time_zone", resp.Timezone); err != nil { return diag.FromErr(err) } @@ -525,30 +506,14 @@ func resourceNutanixNDBCloneRead(ctx context.Context, d *schema.ResourceData, me return diag.FromErr(err) } - if err := d.Set("group_info", resp.GroupInfo); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("metadata", flattenDBInstanceMetadata(resp.Metadata)); err != nil { - return diag.FromErr(err) - } - if err := 
d.Set("metric", resp.Metric); err != nil { return diag.FromErr(err) } - if err := d.Set("category", resp.Category); err != nil { - return diag.FromErr(err) - } - if err := d.Set("parent_database_id", resp.ParentDatabaseID); err != nil { return diag.FromErr(err) } - if err := d.Set("parent_source_database_id", resp.ParentSourceDatabaseID); err != nil { - return diag.FromErr(err) - } - if err := d.Set("lcm_config", flattenDBLcmConfig(resp.Lcmconfig)); err != nil { return diag.FromErr(err) } @@ -608,7 +573,7 @@ func resourceNutanixNDBCloneUpdate(ctx context.Context, d *schema.ResourceData, } if res != nil { - log.Printf("NDB clone with %s id update successfully", d.Id()) + log.Printf("NDB clone with %s id is updated successfully", d.Id()) } return resourceNutanixNDBCloneRead(ctx, d, meta) @@ -622,15 +587,36 @@ func resourceNutanixNDBCloneDelete(ctx context.Context, d *schema.ResourceData, dbID := d.Id() - req := era.DeleteDatabaseRequest{ - Delete: true, - Remove: false, - Softremove: false, - Forced: false, - Deletetimemachine: true, - Deletelogicalcluster: true, + req := &era.DeleteDatabaseRequest{} + + if delete, ok := d.GetOk("delete"); ok { + req.Delete = delete.(bool) } - res, err := conn.Service.DeleteClone(ctx, dbID, &req) + + if remove, ok := d.GetOk("remove"); ok { + req.Remove = remove.(bool) + } + + if softremove, ok := d.GetOk("soft_remove"); ok { + req.Softremove = softremove.(bool) + } + + if forced, ok := d.GetOk("forced"); ok { + req.Forced = forced.(bool) + } + + if deltms, ok := d.GetOk("delete_time_machine"); ok { + req.Deletetimemachine = deltms.(bool) + } + + if dellogicalcls, ok := d.GetOk("delete_logical_cluster"); ok { + req.Deletelogicalcluster = dellogicalcls.(bool) + } + if remlogicalcls, ok := d.GetOk("remove_logical_cluster"); ok { + req.Deletelogicalcluster = remlogicalcls.(bool) + } + + res, err := conn.Service.DeleteClone(ctx, dbID, req) if err != nil { return diag.FromErr(err) } @@ -651,14 +637,14 @@ func 
resourceNutanixNDBCloneDelete(ctx context.Context, d *schema.ResourceData, Pending: []string{"PENDING"}, Target: []string{"COMPLETED", "FAILED"}, Refresh: eraRefresh(ctx, conn, opReq), - Timeout: d.Timeout(schema.TimeoutCreate), + Timeout: d.Timeout(schema.TimeoutDelete), Delay: eraDelay, } if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { return diag.Errorf("error waiting for clone Instance (%s) to unregister: %s", res.Entityid, errWaitTask) } - log.Printf("NDB clone with %s id deleted successfully", d.Id()) + log.Printf("NDB clone with %s id is deleted successfully", d.Id()) return nil } diff --git a/nutanix/resource_nutanix_ndb_clone_refresh.go b/nutanix/resource_nutanix_ndb_clone_refresh.go new file mode 100644 index 00000000..93506d60 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_clone_refresh.go @@ -0,0 +1,114 @@ +package nutanix + +import ( + "context" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +var ( + EraRefreshCloneTimeout = 15 * time.Minute +) + +func resourceNutanixNDBCloneRefresh() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBCloneRefreshCreate, + ReadContext: resourceNutanixNDBCloneRefreshRead, + UpdateContext: resourceNutanixNDBCloneRefreshUpdate, + DeleteContext: resourceNutanixNDBCloneRefreshDelete, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(EraRefreshCloneTimeout), + }, + Schema: map[string]*schema.Schema{ + "clone_id": { + Type: schema.TypeString, + Required: true, + }, + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"user_pitr_timestamp"}, + }, + "user_pitr_timestamp": { + Type: schema.TypeString, + Optional: true, 
+ ConflictsWith: []string{"snapshot_id"}, + }, + "timezone": { + Type: schema.TypeString, + Optional: true, + Default: "Asia/Calcutta", + }, + }, + } +} + +func resourceNutanixNDBCloneRefreshCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.CloneRefreshInput{} + cloneID := "" + if clone, ok := d.GetOk("clone_id"); ok { + cloneID = clone.(string) + } + + if snapshotID, ok := d.GetOk("snapshot_id"); ok { + req.SnapshotID = utils.StringPtr(snapshotID.(string)) + } + + if userPitrTime, ok := d.GetOk("user_pitr_timestamp"); ok { + req.UserPitrTimestamp = utils.StringPtr(userPitrTime.(string)) + } + + if timezone, ok := d.GetOk("timezone"); ok { + req.Timezone = utils.StringPtr(timezone.(string)) + } + + resp, err := conn.Service.RefreshClone(ctx, req, cloneID) + if err != nil { + return diag.FromErr(err) + } + + // Get Operation ID from response of clone refresh and poll for the operation to get completed. + opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for db refresh clone (%s) to create: %s", resp.Entityid, errWaitTask) + } + log.Printf("NDB clone Refresh with %s id is completed successfully", d.Id()) + d.SetId(resp.Operationid) + return nil +} + +func resourceNutanixNDBCloneRefreshRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} + +func resourceNutanixNDBCloneRefreshUpdate(ctx context.Context, d *schema.ResourceData, meta 
interface{}) diag.Diagnostics { + return nil +} + +func resourceNutanixNDBCloneRefreshDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} diff --git a/nutanix/resource_nutanix_ndb_clone_refresh_test.go b/nutanix/resource_nutanix_ndb_clone_refresh_test.go new file mode 100644 index 00000000..5dcb4649 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_clone_refresh_test.go @@ -0,0 +1,43 @@ +package nutanix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceNameCloneRefresh = "nutanix_ndb_clone_refresh.acctest-managed" + +func TestAccEra_CloneRefreshbasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraCloneRefreshConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(resourceNameCloneRefresh, "snapshot_id"), + resource.TestCheckResourceAttrSet(resourceNameCloneRefresh, "timezone"), + ), + }, + }, + }) +} + +func testAccEraCloneRefreshConfig() string { + return ` + data "nutanix_ndb_clones" "clones"{ } + + data "nutanix_ndb_time_machines" "test1" {} + + data "nutanix_ndb_tms_capability" "test"{ + time_machine_id = data.nutanix_ndb_time_machines.test1.time_machines.0.id + } + + resource "nutanix_ndb_clone_refresh" "acctest-managed"{ + clone_id = data.nutanix_ndb_clones.clones.clones.0.id + snapshot_id = data.nutanix_ndb_tms_capability.test.capability.1.snapshots.0.id + timezone = "Asia/Calcutta" + } + ` +} diff --git a/nutanix/resource_nutanix_ndb_clone_test.go b/nutanix/resource_nutanix_ndb_clone_test.go index 61f35e50..ca17cdba 100644 --- a/nutanix/resource_nutanix_ndb_clone_test.go +++ b/nutanix/resource_nutanix_ndb_clone_test.go @@ -27,8 +27,6 @@ func TestAccEra_Clonebasic(t *testing.T) { resource.TestCheckResourceAttr(resourceClone, "clone", "true"), 
resource.TestCheckResourceAttrSet(resourceClone, "date_created"), resource.TestCheckResourceAttrSet(resourceClone, "database_name"), - resource.TestCheckResourceAttrSet(resourceClone, "database_status"), - resource.TestCheckResourceAttrSet(resourceClone, "metadata.#"), resource.TestCheckResourceAttrSet(resourceClone, "database_nodes.#"), resource.TestCheckResourceAttrSet(resourceClone, "linked_databases.#"), ), diff --git a/nutanix/resource_nutanix_ndb_cluster.go b/nutanix/resource_nutanix_ndb_cluster.go new file mode 100644 index 00000000..7b55fc0a --- /dev/null +++ b/nutanix/resource_nutanix_ndb_cluster.go @@ -0,0 +1,620 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBCluster() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBClusterCreate, + ReadContext: resourceNutanixNDBClusterRead, + UpdateContext: resourceNutanixNDBClusterUpdate, + DeleteContext: resourceNutanixNDBClusterDelete, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "cluster_ip": { + Type: schema.TypeString, + Required: true, + }, + "username": { + Type: schema.TypeString, + Required: true, + }, + "password": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + }, + "storage_container": { + Type: schema.TypeString, + Required: true, + }, + "agent_vm_prefix": { + Type: schema.TypeString, + Optional: true, + Default: "EraAgent", + }, + "port": { + Type: schema.TypeInt, + Optional: true, + Default: "9440", + }, + "protocol": { + Type: schema.TypeString, + Optional: true, + Default: 
"https", + }, + "cluster_type": { + Type: schema.TypeString, + Optional: true, + Default: "NTNX", + }, + "version": { + Type: schema.TypeString, + Optional: true, + Default: "v2", + }, + "agent_network_info": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dns": { + Type: schema.TypeString, + Optional: true, + }, + "ntp": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "networks_info": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + }, + "network_info": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vlan_name": { + Type: schema.TypeString, + Optional: true, + }, + "static_ip": { + Type: schema.TypeString, + Optional: true, + }, + "gateway": { + Type: schema.TypeString, + Optional: true, + }, + "subnet_mask": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "access_type": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + + // computed + "id": { + Type: schema.TypeString, + Computed: true, + }, + "unique_name": { + Type: schema.TypeString, + Computed: true, + }, + "ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "fqdns": { + Type: schema.TypeString, + Computed: true, + }, + "nx_cluster_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "cloud_type": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "hypervisor_type": { + Type: schema.TypeString, + Computed: 
true, + }, + "hypervisor_version": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ref_id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "reference_count": { + Type: schema.TypeInt, + Computed: true, + }, + "cloud_info": { + Type: schema.TypeString, + Computed: true, + }, + "resource_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_threshold_percentage": { + Type: schema.TypeFloat, + Computed: true, + }, + "memory_threshold_percentage": { + Type: schema.TypeFloat, + Computed: true, + }, + }, + }, + }, + "management_server_info": { + Type: schema.TypeString, + Computed: true, + }, + "entity_counts": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "db_servers": { + Type: schema.TypeInt, + Computed: true, + }, + "engine_counts": engineCountSchema(), + }, + }, + }, + "healthy": { + Type: schema.TypeBool, + Computed: true, + }, + }, + } +} + +func resourceNutanixNDBClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.ClusterIntentInput{} + + if name, ok := d.GetOk("name"); ok { + req.ClusterName = utils.StringPtr(name.(string)) + } + if desc, ok := d.GetOk("description"); ok { + req.ClusterDescription = utils.StringPtr(desc.(string)) + } + if clsip, ok := d.GetOk("cluster_ip"); ok { + req.ClusterIP = utils.StringPtr(clsip.(string)) + } + if storageContainer, ok := d.GetOk("storage_container"); ok { + req.StorageContainer = 
utils.StringPtr(storageContainer.(string)) + } + if protocol, ok := d.GetOk("protocol"); ok { + req.Protocol = utils.StringPtr(protocol.(string)) + } + if agentPrefix, ok := d.GetOk("agent_vm_prefix"); ok { + req.AgentVMPrefix = utils.StringPtr(agentPrefix.(string)) + } + if port, ok := d.GetOk("port"); ok { + req.Port = utils.IntPtr(port.(int)) + } + if clsType, ok := d.GetOk("cluster_type"); ok { + req.ClusterType = utils.StringPtr(clsType.(string)) + } + if version, ok := d.GetOk("version"); ok { + req.Version = utils.StringPtr(version.(string)) + } + + if username, ok := d.GetOk("username"); ok { + creds := make([]*era.NameValueParams, 0) + + creds = append(creds, &era.NameValueParams{ + Name: utils.StringPtr("username"), + Value: utils.StringPtr(username.(string)), + }) + creds = append(creds, &era.NameValueParams{ + Name: utils.StringPtr("password"), + Value: utils.StringPtr(d.Get("password").(string)), + }) + + req.CredentialsInfo = creds + } + if agentNetInfo, ok := d.GetOk("agent_network_info"); ok { + req.AgentNetworkInfo = expandCredentialInfo(agentNetInfo.([]interface{})) + } + if netinfo, ok := d.GetOk("networks_info"); ok { + req.NetworksInfo = expandNetworkInfo(netinfo.([]interface{})) + } + // api to create cluster + resp, err := conn.Service.CreateCluster(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + // Get Operation ID from response of Cluster and poll for the operation to get completed. 
+ opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for cluster (%s) to register: %s", resp.Entityid, errWaitTask) + } + + clsName := d.Get("name") + // api to fetch clusters based on name + getResp, er := conn.Service.GetCluster(ctx, "", clsName.(string)) + if er != nil { + return diag.FromErr(er) + } + d.SetId(*getResp.ID) + log.Printf("NDB cluster with %s id is registered successfully", d.Id()) + return resourceNutanixNDBClusterRead(ctx, d, meta) +} + +func resourceNutanixNDBClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.GetCluster(ctx, d.Id(), "") + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("id", resp.ID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("unique_name", resp.Uniquename); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("ip_addresses", resp.Ipaddresses); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("fqdns", resp.Fqdns); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("nx_cluster_uuid", resp.Nxclusteruuid); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + if err := d.Set("cloud_type", resp.Cloudtype); err != nil { 
+ return diag.FromErr(err) + } + + if err := d.Set("date_created", resp.Datecreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.Datemodified); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.Ownerid); err != nil { + return diag.FromErr(err) + } + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("version", resp.Version); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("hypervisor_type", resp.Hypervisortype); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("hypervisor_version", resp.Hypervisorversion); err != nil { + return diag.FromErr(err) + } + if err := d.Set("properties", flattenClusterProperties(resp.Properties)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("reference_count", resp.Referencecount); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("username", resp.Username); err != nil { + return diag.FromErr(err) + } + if err := d.Set("cloud_info", resp.Cloudinfo); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("resource_config", flattenResourceConfig(resp.Resourceconfig)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("management_server_info", resp.Managementserverinfo); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("entity_counts", flattenEntityCounts(resp.EntityCounts)); err != nil { + return diag.FromErr(err) + } + if err := d.Set("healthy", resp.Healthy); err != nil { + return diag.FromErr(err) + } + return nil +} + +func resourceNutanixNDBClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.ClusterUpdateInput{} + + resp, err := conn.Service.GetCluster(ctx, d.Id(), "") + if err != nil { + return diag.FromErr(err) + } + + if resp != nil { + req.Name = resp.Name + req.Description = resp.Description + req.IPAddresses = 
resp.Ipaddresses + } + + if d.HasChange("name") { + req.Name = utils.StringPtr(d.Get("name").(string)) + } + if d.HasChange("description") { + req.Description = utils.StringPtr(d.Get("description").(string)) + } + if d.HasChange("cluster_ip") { + ips := make([]*string, 0) + clsIPs := d.Get("cluster_ip").([]interface{}) + + for _, v := range clsIPs { + ips = append(ips, utils.StringPtr(v.(string))) + } + req.IPAddresses = ips + } + + if d.HasChange("username") { + req.Username = utils.StringPtr(d.Get("username").(string)) + } + if d.HasChange("password") { + req.Password = utils.StringPtr(d.Get("password").(string)) + } + + // call update cluster API + _, er := conn.Service.UpdateCluster(ctx, req, d.Id()) + if er != nil { + return diag.FromErr(er) + } + + log.Printf("NDB cluster with %s id is updated successfully", d.Id()) + return resourceNutanixNDBClusterRead(ctx, d, meta) +} + +func resourceNutanixNDBClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.DeleteClusterInput{ + DeleteRemoteSites: false, + } + + resp, err := conn.Service.DeleteCluster(ctx, req, d.Id()) + if err != nil { + return diag.FromErr(err) + } + log.Printf("Operation to delete cluster with id %s has started, operation id: %s", d.Id(), resp.Operationid) + opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Cluster GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for cluster (%s) to delete: %s", 
resp.Entityid, errWaitTask) + } + log.Printf("NDB cluster with %s id is deleted successfully", d.Id()) + return nil +} + +func expandCredentialInfo(pr []interface{}) []*era.NameValueParams { + if len(pr) > 0 { + creds := make([]*era.NameValueParams, 0) + + cred := pr[0].(map[string]interface{}) + for key, v := range cred { + creds = append(creds, &era.NameValueParams{ + Name: utils.StringPtr(key), + Value: utils.StringPtr(v.(string)), + }) + } + return creds + } + return nil +} + +func expandNetworkInfo(pr []interface{}) []*era.NetworksInfo { + if len(pr) > 0 { + networkInfo := make([]*era.NetworksInfo, 0) + + for _, v := range pr { + val := v.(map[string]interface{}) + netInfo := &era.NetworksInfo{} + if netType, ok := val["type"]; ok { + netInfo.Type = utils.StringPtr(netType.(string)) + } + if infos, ok := val["network_info"]; ok { + netInfo.NetworkInfo = expandClusterNetworkInfo(infos.([]interface{})) + } + if accessType, ok := val["access_type"]; ok { + accessList := accessType.([]interface{}) + res := make([]*string, 0) + for _, v := range accessList { + res = append(res, utils.StringPtr(v.(string))) + } + netInfo.AccessType = res + } + networkInfo = append(networkInfo, netInfo) + } + return networkInfo + } + return nil +} + +func expandClusterNetworkInfo(pr []interface{}) []*era.NameValueParams { + if len(pr) > 0 { + networkInfos := make([]*era.NameValueParams, 0) + + for _, v := range pr { + val := v.(map[string]interface{}) + + if vlan, ok := val["vlan_name"]; ok { + networkInfos = append(networkInfos, &era.NameValueParams{ + Name: utils.StringPtr("vlanName"), + Value: utils.StringPtr(vlan.(string)), + }) + } + + if vlan, ok := val["static_ip"]; ok { + networkInfos = append(networkInfos, &era.NameValueParams{ + Name: utils.StringPtr("staticIP"), + Value: utils.StringPtr(vlan.(string)), + }) + } + + if vlan, ok := val["gateway"]; ok { + networkInfos = append(networkInfos, &era.NameValueParams{ + Name: utils.StringPtr("gateway"), + Value: 
utils.StringPtr(vlan.(string)), + }) + } + + if vlan, ok := val["subnet_mask"]; ok { + networkInfos = append(networkInfos, &era.NameValueParams{ + Name: utils.StringPtr("subnetMask"), + Value: utils.StringPtr(vlan.(string)), + }) + } + } + return networkInfos + } + return nil +} diff --git a/nutanix/resource_nutanix_ndb_cluster_test.go b/nutanix/resource_nutanix_ndb_cluster_test.go new file mode 100644 index 00000000..3c85e49f --- /dev/null +++ b/nutanix/resource_nutanix_ndb_cluster_test.go @@ -0,0 +1,94 @@ +package nutanix + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceNDBCluster = "nutanix_ndb_cluster.acctest-managed" + +func TestAccEra_Clusterbasic(t *testing.T) { + r := randIntBetween(25, 35) + name := fmt.Sprintf("testcluster-%d", r) + updatedName := fmt.Sprintf("testcluster-updated-%d", r) + desc := "this is cluster desc" + updatedDesc := "updated description for cluster" + storageContainer := testVars.NDB.RegisterClusterInfo.StorageContainer + clusterIP := testVars.NDB.RegisterClusterInfo.ClusterIP + username := testVars.NDB.RegisterClusterInfo.Username + password := testVars.NDB.RegisterClusterInfo.Password + staticIP := testVars.NDB.RegisterClusterInfo.StaticIP + subnetMask := testVars.NDB.RegisterClusterInfo.SubnetMask + gateway := testVars.NDB.RegisterClusterInfo.Gateway + dns := testVars.NDB.RegisterClusterInfo.DNS + ntp := testVars.NDB.RegisterClusterInfo.NTP + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraClusterConfig(name, desc, clusterIP, username, password, staticIP, subnetMask, gateway, dns, ntp, storageContainer), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNDBCluster, "name", name), + resource.TestCheckResourceAttr(resourceNDBCluster, "description", desc), + resource.TestCheckResourceAttrSet(resourceNDBCluster, 
"unique_name"), + resource.TestCheckResourceAttr(resourceNDBCluster, "cloud_type", "NTNX"), + resource.TestCheckResourceAttr(resourceNDBCluster, "status", "UP"), + resource.TestCheckResourceAttr(resourceNDBCluster, "healthy", "true"), + resource.TestCheckResourceAttrSet(resourceNDBCluster, "properties.#"), + resource.TestCheckResourceAttr(resourceNDBCluster, "hypervisor_type", "AHV"), + resource.TestCheckResourceAttr(resourceNDBCluster, "version", "v2"), + ), + }, + { + Config: testAccEraClusterConfig(updatedName, updatedDesc, clusterIP, username, password, staticIP, subnetMask, gateway, dns, ntp, storageContainer), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNDBCluster, "name", updatedName), + resource.TestCheckResourceAttr(resourceNDBCluster, "description", updatedDesc), + resource.TestCheckResourceAttrSet(resourceNDBCluster, "unique_name"), + resource.TestCheckResourceAttr(resourceNDBCluster, "cloud_type", "NTNX"), + resource.TestCheckResourceAttr(resourceNDBCluster, "status", "UP"), + resource.TestCheckResourceAttr(resourceNDBCluster, "healthy", "true"), + resource.TestCheckResourceAttrSet(resourceNDBCluster, "properties.#"), + resource.TestCheckResourceAttr(resourceNDBCluster, "hypervisor_type", "AHV"), + resource.TestCheckResourceAttr(resourceNDBCluster, "version", "v2"), + ), + }, + }, + }) +} + +func testAccEraClusterConfig(name, desc, cluster, user, pass, static, mask, gateway, dns, ntp, container string) string { + return fmt.Sprintf( + ` + resource "nutanix_ndb_cluster" "acctest-managed" { + name= "%[1]s" + description = "%[2]s" + cluster_ip = "%[3]s" + username= "%[4]s" + password = "%[5]s" + storage_container = "%[11]s" + agent_network_info{ + dns = "%[9]s" + ntp = "%[10]s" + } + networks_info{ + type = "DHCP" + network_info{ + vlan_name = "vlan_static" + static_ip = "%[6]s" + gateway = "%[8]s" + subnet_mask="%[7]s" + } + access_type = [ + "PRISM", + "DSIP", + "DBSERVER" + ] + } + } + `, name, desc, cluster, user, 
pass, static, mask, gateway, dns, ntp, container, + ) +} diff --git a/nutanix/resource_nutanix_ndb_database.go b/nutanix/resource_nutanix_ndb_database.go index a9459102..0a06de83 100644 --- a/nutanix/resource_nutanix_ndb_database.go +++ b/nutanix/resource_nutanix_ndb_database.go @@ -28,6 +28,8 @@ func resourceDatabaseInstance() *schema.Resource { DeleteContext: deleteDatabaseInstance, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(EraProvisionTimeout), + Update: schema.DefaultTimeout(EraProvisionTimeout), + Delete: schema.DefaultTimeout(EraProvisionTimeout), }, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, @@ -226,6 +228,10 @@ func resourceDatabaseInstance() *schema.Resource { Type: schema.TypeString, Required: true, }, + "cluster_description": { + Type: schema.TypeString, + Optional: true, + }, "patroni_cluster_name": { Type: schema.TypeString, Required: true, @@ -318,13 +324,78 @@ func resourceDatabaseInstance() *schema.Resource { }, }, }, + "cluster_info": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_ip_infos": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nx_cluster_id": { + Type: schema.TypeString, + Required: true, + }, + "ip_infos": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_type": { + Type: schema.TypeString, + Optional: true, + }, + "ip_addresses": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + // delete arguments for database instance + "delete": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "remove": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "soft_remove": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "forced": 
{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "delete_time_machine": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "delete_logical_cluster": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, // Computed values - - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -338,18 +409,6 @@ func resourceDatabaseInstance() *schema.Resource { Type: schema.TypeBool, Computed: true, }, - "era_created": { - Type: schema.TypeBool, - Computed: true, - }, - "internal": { - Type: schema.TypeBool, - Computed: true, - }, - "placeholder": { - Type: schema.TypeBool, - Computed: true, - }, "database_name": { Type: schema.TypeString, Computed: true, @@ -366,10 +425,6 @@ func resourceDatabaseInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "database_status": { - Type: schema.TypeString, - Computed: true, - }, "dbserver_logical_cluster_id": { Type: schema.TypeString, Computed: true, @@ -378,23 +433,11 @@ func resourceDatabaseInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "parent_time_machine_id": { - Type: schema.TypeString, - Computed: true, - }, "time_zone": { Type: schema.TypeString, Computed: true, }, "info": dataSourceEraDatabaseInfo(), - "group_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "metadata": dataSourceEraDBInstanceMetadata(), "metric": { Type: schema.TypeMap, Computed: true, @@ -402,18 +445,10 @@ func resourceDatabaseInstance() *schema.Resource { Type: schema.TypeString, }, }, - "category": { - Type: schema.TypeString, - Computed: true, - }, "parent_database_id": { Type: schema.TypeString, Computed: true, }, - "parent_source_database_id": { - Type: schema.TypeString, - Computed: true, - }, "lcm_config": dataSourceEraLCMConfig(), "time_machine": dataSourceEraTimeMachine(), "dbserver_logical_cluster": { @@ -500,6 
+535,7 @@ func buildEraRequest(d *schema.ResourceData) (*era.ProvisionDatabaseRequest, err VMPassword: utils.StringPtr(d.Get("vm_password").(string)), Tags: expandTags(d.Get("tags").([]interface{})), MaintenanceTasks: expandMaintenanceTasks(d.Get("maintenance_tasks").([]interface{})), + ClusterInfo: expandClusterInfo(d.Get("cluster_info").([]interface{})), }, nil } @@ -557,14 +593,6 @@ func readDatabaseInstance(ctx context.Context, d *schema.ResourceData, m interfa return diag.FromErr(err) } - if err := d.Set("internal", resp.Internal); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("placeholder", resp.Placeholder); err != nil { - return diag.FromErr(err) - } - if err := d.Set("database_name", resp.Databasename); err != nil { return diag.FromErr(err) } @@ -581,10 +609,6 @@ func readDatabaseInstance(ctx context.Context, d *schema.ResourceData, m interfa return diag.FromErr(err) } - if err := d.Set("database_status", resp.Databasestatus); err != nil { - return diag.FromErr(err) - } - if err := d.Set("dbserver_logical_cluster_id", resp.Dbserverlogicalclusterid); err != nil { return diag.FromErr(err) } @@ -593,10 +617,6 @@ func readDatabaseInstance(ctx context.Context, d *schema.ResourceData, m interfa return diag.FromErr(err) } - if err := d.Set("parent_time_machine_id", resp.Parenttimemachineid); err != nil { - return diag.FromErr(err) - } - if err := d.Set("time_zone", resp.Timezone); err != nil { return diag.FromErr(err) } @@ -605,30 +625,14 @@ func readDatabaseInstance(ctx context.Context, d *schema.ResourceData, m interfa return diag.FromErr(err) } - if err := d.Set("group_info", resp.GroupInfo); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("metadata", flattenDBInstanceMetadata(resp.Metadata)); err != nil { - return diag.FromErr(err) - } - if err := d.Set("metric", resp.Metric); err != nil { return diag.FromErr(err) } - if err := d.Set("category", resp.Category); err != nil { - return diag.FromErr(err) - } - if err := 
d.Set("parent_database_id", resp.ParentDatabaseID); err != nil { return diag.FromErr(err) } - if err := d.Set("parent_source_database_id", resp.ParentSourceDatabaseID); err != nil { - return diag.FromErr(err) - } - if err := d.Set("lcm_config", flattenDBLcmConfig(resp.Lcmconfig)); err != nil { return diag.FromErr(err) } @@ -702,15 +706,33 @@ func deleteDatabaseInstance(ctx context.Context, d *schema.ResourceData, m inter dbID := d.Id() - req := era.DeleteDatabaseRequest{ - Delete: true, - Remove: false, - Softremove: false, - Forced: false, - Deletetimemachine: true, - Deletelogicalcluster: true, + req := &era.DeleteDatabaseRequest{} + + if delete, ok := d.GetOk("delete"); ok { + req.Delete = delete.(bool) + } + + if remove, ok := d.GetOk("remove"); ok { + req.Remove = remove.(bool) } - res, err := conn.Service.DeleteDatabase(ctx, &req, dbID) + + if softremove, ok := d.GetOk("soft_remove"); ok { + req.Softremove = softremove.(bool) + } + + if forced, ok := d.GetOk("forced"); ok { + req.Forced = forced.(bool) + } + + if deltms, ok := d.GetOk("delete_time_machine"); ok { + req.Deletetimemachine = deltms.(bool) + } + + if dellogicalcls, ok := d.GetOk("delete_logical_cluster"); ok { + req.Deletelogicalcluster = dellogicalcls.(bool) + } + + res, err := conn.Service.DeleteDatabase(ctx, req, dbID) if err != nil { return diag.FromErr(err) } @@ -731,13 +753,14 @@ func deleteDatabaseInstance(ctx context.Context, d *schema.ResourceData, m inter Pending: []string{"PENDING"}, Target: []string{"COMPLETED", "FAILED"}, Refresh: eraRefresh(ctx, conn, opReq), - Timeout: d.Timeout(schema.TimeoutCreate), + Timeout: d.Timeout(schema.TimeoutDelete), Delay: eraDelay, } if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { return diag.Errorf("error waiting for db Instance (%s) to delete: %s", res.Entityid, errWaitTask) } + log.Printf("NDB database with %s id is deleted successfully", d.Id()) return nil } @@ -843,6 +866,13 @@ func expandActionArguments(d 
*schema.ResourceData) []*era.Actionarguments { }) } + if clsDesc, pok := val["cluster_description"]; pok && len(clsDesc.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "cluster_description", + Value: clsDesc, + }) + } + if patroniClsName, pok := val["patroni_cluster_name"]; pok && len(patroniClsName.(string)) > 0 { args = append(args, &era.Actionarguments{ Name: "patroni_cluster_name", @@ -991,3 +1021,38 @@ func expandMaintenanceTasks(pr []interface{}) *era.MaintenanceTasks { } return nil } + +func expandClusterInfo(pr []interface{}) *era.ClusterInfo { + if len(pr) > 0 { + clsInfos := &era.ClusterInfo{} + val := pr[0].(map[string]interface{}) + + if clsip, ok := val["cluster_ip_infos"]; ok { + clsInfos.ClusterIPInfos = expandClusterIPInfos(clsip.([]interface{})) + } + return clsInfos + } + return nil +} + +func expandClusterIPInfos(pr []interface{}) []*era.ClusterIPInfos { + if len(pr) > 0 { + ipinfos := make([]*era.ClusterIPInfos, 0) + + for _, v := range pr { + val := v.(map[string]interface{}) + info := &era.ClusterIPInfos{} + + if clsid, ok := val["nx_cluster_id"]; ok { + info.NxClusterID = utils.StringPtr(clsid.(string)) + } + + if ips, ok := val["ip_infos"]; ok { + info.IPInfos = expandIPInfos(ips.([]interface{})) + } + ipinfos = append(ipinfos, info) + } + return ipinfos + } + return nil +} diff --git a/nutanix/resource_nutanix_ndb_database_restore.go b/nutanix/resource_nutanix_ndb_database_restore.go index fb792a4e..cb8b3112 100644 --- a/nutanix/resource_nutanix_ndb_database_restore.go +++ b/nutanix/resource_nutanix_ndb_database_restore.go @@ -17,6 +17,9 @@ func resourceNutanixNDBDatabaseRestore() *schema.Resource { ReadContext: resourceNutanixNDBDatabaseRestoreRead, UpdateContext: resourceNutanixNDBDatabaseRestoreUpdate, DeleteContext: resourceNutanixNDBDatabaseRestoreDelete, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(EraProvisionTimeout), + }, Schema: map[string]*schema.Schema{ "database_id": { Type: 
schema.TypeString, @@ -76,10 +79,6 @@ func resourceNutanixNDBDatabaseRestore() *schema.Resource { }, }, }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -93,18 +92,6 @@ func resourceNutanixNDBDatabaseRestore() *schema.Resource { Type: schema.TypeBool, Computed: true, }, - "era_created": { - Type: schema.TypeBool, - Computed: true, - }, - "internal": { - Type: schema.TypeBool, - Computed: true, - }, - "placeholder": { - Type: schema.TypeBool, - Computed: true, - }, "database_name": { Type: schema.TypeString, Computed: true, @@ -125,10 +112,6 @@ func resourceNutanixNDBDatabaseRestore() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "database_status": { - Type: schema.TypeString, - Computed: true, - }, "dbserver_logical_cluster_id": { Type: schema.TypeString, Computed: true, @@ -137,19 +120,7 @@ func resourceNutanixNDBDatabaseRestore() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "parent_time_machine_id": { - Type: schema.TypeString, - Computed: true, - }, "info": dataSourceEraDatabaseInfo(), - "group_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "metadata": dataSourceEraDBInstanceMetadata(), "metric": { Type: schema.TypeMap, Computed: true, @@ -157,18 +128,10 @@ func resourceNutanixNDBDatabaseRestore() *schema.Resource { Type: schema.TypeString, }, }, - "category": { - Type: schema.TypeString, - Computed: true, - }, "parent_database_id": { Type: schema.TypeString, Computed: true, }, - "parent_source_database_id": { - Type: schema.TypeString, - Computed: true, - }, "lcm_config": dataSourceEraLCMConfig(), "time_machine": dataSourceEraTimeMachine(), "dbserver_logical_cluster": { @@ -252,7 +215,7 @@ func resourceNutanixNDBDatabaseRestoreCreate(ctx context.Context, d *schema.Reso } d.SetId(resp.Operationid) - log.Printf("NDB database restore with %s id created successfully", d.Id()) + 
log.Printf("NDB database restore with %s id is performed successfully", d.Id()) return resourceNutanixNDBDatabaseRestoreRead(ctx, d, meta) } diff --git a/nutanix/resource_nutanix_ndb_database_scale.go b/nutanix/resource_nutanix_ndb_database_scale.go index bbaedad3..211fbe73 100644 --- a/nutanix/resource_nutanix_ndb_database_scale.go +++ b/nutanix/resource_nutanix_ndb_database_scale.go @@ -17,6 +17,9 @@ func resourceNutanixNDBScaleDatabase() *schema.Resource { ReadContext: resourceNutanixNDBScaleDatabaseRead, UpdateContext: resourceNutanixNDBScaleDatabaseUpdate, DeleteContext: resourceNutanixNDBScaleDatabaseDelete, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(EraProvisionTimeout), + }, Schema: map[string]*schema.Schema{ "database_uuid": { Type: schema.TypeString, @@ -76,10 +79,6 @@ func resourceNutanixNDBScaleDatabase() *schema.Resource { }, }, }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -93,18 +92,6 @@ func resourceNutanixNDBScaleDatabase() *schema.Resource { Type: schema.TypeBool, Computed: true, }, - "era_created": { - Type: schema.TypeBool, - Computed: true, - }, - "internal": { - Type: schema.TypeBool, - Computed: true, - }, - "placeholder": { - Type: schema.TypeBool, - Computed: true, - }, "database_name": { Type: schema.TypeString, Computed: true, @@ -121,10 +108,6 @@ func resourceNutanixNDBScaleDatabase() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "database_status": { - Type: schema.TypeString, - Computed: true, - }, "dbserver_logical_cluster_id": { Type: schema.TypeString, Computed: true, @@ -133,23 +116,11 @@ func resourceNutanixNDBScaleDatabase() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "parent_time_machine_id": { - Type: schema.TypeString, - Computed: true, - }, "time_zone": { Type: schema.TypeString, Computed: true, }, "info": dataSourceEraDatabaseInfo(), - "group_info": { - Type: schema.TypeMap, - 
Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "metadata": dataSourceEraDBInstanceMetadata(), "metric": { Type: schema.TypeMap, Computed: true, @@ -157,10 +128,6 @@ func resourceNutanixNDBScaleDatabase() *schema.Resource { Type: schema.TypeString, }, }, - "category": { - Type: schema.TypeString, - Computed: true, - }, "parent_database_id": { Type: schema.TypeString, Computed: true, @@ -263,7 +230,7 @@ func resourceNutanixNDBScaleDatabaseCreate(ctx context.Context, d *schema.Resour } d.SetId(resp.Operationid) - log.Printf("NDB database scale with %s id created successfully", d.Id()) + log.Printf("NDB database with %s id is scaled successfully", d.Id()) return resourceNutanixNDBScaleDatabaseRead(ctx, d, meta) } diff --git a/nutanix/resource_nutanix_ndb_database_scale_test.go b/nutanix/resource_nutanix_ndb_database_scale_test.go index 8e9288f1..984baaeb 100644 --- a/nutanix/resource_nutanix_ndb_database_scale_test.go +++ b/nutanix/resource_nutanix_ndb_database_scale_test.go @@ -20,7 +20,6 @@ func TestAccEra_Scalebasic(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceNameScaleDB, "application_type", "postgres_database"), resource.TestCheckResourceAttr(resourceNameScaleDB, "data_storage_size", storageSize), - resource.TestCheckResourceAttr(resourceNameScaleDB, "metadata.#", "1"), resource.TestCheckResourceAttrSet(resourceNameScaleDB, "name"), resource.TestCheckResourceAttrSet(resourceNameScaleDB, "description"), ), diff --git a/nutanix/resource_nutanix_ndb_database_snapshot.go b/nutanix/resource_nutanix_ndb_database_snapshot.go index 2aa4c7a7..e3cabc60 100644 --- a/nutanix/resource_nutanix_ndb_database_snapshot.go +++ b/nutanix/resource_nutanix_ndb_database_snapshot.go @@ -17,6 +17,13 @@ func resourceNutanixNDBDatabaseSnapshot() *schema.Resource { ReadContext: resourceNutanixNDBDatabaseSnapshotRead, UpdateContext: resourceNutanixNDBDatabaseSnapshotUpdate, DeleteContext: 
resourceNutanixNDBDatabaseSnapshotDelete, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(EraProvisionTimeout), + Delete: schema.DefaultTimeout(EraProvisionTimeout), + }, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, Schema: map[string]*schema.Schema{ "time_machine_id": { Type: schema.TypeString, @@ -57,10 +64,6 @@ func resourceNutanixNDBDatabaseSnapshot() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -114,64 +117,6 @@ func resourceNutanixNDBDatabaseSnapshot() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "metadata": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "secure_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "info": dataSourceEraDatabaseInfo(), - "deregister_info": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "from_timestamp": { - Type: schema.TypeString, - Computed: true, - }, - "to_timestamp": { - Type: schema.TypeString, - Computed: true, - }, - "replication_retry_count": { - Type: schema.TypeInt, - Computed: true, - }, - "last_replication_retyr_source_snapshot_id": { - Type: schema.TypeString, - Computed: true, - }, - "async": { - Type: schema.TypeBool, - Computed: true, - }, - "stand_by": { - Type: schema.TypeBool, - Computed: true, - }, - "curation_retry_count": { - Type: schema.TypeInt, - Computed: true, - }, - "operations_using_snapshot": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, "software_snapshot_id": { Type: schema.TypeString, Computed: true, @@ -184,11 +129,11 @@ func resourceNutanixNDBDatabaseSnapshot() *schema.Resource { Type: schema.TypeInt, 
Computed: true, }, - "santised": { + "santized": { Type: schema.TypeBool, Computed: true, }, - "santised_from_snapshot_id": { + "santized_from_snapshot_id": { Type: schema.TypeString, Computed: true, }, @@ -235,7 +180,7 @@ func resourceNutanixNDBDatabaseSnapshot() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "santised_snapshots": { + "santized_snapshots": { Type: schema.TypeString, Computed: true, }, @@ -361,7 +306,7 @@ func resourceNutanixNDBDatabaseSnapshotCreate(ctx context.Context, d *schema.Res } } d.SetId(uniqueID) - log.Printf("NDB database snapshot with %s id created successfully", d.Id()) + log.Printf("NDB database snapshot with %s id is created successfully", d.Id()) return resourceNutanixNDBDatabaseSnapshotRead(ctx, d, meta) } @@ -378,10 +323,6 @@ func resourceNutanixNDBDatabaseSnapshotRead(ctx context.Context, d *schema.Resou return diag.FromErr(err) } - if err := d.Set("owner_id", resp.OwnerID); err != nil { - return diag.FromErr(err) - } - if err := d.Set("description", resp.Description); err != nil { return diag.FromErr(err) } @@ -457,11 +398,11 @@ func resourceNutanixNDBDatabaseSnapshotRead(ctx context.Context, d *schema.Resou return diag.FromErr(err) } - // if err := d.Set("santised", resp.Sanitized); err != nil { - // return diag.FromErr(err) - // } + if err := d.Set("santized", resp.Sanitized); err != nil { + return diag.FromErr(err) + } - if err := d.Set("santised_from_snapshot_id", resp.SanitisedFromSnapshotID); err != nil { + if err := d.Set("santized_from_snapshot_id", resp.SanitizedFromSnapshotID); err != nil { return diag.FromErr(err) } @@ -505,7 +446,7 @@ func resourceNutanixNDBDatabaseSnapshotRead(ctx context.Context, d *schema.Resou return diag.FromErr(err) } - if err := d.Set("santised_snapshots", resp.SanitisedSnapshots); err != nil { + if err := d.Set("santized_snapshots", resp.SanitizedSnapshots); err != nil { return diag.FromErr(err) } @@ -529,9 +470,6 @@ func resourceNutanixNDBDatabaseSnapshotRead(ctx 
context.Context, d *schema.Resou return diag.FromErr(err) } - if err := d.Set("metadata", flattenClonedMetadata(resp.Metadata)); err != nil { - return diag.FromErr(err) - } return nil } @@ -560,7 +498,7 @@ func resourceNutanixNDBDatabaseSnapshotUpdate(ctx context.Context, d *schema.Res } } - log.Printf("NDB database snapshot with %s id updated successfully", d.Id()) + log.Printf("NDB database snapshot with %s id is updated successfully", d.Id()) return resourceNutanixNDBDatabaseSnapshotRead(ctx, d, meta) } @@ -585,14 +523,15 @@ func resourceNutanixNDBDatabaseSnapshotDelete(ctx context.Context, d *schema.Res Pending: []string{"PENDING"}, Target: []string{"COMPLETED", "FAILED"}, Refresh: eraRefresh(ctx, conn, opReq), - Timeout: d.Timeout(schema.TimeoutCreate), + Timeout: d.Timeout(schema.TimeoutDelete), Delay: eraDelay, } if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { - return diag.Errorf("error waiting for snapshot (%s) to delete: %s", resp.Entityid, errWaitTask) + return diag.Errorf("error waiting for snapshot (%s) to delete: %s", resp.Entityid, errWaitTask) } + log.Printf("NDB database snapshot with %s id is deleted successfully", d.Id()) d.SetId("") return nil } diff --git a/nutanix/resource_nutanix_ndb_database_snapshot_test.go b/nutanix/resource_nutanix_ndb_database_snapshot_test.go index e804719b..e76dbbaa 100644 --- a/nutanix/resource_nutanix_ndb_database_snapshot_test.go +++ b/nutanix/resource_nutanix_ndb_database_snapshot_test.go @@ -21,7 +21,6 @@ func TestAccEra_Snapshotbasic(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceNameSnapshotDB, "name", name), resource.TestCheckResourceAttr(resourceNameSnapshotDB, "remove_schedule_in_days", removalIndays), - resource.TestCheckResourceAttr(resourceNameSnapshotDB, "metadata.#", "1"), resource.TestCheckResourceAttr(resourceNameSnapshotDB, "database_snapshot", "false"), ), }, @@ -41,7 +40,6 @@ func TestAccEra_Snapshot_ReplicateToClusters(t 
*testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceNameSnapshotDB, "name", name), resource.TestCheckResourceAttr(resourceNameSnapshotDB, "remove_schedule_in_days", removalIndays), - resource.TestCheckResourceAttr(resourceNameSnapshotDB, "metadata.#", "1"), resource.TestCheckResourceAttr(resourceNameSnapshotDB, "database_snapshot", "false"), resource.TestCheckResourceAttr(resourceNameSnapshotDB, "replicate_to_clusters.#", "2"), ), diff --git a/nutanix/resource_nutanix_ndb_dbservervm.go b/nutanix/resource_nutanix_ndb_dbservervm.go new file mode 100644 index 00000000..728ac843 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_dbservervm.go @@ -0,0 +1,607 @@ +package nutanix + +import ( + "context" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +var ( + EraDBProvisionTimeout = 30 * time.Minute +) + +func resourceNutanixNDBServerVM() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBServerVMCreate, + ReadContext: resourceNutanixNDBServerVMRead, + UpdateContext: resourceNutanixNDBServerVMUpdate, + DeleteContext: resourceNutanixNDBServerVMDelete, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(EraDBProvisionTimeout), + Delete: schema.DefaultTimeout(EraDBProvisionTimeout), + }, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + Schema: map[string]*schema.Schema{ + "database_type": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "software_profile_id": { + Type: schema.TypeString, + 
Optional: true, + ConflictsWith: []string{"time_machine_id"}, + RequiredWith: []string{"software_profile_version_id"}, + }, + "software_profile_version_id": { + Type: schema.TypeString, + Optional: true, + }, + "time_machine_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"software_profile_id"}, + }, + "snapshot_id": { + Type: schema.TypeString, + Optional: true, + }, + "timezone": { + Type: schema.TypeString, + Optional: true, + }, + "network_profile_id": { + Type: schema.TypeString, + Required: true, + }, + "compute_profile_id": { + Type: schema.TypeString, + Required: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Required: true, + }, + "vm_password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + "latest_snapshot": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "postgres_database": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vm_name": { + Type: schema.TypeString, + Required: true, + }, + "client_public_key": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "credentials": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "username": { + Type: schema.TypeString, + Required: true, + }, + "password": { + Type: schema.TypeString, + Required: true, + }, + "label": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "maintenance_tasks": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "maintenance_window_id": { + Type: schema.TypeString, + Optional: true, + }, + "tasks": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "task_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"OS_PATCHING", "DB_PATCHING"}, false), + }, + 
"pre_command": { + Type: schema.TypeString, + Optional: true, + }, + "post_command": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + // delete arguments for database server vm + "delete": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "remove": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "soft_remove": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "delete_vgs": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "delete_vm_snapshots": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + // computed + "name": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + + "value": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + }, + }, + }, + "tags": dataSourceEraDBInstanceTags(), + "dbserver_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "vm_cluster_name": { + Type: schema.TypeString, + Computed: true, + }, + "vm_cluster_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "fqdns": { + Type: schema.TypeString, + Computed: true, + }, + "mac_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "client_id": { + Type: schema.TypeString, + Computed: true, + }, + "era_drive_id": { + Type: schema.TypeString, + Computed: true, + }, + "era_version": { + Type: schema.TypeString, + Computed: true, + }, + "vm_timezone": { + Type: 
schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceNutanixNDBServerVMCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.DBServerInputRequest{} + + // build request for dbServerVMs + if err := buildDBServerVMRequest(d, req); err != nil { + return diag.FromErr(err) + } + + // api to create request + + resp, err := conn.Service.CreateDBServerVM(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(resp.Entityid) + + // Get Operation ID from response of Response and poll for the operation to get completed. + opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for db Server VM (%s) to create: %s", resp.Entityid, errWaitTask) + } + log.Printf("NDB database Server VM with %s id is created successfully", d.Id()) + return resourceNutanixNDBServerVMRead(ctx, d, meta) +} + +func resourceNutanixNDBServerVMRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.ReadDBServerVM(ctx, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if err = d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err = d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + + props := []interface{}{} + for _, prop := range resp.Properties { + props = append(props, 
map[string]interface{}{ + "name": prop.Name, + "value": prop.Value, + }) + } + if err := d.Set("properties", props); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("dbserver_cluster_id", resp.DbserverClusterID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("vm_cluster_name", resp.VMClusterName); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("vm_cluster_uuid", resp.VMClusterUUID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("ip_addresses", utils.StringValueSlice(resp.IPAddresses)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("fqdns", resp.Fqdns); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("mac_addresses", utils.StringValueSlice(resp.MacAddresses)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("client_id", resp.ClientID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("era_drive_id", resp.EraDriveID); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("era_version", resp.EraVersion); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("vm_timezone", resp.VMTimeZone); err != nil { + return diag.FromErr(err) + } + + return nil +} + +func resourceNutanixNDBServerVMUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.UpdateDBServerVMRequest{} + + // setting default values + req.ResetName = utils.BoolPtr(false) + req.ResetDescription = utils.BoolPtr(false) + req.ResetCredential = utils.BoolPtr(false) + req.ResetTags = utils.BoolPtr(false) + + if d.HasChange("description") { + req.Description = utils.StringPtr(d.Get("description").(string)) + 
req.ResetDescription = utils.BoolPtr(true) + } + + if d.HasChange("postgres_database") { + ps := d.Get("postgres_database").([]interface{})[0].(map[string]interface{}) + + vmName := ps["vm_name"] + req.Name = utils.StringPtr(vmName.(string)) + req.ResetName = utils.BoolPtr(true) + } + + if d.HasChange("tags") { + req.Tags = expandTags(d.Get("tags").([]interface{})) + req.ResetTags = utils.BoolPtr(true) + } + + if d.HasChange("credentials") { + req.ResetCredential = utils.BoolPtr(true) + + creds := d.Get("credentials") + credList := creds.([]interface{}) + + credArgs := []*era.VMCredentials{} + + for _, v := range credList { + val := v.(map[string]interface{}) + cred := &era.VMCredentials{} + if username, ok := val["username"]; ok { + cred.Username = utils.StringPtr(username.(string)) + } + + if pass, ok := val["password"]; ok { + cred.Password = utils.StringPtr(pass.(string)) + } + + if label, ok := val["label"]; ok { + cred.Label = utils.StringPtr(label.(string)) + } + + credArgs = append(credArgs, cred) + } + req.Credentials = credArgs + } + + resp, err := conn.Service.UpdateDBServerVM(ctx, req, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if resp != nil { + if err = d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err = d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + } + + log.Printf("NDB database with %s id updated successfully", d.Id()) + return nil +} + +func resourceNutanixNDBServerVMDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.DeleteDBServerVMRequest{} + if delete, ok := d.GetOk("delete"); ok { + req.Delete = delete.(bool) + } + if remove, ok := d.GetOk("remove"); ok { + req.Remove = remove.(bool) + } + if softremove, ok := d.GetOk("soft_remove"); ok { + req.SoftRemove = softremove.(bool) + } + if deleteVgs, ok := d.GetOk("delete_vgs"); ok { + req.DeleteVgs = deleteVgs.(bool) + } + if 
deleteVMSnaps, ok := d.GetOk("delete_vm_snapshots"); ok { + req.DeleteVMSnapshots = deleteVMSnaps.(bool) + } + + res, err := conn.Service.DeleteDBServerVM(ctx, req, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + log.Printf("Operation to delete dbserver vm with id %s has started, operation id: %s", d.Id(), res.Operationid) + opID := res.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Cluster GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for db server VM (%s) to delete: %s", res.Entityid, errWaitTask) + } + log.Printf("NDB database Server VM with %s id is deleted successfully", d.Id()) + return nil +} + +func buildDBServerVMRequest(d *schema.ResourceData, res *era.DBServerInputRequest) error { + if dbType, ok := d.GetOk("database_type"); ok { + res.DatabaseType = utils.StringPtr(dbType.(string)) + } + + if softwareProfile, ok := d.GetOk("software_profile_id"); ok { + res.SoftwareProfileID = utils.StringPtr(softwareProfile.(string)) + } + + if softwareVersion, ok := d.GetOk("software_profile_version_id"); ok { + res.SoftwareProfileVersionID = utils.StringPtr(softwareVersion.(string)) + } + + if LatestSnapshot, ok := d.GetOk("latest_snapshot"); ok { + res.LatestSnapshot = LatestSnapshot.(bool) + } + + if timeMachine, ok := d.GetOk("time_machine_id"); ok { + res.TimeMachineID = utils.StringPtr(timeMachine.(string)) + + // if snapshot id is provided + if snapshotid, ok := d.GetOk("snapshot_id"); ok { + res.SnapshotID = utils.StringPtr(snapshotid.(string)) + 
res.LatestSnapshot = false + } else { + res.LatestSnapshot = true + } + } + + if NetworkProfile, ok := d.GetOk("network_profile_id"); ok { + res.NetworkProfileID = utils.StringPtr(NetworkProfile.(string)) + } + + if ComputeProfile, ok := d.GetOk("compute_profile_id"); ok { + res.ComputeProfileID = utils.StringPtr(ComputeProfile.(string)) + } + + if ClusterID, ok := d.GetOk("nx_cluster_id"); ok { + res.NxClusterID = utils.StringPtr(ClusterID.(string)) + } + + if VMPass, ok := d.GetOk("vm_password"); ok { + res.VMPassword = utils.StringPtr(VMPass.(string)) + } + + if desc, ok := d.GetOk("description"); ok { + res.Description = utils.StringPtr(desc.(string)) + } + + if postgresDatabase, ok := d.GetOk("postgres_database"); ok && len(postgresDatabase.([]interface{})) > 0 { + res.ActionArguments = expandDBServerPostgresInput(postgresDatabase.([]interface{})) + } + + if maintenance, ok := d.GetOk("maintenance_tasks"); ok { + res.MaintenanceTasks = expandMaintenanceTasks(maintenance.([]interface{})) + } + return nil +} + +func expandDBServerPostgresInput(pr []interface{}) []*era.Actionarguments { + if len(pr) > 0 { + args := make([]*era.Actionarguments, 0) + + for _, v := range pr { + val := v.(map[string]interface{}) + + if vmName, ok := val["vm_name"]; ok { + args = append(args, &era.Actionarguments{ + Name: "vm_name", + Value: vmName, + }) + } + if clientKey, ok := val["client_public_key"]; ok && len(clientKey.(string)) > 0 { + args = append(args, &era.Actionarguments{ + Name: "client_public_key", + Value: clientKey, + }) + } + } + return args + } + return nil +} diff --git a/nutanix/resource_nutanix_ndb_dbservervm_register.go b/nutanix/resource_nutanix_ndb_dbservervm_register.go new file mode 100644 index 00000000..299ef3b2 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_dbservervm_register.go @@ -0,0 +1,447 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBRegisterDBServer() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBRegisterDBServerCreate, + ReadContext: resourceNutanixNDBRegisterDBServerRead, + UpdateContext: resourceNutanixNDBRegisterDBServerUpdate, + DeleteContext: resourceNutanixNDBRegisterDBServerDelete, + Schema: map[string]*schema.Schema{ + "database_type": { + Type: schema.TypeString, + Required: true, + }, + "vm_ip": { + Type: schema.TypeString, + Required: true, + }, + "nxcluster_id": { + Type: schema.TypeString, + Optional: true, + }, + "working_directory": { + Type: schema.TypeString, + Optional: true, + Default: "/tmp", + }, + "username": { + Type: schema.TypeString, + Optional: true, + }, + "password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + "ssh_key": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + "forced_install": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "postgres_database": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "listener_port": { + Type: schema.TypeString, + Optional: true, + }, + "postgres_software_home": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "credentials": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "username": { + Type: schema.TypeString, + Required: true, + }, + "password": { + Type: schema.TypeString, + Required: true, + }, + "label": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "update_name_description_in_cluster": { + Type: schema.TypeBool, + Optional: true, + }, + "name": { + Type: 
schema.TypeString, + Optional: true, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + // delete values + "delete": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "remove": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "soft_remove": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "delete_vgs": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "delete_vm_snapshots": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + // computed + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + + "value": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + }, + }, + }, + "tags": dataSourceEraDBInstanceTags(), + "era_created": { + Type: schema.TypeBool, + Computed: true, + }, + "internal": { + Type: schema.TypeBool, + Computed: true, + }, + "dbserver_cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "vm_cluster_name": { + Type: schema.TypeString, + Computed: true, + }, + "vm_cluster_uuid": { + Type: schema.TypeString, + Computed: true, + }, + "ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "fqdns": { + Type: schema.TypeString, + Computed: true, + }, + "mac_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "client_id": { + Type: schema.TypeString, + Computed: true, + }, + "era_drive_id": { + Type: schema.TypeString, + Computed: true, + }, + "era_version": { + Type: schema.TypeString, + Computed: true, + }, + 
"vm_timezone": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceNutanixNDBRegisterDBServerCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.DBServerRegisterInput{} + + // build request for dbServerVMs + if err := buildRegisterDBServerVMRequest(d, req); err != nil { + return diag.FromErr(err) + } + + // api to register dbserver + resp, err := conn.Service.RegisterDBServerVM(ctx, req) + if err != nil { + return diag.FromErr(err) + } + d.SetId(resp.Entityid) + + // Get Operation ID from response of Response and poll for the operation to get completed. + opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Operation GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for db Server VM (%s) to register: %s", resp.Entityid, errWaitTask) + } + log.Printf("NDB database Server VM with %s id is registered successfully", d.Id()) + return resourceNutanixNDBRegisterDBServerRead(ctx, d, meta) +} +func resourceNutanixNDBRegisterDBServerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return resourceNutanixNDBServerVMRead(ctx, d, meta) +} +func resourceNutanixNDBRegisterDBServerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.UpdateDBServerVMRequest{} + + // default for update request + req.ResetName = utils.BoolPtr(false) + req.ResetDescription 
= utils.BoolPtr(false) + req.ResetCredential = utils.BoolPtr(false) + req.ResetTags = utils.BoolPtr(false) + req.ResetDescriptionInNxCluster = utils.BoolPtr(false) + req.ResetNameInNxCluster = utils.BoolPtr(false) + + if d.HasChange("name") { + req.Name = utils.StringPtr(d.Get("name").(string)) + req.ResetName = utils.BoolPtr(true) + } + + if d.HasChange("description") { + req.Description = utils.StringPtr(d.Get("description").(string)) + req.ResetDescription = utils.BoolPtr(true) + } + + if _, ok := d.GetOkExists("update_name_description_in_cluster"); ok { + req.ResetDescriptionInNxCluster = utils.BoolPtr(true) + req.ResetNameInNxCluster = utils.BoolPtr(true) + } + + if d.HasChange("credentials") { + req.ResetCredential = utils.BoolPtr(true) + + creds := d.Get("credentials") + credList := creds.([]interface{}) + + credArgs := []*era.VMCredentials{} + + for _, v := range credList { + val := v.(map[string]interface{}) + cred := &era.VMCredentials{} + if username, ok := val["username"]; ok { + cred.Username = utils.StringPtr(username.(string)) + } + + if pass, ok := val["password"]; ok { + cred.Password = utils.StringPtr(pass.(string)) + } + + if label, ok := val["label"]; ok { + cred.Label = utils.StringPtr(label.(string)) + } + + credArgs = append(credArgs, cred) + } + req.Credentials = credArgs + } + + resp, err := conn.Service.UpdateDBServerVM(ctx, req, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if resp != nil { + if err = d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err = d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + } + log.Printf("NDB database Server VM with %s id is updated successfully", d.Id()) + return resourceNutanixNDBRegisterDBServerRead(ctx, d, meta) +} +func resourceNutanixNDBRegisterDBServerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.DeleteDBServerVMRequest{} + if delete, ok 
:= d.GetOk("delete"); ok { + req.Delete = delete.(bool) + } + if remove, ok := d.GetOk("remove"); ok { + req.Remove = remove.(bool) + } + if softremove, ok := d.GetOk("soft_remove"); ok { + req.SoftRemove = softremove.(bool) + } + if deleteVgs, ok := d.GetOk("delete_vgs"); ok { + req.DeleteVgs = deleteVgs.(bool) + } + if deleteVMSnaps, ok := d.GetOk("delete_vm_snapshots"); ok { + req.DeleteVMSnapshots = deleteVMSnaps.(bool) + } + + resp, err := conn.Service.DeleteDBServerVM(ctx, req, d.Id()) + if err != nil { + return diag.FromErr(err) + } + log.Printf("Operation to delete dbserver vm with id %s has started, operation id: %s", d.Id(), resp.Operationid) + opID := resp.Operationid + if opID == "" { + return diag.Errorf("error: operation ID is an empty string") + } + opReq := era.GetOperationRequest{ + OperationID: opID, + } + + log.Printf("polling for operation with id: %s\n", opID) + + // Poll for operation here - Cluster GET Call + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING"}, + Target: []string{"COMPLETED", "FAILED"}, + Refresh: eraRefresh(ctx, conn, opReq), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: eraDelay, + } + + if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { + return diag.Errorf("error waiting for db server VM (%s) to delete: %s", resp.Entityid, errWaitTask) + } + log.Printf("NDB database Server VM with %s id is deleted successfully", d.Id()) + return nil +} + +func buildRegisterDBServerVMRequest(d *schema.ResourceData, req *era.DBServerRegisterInput) error { + if dbType, ok := d.GetOk("database_type"); ok { + req.DatabaseType = utils.StringPtr(dbType.(string)) + } + + if vmip, ok := d.GetOk("vm_ip"); ok { + req.VMIP = utils.StringPtr(vmip.(string)) + } + + if nxcls, ok := d.GetOk("nxcluster_id"); ok { + req.NxClusterUUID = utils.StringPtr(nxcls.(string)) + } + if user, ok := d.GetOk("username"); ok { + req.Username = utils.StringPtr(user.(string)) + } + if pass, ok := d.GetOk("password"); ok 
{ + req.Password = utils.StringPtr(pass.(string)) + } + if sshkey, ok := d.GetOk("ssh_key"); ok { + req.SSHPrivateKey = utils.StringPtr(sshkey.(string)) + } + if workd, ok := d.GetOk("working_directory"); ok { + req.WorkingDirectory = utils.StringPtr(workd.(string)) + } + if forcedIns, ok := d.GetOk("forced_install"); ok { + req.ForcedInstall = utils.BoolPtr(forcedIns.(bool)) + } + if postgresType, ok := d.GetOk("postgres_database"); ok { + req.ActionArguments = expandPsRegisterDBServer(postgresType.([]interface{})) + } + return nil +} + +func expandPsRegisterDBServer(ps []interface{}) []*era.Actionarguments { + if len(ps) > 0 { + args := make([]*era.Actionarguments, 0) + + for _, v := range ps { + val := v.(map[string]interface{}) + + if listnerPort, ok := val["listener_port"]; ok { + args = append(args, &era.Actionarguments{ + Name: "listener_port", + Value: listnerPort, + }) + } + if psHome, ok := val["postgres_software_home"]; ok { + args = append(args, &era.Actionarguments{ + Name: "postgres_software_home", + Value: psHome, + }) + } + } + return args + } + return nil +} diff --git a/nutanix/resource_nutanix_ndb_dbservervm_test.go b/nutanix/resource_nutanix_ndb_dbservervm_test.go new file mode 100644 index 00000000..b0e58cc0 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_dbservervm_test.go @@ -0,0 +1,149 @@ +package nutanix + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceNameDBServer = "nutanix_ndb_dbserver_vm.acctest-managed" + +func TestAccEra_DBServerVMbasic(t *testing.T) { + r := randIntBetween(21, 30) + name := fmt.Sprintf("test-dbserver-%d", r) + desc := "this is desc" + sshKey := testVars.SSHKey + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabaseServerConfig(name, desc, sshKey), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttr(resourceNameDBServer, "name", name), + resource.TestCheckResourceAttr(resourceNameDBServer, "description", desc), + resource.TestCheckResourceAttr(resourceNameDBServer, "status", "UP"), + resource.TestCheckResourceAttr(resourceNameDBServer, "type", "DBSERVER"), + resource.TestCheckResourceAttrSet(resourceNameDBServer, "properties.#"), + ), + }, + }, + }) +} + +func TestAccEra_DBServerVMbasicWithTimeMachine(t *testing.T) { + r := randIntBetween(31, 40) + name := fmt.Sprintf("test-dbserver-%d", r) + desc := "this is desc" + sshKey := testVars.SSHKey + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraDatabaseServerTMSConfig(name, desc, sshKey), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameDBServer, "name", name), + resource.TestCheckResourceAttr(resourceNameDBServer, "description", desc), + resource.TestCheckResourceAttr(resourceNameDBServer, "status", "UP"), + resource.TestCheckResourceAttr(resourceNameDBServer, "type", "DBSERVER"), + resource.TestCheckResourceAttrSet(resourceNameDBServer, "properties.#"), + ), + }, + }, + }) +} + +func testAccEraDatabaseServerConfig(name, desc, sshKey string) string { + return fmt.Sprintf(` + data "nutanix_ndb_profiles" "p"{ + } + data "nutanix_ndb_slas" "slas"{} + data "nutanix_ndb_clusters" "clusters"{} + + locals { + profiles_by_type = { + for p in data.nutanix_ndb_profiles.p.profiles : p.type => p... 
+ } + storage_profiles = { + for p in local.profiles_by_type.Storage: p.name => p + } + compute_profiles = { + for p in local.profiles_by_type.Compute: p.name => p + } + network_profiles = { + for p in local.profiles_by_type.Network: p.name => p + } + software_profiles = { + for p in local.profiles_by_type.Software: p.name => p + } + clusters = { + for p in data.nutanix_ndb_clusters.clusters.clusters: p.name => p + } + } + + resource nutanix_ndb_dbserver_vm acctest-managed { + database_type = "postgres_database" + software_profile_id = local.software_profiles["POSTGRES_10.4_OOB"].id + software_profile_version_id = local.software_profiles["POSTGRES_10.4_OOB"].latest_version_id + compute_profile_id = local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id + network_profile_id = local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + nx_cluster_id = local.clusters.EraCluster.id + vm_password = "pass" + postgres_database { + vm_name = "%[1]s" + client_public_key = "%[3]s" + } + description = "%[2]s" + + } + `, name, desc, sshKey) +} + +func testAccEraDatabaseServerTMSConfig(name, desc, sshKey string) string { + return fmt.Sprintf(` + data "nutanix_ndb_profiles" "p"{ + } + data "nutanix_ndb_slas" "slas"{} + data "nutanix_ndb_clusters" "clusters"{} + + locals { + profiles_by_type = { + for p in data.nutanix_ndb_profiles.p.profiles : p.type => p... 
+ } + storage_profiles = { + for p in local.profiles_by_type.Storage: p.name => p + } + compute_profiles = { + for p in local.profiles_by_type.Compute: p.name => p + } + network_profiles = { + for p in local.profiles_by_type.Network: p.name => p + } + software_profiles = { + for p in local.profiles_by_type.Software: p.name => p + } + clusters = { + for p in data.nutanix_ndb_clusters.clusters.clusters: p.name => p + } + } + + data "nutanix_ndb_time_machines" "test1" {} + + resource nutanix_ndb_dbserver_vm acctest-managed { + database_type = "postgres_database" + time_machine_id = data.nutanix_ndb_time_machines.test1.time_machines.0.id + compute_profile_id = local.compute_profiles["DEFAULT_OOB_SMALL_COMPUTE"].id + network_profile_id = local.network_profiles.DEFAULT_OOB_POSTGRESQL_NETWORK.id + nx_cluster_id = local.clusters.EraCluster.id + vm_password = "pass" + postgres_database { + vm_name = "%[1]s" + client_public_key = "%[3]s" + } + description = "%[2]s" + + } + `, name, desc, sshKey) +} diff --git a/nutanix/resource_nutanix_ndb_linked_databases.go b/nutanix/resource_nutanix_ndb_linked_databases.go index 4c910480..8c2ac199 100644 --- a/nutanix/resource_nutanix_ndb_linked_databases.go +++ b/nutanix/resource_nutanix_ndb_linked_databases.go @@ -17,6 +17,10 @@ func resourceNutanixNDBLinkedDB() *schema.Resource { ReadContext: resourceNutanixNDBLinkedDBRead, UpdateContext: resourceNutanixNDBLinkedDBUpdate, DeleteContext: resourceNutanixNDBLinkedDBDelete, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(EraProvisionTimeout), + Delete: schema.DefaultTimeout(EraProvisionTimeout), + }, Schema: map[string]*schema.Schema{ "database_id": { Type: schema.TypeString, @@ -57,10 +61,6 @@ func resourceNutanixNDBLinkedDB() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "date_created": { Type: schema.TypeString, Computed: true, @@ -100,13 +100,6 @@ func resourceNutanixNDBLinkedDB() 
*schema.Resource { }, }, }, - "metadata": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, "metric": { Type: schema.TypeMap, Computed: true, @@ -194,7 +187,7 @@ func resourceNutanixNDBLinkedDBCreate(ctx context.Context, d *schema.ResourceDat } d.SetId(SetID) - log.Printf("NDB linked databases with %s id created successfully", d.Id()) + log.Printf("NDB linked database with %s id is created successfully", d.Id()) return resourceNutanixNDBLinkedDBRead(ctx, d, meta) } @@ -236,18 +229,12 @@ func resourceNutanixNDBLinkedDBRead(ctx context.Context, d *schema.ResourceData, if err := d.Set("info", flattenLinkedDBInfo(currentLinkedDB.Info)); err != nil { return diag.FromErr(err) } - if err := d.Set("metadata", currentLinkedDB.Metadata); err != nil { - return diag.FromErr(err) - } if err := d.Set("metric", currentLinkedDB.Metric); err != nil { return diag.FromErr(err) } if err := d.Set("name", currentLinkedDB.Name); err != nil { return diag.FromErr(err) } - if err := d.Set("owner_id", currentLinkedDB.Ownerid); err != nil { - return diag.FromErr(err) - } if err := d.Set("parent_database_id", currentLinkedDB.ParentDatabaseID); err != nil { return diag.FromErr(err) } @@ -304,14 +291,14 @@ func resourceNutanixNDBLinkedDBDelete(ctx context.Context, d *schema.ResourceDat Pending: []string{"PENDING"}, Target: []string{"COMPLETED", "FAILED"}, Refresh: eraRefresh(ctx, conn, opReq), - Timeout: d.Timeout(schema.TimeoutCreate), + Timeout: d.Timeout(schema.TimeoutDelete), Delay: eraDelay, } if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { return diag.Errorf("error waiting for linked db (%s) to delete: %s", d.Id(), errWaitTask) } - log.Printf("NDB linked databases with %s id deleted successfully", d.Id()) + log.Printf("NDB linked database with %s id is deleted successfully", d.Id()) return nil } diff --git a/nutanix/resource_nutanix_ndb_linked_databases_test.go 
b/nutanix/resource_nutanix_ndb_linked_databases_test.go index c09ad6bc..2955aa07 100644 --- a/nutanix/resource_nutanix_ndb_linked_databases_test.go +++ b/nutanix/resource_nutanix_ndb_linked_databases_test.go @@ -21,7 +21,7 @@ func TestAccEraLinkedDB_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceNameLinkedDB, "name", name), resource.TestCheckResourceAttrSet(resourceNameLinkedDB, "id"), resource.TestCheckResourceAttrSet(resourceNameLinkedDB, "status"), - resource.TestCheckResourceAttrSet(resourceNameLinkedDB, "owner_id"), + resource.TestCheckResourceAttrSet(resourceNameLinkedDB, "parent_database_id"), ), }, }, diff --git a/nutanix/resource_nutanix_ndb_log_catchups.go b/nutanix/resource_nutanix_ndb_log_catchups.go index ff815bd6..fe2ab0ca 100644 --- a/nutanix/resource_nutanix_ndb_log_catchups.go +++ b/nutanix/resource_nutanix_ndb_log_catchups.go @@ -16,6 +16,9 @@ func resourceNutanixNDBLogCatchUps() *schema.Resource { ReadContext: resourceNutanixNDBLogCatchUpsRead, UpdateContext: resourceNutanixNDBLogCatchUpsUpdate, DeleteContext: resourceNutanixNDBLogCatchUpsDelete, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(EraProvisionTimeout), + }, Schema: map[string]*schema.Schema{ "time_machine_id": { Type: schema.TypeString, @@ -121,7 +124,7 @@ func resourceNutanixNDBLogCatchUpsCreate(ctx context.Context, d *schema.Resource return diag.Errorf("error waiting to perform log-catchups (%s) to create: %s", resp.Entityid, errWaitTask) } d.SetId(resp.Operationid) - log.Printf("NDB log catchup with %s id created successfully", d.Id()) + log.Printf("NDB log catchup with %s id is performed successfully", d.Id()) return nil } diff --git a/nutanix/resource_nutanix_ndb_log_catchups_test.go b/nutanix/resource_nutanix_ndb_log_catchups_test.go index 9ee93b7d..24f4e6a2 100644 --- a/nutanix/resource_nutanix_ndb_log_catchups_test.go +++ b/nutanix/resource_nutanix_ndb_log_catchups_test.go @@ -6,7 +6,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) -const resourceNameLogCatchDB = "nutanix_ndb_database_log_catchup.acctest-managed" +const resourceNameLogCatchDB = "nutanix_ndb_log_catchups.acctest-managed" func TestAccEra_LogCatchUpbasic(t *testing.T) { resource.Test(t, resource.TestCase{ @@ -16,8 +16,6 @@ func TestAccEra_LogCatchUpbasic(t *testing.T) { { Config: testAccEraDatabaseLogCatchUpConfig(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceNameLogCatchDB, "log_catchup_version", ""), - resource.TestCheckResourceAttr(resourceNameLogCatchDB, "database_id", ""), resource.TestCheckResourceAttrSet(resourceNameLogCatchDB, "time_machine_id"), ), }, @@ -29,7 +27,7 @@ func testAccEraDatabaseLogCatchUpConfig() string { return (` data "nutanix_ndb_time_machines" "test1" {} - resource "nutanix_ndb_log_catchups" "name" { + resource "nutanix_ndb_log_catchups" "acctest-managed" { time_machine_id = data.nutanix_ndb_time_machines.test1.time_machines.0.id } `) diff --git a/nutanix/resource_nutanix_ndb_maintenance_task.go b/nutanix/resource_nutanix_ndb_maintenance_task.go new file mode 100644 index 00000000..2820b1b1 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_maintenance_task.go @@ -0,0 +1,159 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBMaintenanceTask() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBMaintenanceTaskCreate, + ReadContext: resourceNutanixNDBMaintenanceTaskRead, + UpdateContext: resourceNutanixNDBMaintenanceTaskUpdate, + DeleteContext: resourceNutanixNDBMaintenanceTaskDelete, + Schema: 
map[string]*schema.Schema{ + "dbserver_id": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "dbserver_cluster": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "maintenance_window_id": { + Type: schema.TypeString, + Required: true, + }, + "tasks": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "task_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"OS_PATCHING", "DB_PATCHING"}, false), + }, + "pre_command": { + Type: schema.TypeString, + Optional: true, + }, + "post_command": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + //computed + "entity_task_association": EntityTaskAssocSchema(), + }, + } +} + +func resourceNutanixNDBMaintenanceTaskCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.MaintenanceTasksInput{} + + entities := &era.MaintenanceEntities{} + if dbserver, ok := d.GetOk("dbserver_id"); ok { + st := dbserver.([]interface{}) + sublist := make([]*string, len(st)) + + for a := range st { + sublist[a] = utils.StringPtr(st[a].(string)) + } + entities.EraDBServer = sublist + } + if dbserverCls, ok := d.GetOk("dbserver_cluster"); ok { + st := dbserverCls.([]interface{}) + sublist := make([]*string, len(st)) + + for a := range st { + sublist[a] = utils.StringPtr(st[a].(string)) + } + entities.EraDBServerCluster = sublist + } + + req.Entities = entities + + if windowID, ok := d.GetOk("maintenance_window_id"); ok { + req.MaintenanceWindowID = utils.StringPtr(windowID.(string)) + } + + taskList := make([]*era.Tasks, 0) + if task, ok := d.GetOk("tasks"); ok { + tasks := task.([]interface{}) + + for _, v := range tasks { + out := &era.Tasks{} + value := v.(map[string]interface{}) + + if taskType, ok := value["task_type"]; ok { + 
out.TaskType = utils.StringPtr(taskType.(string)) + } + + payload := &era.Payload{} + prepostCommand := &era.PrePostCommand{} + if preCommand, ok := value["pre_command"]; ok { + prepostCommand.PreCommand = utils.StringPtr(preCommand.(string)) + } + if postCommand, ok := value["post_command"]; ok { + prepostCommand.PostCommand = utils.StringPtr(postCommand.(string)) + } + + payload.PrePostCommand = prepostCommand + out.Payload = payload + + taskList = append(taskList, out) + } + } + req.Tasks = taskList + + _, err := conn.Service.CreateMaintenanceTask(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + uuid, er := uuid.GenerateUUID() + + if er != nil { + return diag.Errorf("error generating UUID for ndb maintenance tasks: %+v", er) + } + d.SetId(uuid) + log.Printf("NDB maintenance task with %s id is performed", d.Id()) + return resourceNutanixNDBMaintenanceTaskRead(ctx, d, meta) +} +func resourceNutanixNDBMaintenanceTaskRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + maintenanceID := d.Get("maintenance_window_id") + resp, err := conn.Service.ReadMaintenanceWindow(ctx, maintenanceID.(string)) + if err != nil { + return diag.FromErr(err) + } + + d.Set("entity_task_association", flattenEntityTaskAssoc(resp.EntityTaskAssoc)) + + return nil +} + +func resourceNutanixNDBMaintenanceTaskUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return resourceNutanixNDBMaintenanceTaskCreate(ctx, d, meta) +} +func resourceNutanixNDBMaintenanceTaskDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} diff --git a/nutanix/resource_nutanix_ndb_maintenance_task_test.go b/nutanix/resource_nutanix_ndb_maintenance_task_test.go new file mode 100644 index 00000000..6d2585c9 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_maintenance_task_test.go @@ -0,0 +1,170 @@ +package nutanix + +import ( + "fmt" + "testing" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceMaintenaceTaskName = "nutanix_ndb_maintenance_task.acctest-managed" + +func TestAccEra_MaintenanceTask(t *testing.T) { + name := "test-maintenance-acc" + desc := "this is desc" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraMaintenanceTask(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "name", name), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "description", desc), + resource.TestCheckResourceAttrSet(resourceMaintenaceTaskName, "entity_task_association.#"), + resource.TestCheckResourceAttr(resourceMaintenaceTaskName, "entity_task_association.0.entity_type", "ERA_DBSERVER"), + resource.TestCheckResourceAttrSet(resourceMaintenaceTaskName, "entity_task_association.1.task_type"), + resource.TestCheckResourceAttrSet(resourceMaintenaceTaskName, "entity_task_association.0.task_type"), + ), + }, + }, + }) +} + +func TestAccEra_MaintenanceTask_Update(t *testing.T) { + name := "test-maintenance-acc" + desc := "this is desc" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraMaintenanceTask(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "name", name), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "description", desc), + resource.TestCheckResourceAttrSet(resourceMaintenaceTaskName, "entity_task_association.#"), + resource.TestCheckResourceAttr(resourceMaintenaceTaskName, "entity_task_association.0.entity_type", "ERA_DBSERVER"), + resource.TestCheckResourceAttrSet(resourceMaintenaceTaskName, "entity_task_association.1.task_type"), + 
resource.TestCheckResourceAttrSet(resourceMaintenaceTaskName, "entity_task_association.0.task_type"), + ), + }, + { + Config: testAccEraMaintenanceTaskUpdate(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "name", name), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "description", desc), + resource.TestCheckResourceAttrSet(resourceMaintenaceTaskName, "entity_task_association.#"), + resource.TestCheckResourceAttr(resourceMaintenaceTaskName, "entity_task_association.#", "1"), + resource.TestCheckResourceAttr(resourceMaintenaceTaskName, "entity_task_association.0.entity_type", "ERA_DBSERVER"), + resource.TestCheckResourceAttrSet(resourceMaintenaceTaskName, "entity_task_association.0.task_type"), + ), + }, + }, + }) +} + +func TestAccEra_MaintenanceTask_UpdateWithNoTask(t *testing.T) { + name := "test-maintenance-acc" + desc := "this is desc" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraMaintenanceTask(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "name", name), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "description", desc), + resource.TestCheckResourceAttrSet(resourceMaintenaceTaskName, "entity_task_association.#"), + resource.TestCheckResourceAttr(resourceMaintenaceTaskName, "entity_task_association.0.entity_type", "ERA_DBSERVER"), + resource.TestCheckResourceAttrSet(resourceMaintenaceTaskName, "entity_task_association.1.task_type"), + resource.TestCheckResourceAttrSet(resourceMaintenaceTaskName, "entity_task_association.0.task_type"), + ), + }, + { + Config: testAccEraMaintenanceTaskUpdateWithNoTask(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "name", name), + 
resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "description", desc), + resource.TestCheckResourceAttr(resourceMaintenaceTaskName, "entity_task_association.#", "0"), + ), + }, + }, + }) +} + +func testAccEraMaintenanceTask(name, desc string) string { + return fmt.Sprintf(` + resource nutanix_ndb_maintenance_window acctest-managed{ + name = "%[1]s" + description = "%[2]s" + recurrence = "WEEKLY" + duration = 2 + day_of_week = "TUESDAY" + start_time = "17:04:47" + } + + data "nutanix_ndb_dbservers" "dbservers"{} + + resource nutanix_ndb_maintenance_task acctest-managed{ + dbserver_id = [ + data.nutanix_ndb_dbservers.dbservers.dbservers.0.id + ] + maintenance_window_id = resource.nutanix_ndb_maintenance_window.acctest-managed.id + tasks{ + task_type = "OS_PATCHING" + } + tasks { + task_type = "DB_PATCHING" + } + } + `, name, desc) +} + +func testAccEraMaintenanceTaskUpdate(name, desc string) string { + return fmt.Sprintf(` + resource nutanix_ndb_maintenance_window acctest-managed{ + name = "%[1]s" + description = "%[2]s" + recurrence = "WEEKLY" + duration = 2 + day_of_week = "TUESDAY" + start_time = "17:04:47" + } + + data "nutanix_ndb_dbservers" "dbservers"{} + + resource nutanix_ndb_maintenance_task acctest-managed{ + dbserver_id = [ + data.nutanix_ndb_dbservers.dbservers.dbservers.0.id + ] + maintenance_window_id = resource.nutanix_ndb_maintenance_window.acctest-managed.id + tasks { + task_type = "DB_PATCHING" + } + } + `, name, desc) +} + +func testAccEraMaintenanceTaskUpdateWithNoTask(name, desc string) string { + return fmt.Sprintf(` + resource nutanix_ndb_maintenance_window acctest-managed{ + name = "%[1]s" + description = "%[2]s" + recurrence = "WEEKLY" + duration = 2 + day_of_week = "TUESDAY" + start_time = "17:04:47" + } + + data "nutanix_ndb_dbservers" "dbservers"{} + + resource nutanix_ndb_maintenance_task acctest-managed{ + dbserver_id = [ + data.nutanix_ndb_dbservers.dbservers.dbservers.0.id + ] + maintenance_window_id = 
resource.nutanix_ndb_maintenance_window.acctest-managed.id + } + `, name, desc) +} diff --git a/nutanix/resource_nutanix_ndb_maintenance_window.go b/nutanix/resource_nutanix_ndb_maintenance_window.go new file mode 100644 index 00000000..5ce781b3 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_maintenance_window.go @@ -0,0 +1,406 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBMaintenanceWindow() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBMaintenanceWindowCreate, + ReadContext: resourceNutanixNDBMaintenanceWindowRead, + UpdateContext: resourceNutanixNDBMaintenanceWindowUpdate, + DeleteContext: resourceNutanixNDBMaintenanceWindowDelete, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "timezone": { + Type: schema.TypeString, + Optional: true, + Default: "Asia/Calcutta", + }, + + "recurrence": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"MONTHLY", "WEEKLY"}, false), + }, + "duration": { + Type: schema.TypeInt, + Optional: true, + Default: "2", + }, + "start_time": { + Type: schema.TypeString, + Required: true, + }, + "day_of_week": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", + "FRIDAY", "SATURDAY", "SUNDAY"}, false), + }, + "week_of_month": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntInSlice([]int{1, 2, 3, 4}), + }, + + // compute + + "schedule": { + Type: schema.TypeList, + 
Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "recurrence": { + Type: schema.TypeString, + Computed: true, + }, + "duration": { + Type: schema.TypeInt, + Computed: true, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + }, + "day_of_week": { + Type: schema.TypeString, + Computed: true, + }, + "week_of_month": { + Type: schema.TypeInt, + Computed: true, + }, + "threshold": { + Type: schema.TypeString, + Computed: true, + }, + "hour": { + Type: schema.TypeInt, + Computed: true, + }, + "minute": { + Type: schema.TypeInt, + Computed: true, + }, + "timezone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "access_level": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Description: "List of all the properties", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "tags": dataSourceEraDBInstanceTags(), + "status": { + Type: schema.TypeString, + Computed: true, + }, + "next_run_time": { + Type: schema.TypeString, + Computed: true, + }, + "entity_task_assoc": EntityTaskAssocSchema(), + }, + } +} + +func resourceNutanixNDBMaintenanceWindowCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.MaintenanceWindowInput{} + schedule := &era.MaintenaceSchedule{} + + if name, ok := d.GetOk("name"); ok { + req.Name = utils.StringPtr(name.(string)) + } + + if desc, ok := d.GetOk("description"); ok { + req.Description = utils.StringPtr(desc.(string)) + } + + if timezone, ok := d.GetOk("timezone"); ok { + 
req.Timezone = utils.StringPtr(timezone.(string)) + } + + if recurrence, ok := d.GetOk("recurrence"); ok { + schedule.Recurrence = utils.StringPtr(recurrence.(string)) + } + + if duration, ok := d.GetOk("duration"); ok { + schedule.Duration = utils.IntPtr(duration.(int)) + } + + if startTime, ok := d.GetOk("start_time"); ok { + schedule.StartTime = utils.StringPtr(startTime.(string)) + } + + if dayOfWeek, ok := d.GetOk("day_of_week"); ok && len(dayOfWeek.(string)) > 0 { + schedule.DayOfWeek = utils.StringPtr(dayOfWeek.(string)) + } + + if weekOfMonth, ok := d.GetOk("week_of_month"); ok { + schedule.WeekOfMonth = utils.IntPtr(weekOfMonth.(int)) + } + + req.Schedule = schedule + + resp, err := conn.Service.CreateMaintenanceWindow(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(*resp.ID) + log.Printf("NDB Maintenance Window with %s id is created successfully", d.Id()) + return resourceNutanixNDBMaintenanceWindowRead(ctx, d, meta) +} + +func resourceNutanixNDBMaintenanceWindowRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.ReadMaintenanceWindow(ctx, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("owner_id", resp.OwnerID); err != nil { + return diag.FromErr(err) + } + if err := d.Set("date_created", resp.DateCreated); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("date_modified", resp.DateModified); err != nil { + return diag.FromErr(err) + } + if err := d.Set("access_level", resp.AccessLevel); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("status", resp.Status); err != nil { + return diag.FromErr(err) + } + if err := d.Set("next_run_time", resp.NextRunTime); err != nil { + return diag.FromErr(err) + } + + if 
err := d.Set("entity_task_assoc", flattenEntityTaskAssoc(resp.EntityTaskAssoc)); err != nil { + return diag.FromErr(err) + } + + props := []interface{}{} + for _, prop := range resp.Properties { + props = append(props, map[string]interface{}{ + "name": prop.Name, + "value": prop.Value, + }) + } + if err := d.Set("properties", props); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("tags", flattenDBTags(resp.Tags)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("schedule", flattenMaintenanceSchedule(resp.Schedule)); err != nil { + return diag.FromErr(err) + } + return nil +} + +func resourceNutanixNDBMaintenanceWindowUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.MaintenanceWindowInput{} + sch := &era.MaintenaceSchedule{} + + resp, err := conn.Service.ReadMaintenanceWindow(ctx, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if resp != nil { + req.Name = resp.Name + req.Description = resp.Description + req.Timezone = resp.Timezone + + // read schedule info + + if resp.Schedule != nil { + sch.DayOfWeek = resp.Schedule.DayOfWeek + sch.Duration = resp.Schedule.Duration + sch.StartTime = resp.Schedule.StartTime + sch.Recurrence = resp.Schedule.Recurrence + sch.WeekOfMonth = resp.Schedule.WeekOfMonth + } + } + if d.HasChange("name") { + req.Name = utils.StringPtr(d.Get("name").(string)) + req.ResetName = utils.BoolPtr(true) + } + + if d.HasChange("description") { + req.Description = utils.StringPtr(d.Get("description").(string)) + req.ResetDescription = utils.BoolPtr(true) + } + + if d.HasChange("timezone") { + req.Timezone = utils.StringPtr(d.Get("timezone").(string)) + } + + if d.HasChange("recurrence") { + sch.Recurrence = utils.StringPtr(d.Get("recurrence").(string)) + } + + if d.HasChange("duration") { + sch.Duration = utils.IntPtr(d.Get("duration").(int)) + } + + if d.HasChange("start_time") { + sch.StartTime = 
utils.StringPtr(d.Get("start_time").(string)) + } + + if d.HasChange("day_of_week") { + sch.DayOfWeek = utils.StringPtr(d.Get("day_of_week").(string)) + } + + if d.HasChange("week_of_month") { + sch.WeekOfMonth = utils.IntPtr(d.Get("week_of_month").(int)) + } + + req.Schedule = sch + req.ResetSchedule = utils.BoolPtr(true) + + respUpdate, err := conn.Service.UpdateMaintenaceWindow(ctx, req, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + log.Printf("NDB Maintenance Window with %s id is updated successfully", *respUpdate.ID) + return resourceNutanixNDBMaintenanceWindowRead(ctx, d, meta) +} + +func resourceNutanixNDBMaintenanceWindowDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.DeleteMaintenanceWindow(ctx, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if resp.Status == utils.StringPtr("success") { + log.Printf("NDB Maintenance Window with %s id is deleted successfully", d.Id()) + d.SetId("") + } + return nil +} + +func flattenMaintenanceSchedule(pr *era.MaintenaceSchedule) []map[string]interface{} { + if pr != nil { + res := make([]map[string]interface{}, 0) + + schedule := map[string]interface{}{} + + schedule["recurrence"] = pr.Recurrence + schedule["duration"] = pr.Duration + schedule["start_time"] = pr.StartTime + schedule["day_of_week"] = pr.DayOfWeek + schedule["week_of_month"] = pr.WeekOfMonth + schedule["threshold"] = pr.Threshold + schedule["hour"] = pr.Hour + schedule["minute"] = pr.Minute + schedule["timezone"] = pr.TimeZone + + res = append(res, schedule) + return res + } + return nil +} + +func expandMaintenanceSchdeule(pr []interface{}) *era.MaintenaceSchedule { + if len(pr) > 0 { + sch := &era.MaintenaceSchedule{} + + for _, v := range pr { + val := v.(map[string]interface{}) + + if recurrence, ok := val["recurrence"]; ok { + sch.Recurrence = utils.StringPtr(recurrence.(string)) + } + + if duration, ok := val["duration"]; ok 
{ + sch.Duration = utils.IntPtr(duration.(int)) + } + + if startTime, ok := val["start_time"]; ok { + sch.StartTime = utils.StringPtr(startTime.(string)) + } + + if dayOfWeek, ok := val["day_of_week"]; ok { + sch.DayOfWeek = utils.StringPtr(dayOfWeek.(string)) + } + + if weekOfMonth, ok := val["week_of_month"]; ok && len(weekOfMonth.(string)) > 0 { + sch.WeekOfMonth = utils.IntPtr(weekOfMonth.(int)) + } + } + return sch + } + return nil +} diff --git a/nutanix/resource_nutanix_ndb_maintenance_window_test.go b/nutanix/resource_nutanix_ndb_maintenance_window_test.go new file mode 100644 index 00000000..f65d625f --- /dev/null +++ b/nutanix/resource_nutanix_ndb_maintenance_window_test.go @@ -0,0 +1,126 @@ +package nutanix + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceMaintenaceWindowName = "nutanix_ndb_maintenance_window.acctest-managed" + +func TestAccEra_MaintenanceWindow(t *testing.T) { + name := "test-maintenance" + desc := "this is desc" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraMaintenanceWindow(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "name", name), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "description", desc), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "recurrence", "WEEKLY"), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "duration", "2"), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "day_of_week", "TUESDAY"), + ), + }, + }, + }) +} + +func TestAccEra_MaintenanceWindowUpdate(t *testing.T) { + name := "test-maintenance" + updatedName := "test-maintenance-updated" + desc := "this is desc" + updatedDesc := "this desc is updated" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + 
Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraMaintenanceWindow(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "name", name), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "description", desc), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "recurrence", "WEEKLY"), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "duration", "2"), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "day_of_week", "TUESDAY"), + ), + }, + { + Config: testAccEraMaintenanceWindowUpdate(updatedName, updatedDesc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "name", updatedName), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "description", updatedDesc), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "recurrence", "WEEKLY"), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "duration", "4"), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "day_of_week", "MONDAY"), + ), + }, + }, + }) +} + +func TestAccEra_MaintenanceWindow_MonthlyRecurrence(t *testing.T) { + name := "test-maintenance" + desc := "this is desc" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraMaintenanceWindowByMonthlyRecurrence(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "name", name), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "description", desc), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "recurrence", "MONTHLY"), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "duration", "2"), + resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "day_of_week", "TUESDAY"), + 
resource.TestCheckResourceAttr(resourceMaintenaceWindowName, "week_of_month", "4"), + ), + }, + }, + }) +} + +func testAccEraMaintenanceWindow(name, desc string) string { + return fmt.Sprintf(` + resource nutanix_ndb_maintenance_window acctest-managed{ + name = "%[1]s" + description = "%[2]s" + recurrence = "WEEKLY" + duration = 2 + day_of_week = "TUESDAY" + start_time = "17:04:47" + } + `, name, desc) +} + +func testAccEraMaintenanceWindowUpdate(name, desc string) string { + return fmt.Sprintf(` + resource nutanix_ndb_maintenance_window acctest-managed{ + name = "%[1]s" + description = "%[2]s" + recurrence = "WEEKLY" + duration = 4 + day_of_week = "MONDAY" + start_time = "17:04:47" + } + `, name, desc) +} + +func testAccEraMaintenanceWindowByMonthlyRecurrence(name, desc string) string { + return fmt.Sprintf(` + resource nutanix_ndb_maintenance_window acctest-managed{ + name = "%[1]s" + description = "%[2]s" + recurrence = "MONTHLY" + duration = 2 + day_of_week = "TUESDAY" + start_time = "17:04:47" + week_of_month= 4 + } + `, name, desc) +} diff --git a/nutanix/resource_nutanix_ndb_network.go b/nutanix/resource_nutanix_ndb_network.go new file mode 100644 index 00000000..808903b4 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_network.go @@ -0,0 +1,411 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBNetwork() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBNetworkCreate, + ReadContext: resourceNutanixNDBNetworkRead, + UpdateContext: resourceNutanixNDBNetworkUpdate, + DeleteContext: resourceNutanixNDBNetworkDelete, + Schema: map[string]*schema.Schema{ + "name": { + Type: 
schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"DHCP", "Static"}, false), + }, + "cluster_id": { + Type: schema.TypeString, + Required: true, + }, + "ip_pools": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_ip": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "end_ip": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "modified_by": { + Type: schema.TypeString, + Computed: true, + }, + "addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "gateway": { + Type: schema.TypeString, + Optional: true, + }, + "subnet_mask": { + Type: schema.TypeString, + Optional: true, + }, + "primary_dns": { + Type: schema.TypeString, + Optional: true, + }, + "secondary_dns": { + Type: schema.TypeString, + Optional: true, + }, + "dns_domain": { + Type: schema.TypeString, + Optional: true, + }, + + // computed + "managed": { + Type: schema.TypeBool, + Computed: true, + }, + "stretched_vlan_id": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "properties_map": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vlan_subnet_mask": { + Type: schema.TypeString, + Computed: true, + }, + 
"vlan_primary_dns": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_secondary_dns": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_gateway": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func resourceNutanixNDBNetworkCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.NetworkIntentInput{} + + if name, ok := d.GetOk("name"); ok { + req.Name = utils.StringPtr(name.(string)) + } + if clsID, ok := d.GetOk("cluster_id"); ok { + req.ClusterID = utils.StringPtr(clsID.(string)) + } + if netType, ok := d.GetOk("type"); ok { + req.Type = utils.StringPtr(netType.(string)) + } + if ipPools, ok := d.GetOk("ip_pools"); ok { + ipPoolList := ipPools.([]interface{}) + + poolList := make([]*era.IPPools, 0) + for _, v := range ipPoolList { + pool := &era.IPPools{} + val := v.(map[string]interface{}) + + if start, ok := val["start_ip"]; ok { + pool.StartIP = utils.StringPtr(start.(string)) + } + if end, ok := val["end_ip"]; ok { + pool.EndIP = utils.StringPtr(end.(string)) + } + poolList = append(poolList, pool) + } + req.IPPools = poolList + } + + props := make([]*era.Properties, 0) + if vlanGateway, ok := d.GetOk("gateway"); ok { + props = append(props, &era.Properties{ + Name: utils.StringPtr("VLAN_GATEWAY"), + Value: utils.StringPtr(vlanGateway.(string)), + }) + } + + if vlanSubnetMask, ok := d.GetOk("subnet_mask"); ok { + props = append(props, &era.Properties{ + Name: utils.StringPtr("VLAN_SUBNET_MASK"), + Value: utils.StringPtr(vlanSubnetMask.(string)), + }) + } + + if vlanPrimaryDNS, ok := d.GetOk("primary_dns"); ok { + props = append(props, &era.Properties{ + Name: utils.StringPtr("VLAN_PRIMARY_DNS"), + Value: utils.StringPtr(vlanPrimaryDNS.(string)), + }) + } + + if vlanSecDNS, ok := d.GetOk("secondary_dns"); ok { + props = append(props, &era.Properties{ + Name: utils.StringPtr("VLAN_SECONDARY_DNS"), + Value: 
utils.StringPtr(vlanSecDNS.(string)), + }) + } + + if vlanDNSDomain, ok := d.GetOk("dns_domain"); ok { + props = append(props, &era.Properties{ + Name: utils.StringPtr("VLAN_DNS_DOMAIN"), + Value: utils.StringPtr(vlanDNSDomain.(string)), + }) + } + + req.Properties = props + // api to create network in ndb + resp, err := conn.Service.CreateNetwork(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(*resp.ID) + log.Printf("NDB Network with %s id created successfully", d.Id()) + return resourceNutanixNDBNetworkRead(ctx, d, meta) +} + +func resourceNutanixNDBNetworkRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.GetNetwork(ctx, d.Id(), "") + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + if err := d.Set("managed", resp.Managed); err != nil { + return diag.FromErr(err) + } + if err := d.Set("cluster_id", resp.ClusterID); err != nil { + return diag.FromErr(err) + } + props := []interface{}{} + for _, prop := range resp.Properties { + props = append(props, map[string]interface{}{ + "name": prop.Name, + "value": prop.Value, + "secure": prop.Secure, + }) + } + if err := d.Set("properties", props); err != nil { + return diag.FromErr(err) + } + + if resp.PropertiesMap != nil { + d.Set("properties_map", flattenPropertiesMap(resp.PropertiesMap)) + } + + if resp.IPPools != nil { + d.Set("ip_pools", flattenIPPools(resp.IPPools)) + } + return nil +} + +func resourceNutanixNDBNetworkUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + updateReq := &era.NetworkIntentInput{} + + resp, err := conn.Service.GetNetwork(ctx, d.Id(), "") + if err != nil { + return diag.FromErr(err) + } + + if resp != nil { + updateReq.Name = resp.Name + 
updateReq.ClusterID = resp.ClusterID + updateReq.Type = resp.Type + updateReq.Properties = resp.Properties + } + + if d.HasChange("type") { + updateReq.Type = utils.StringPtr(d.Get("type").(string)) + } + + if d.HasChange("gateway") || d.HasChange("subnet_mask") || + d.HasChange("primary_dns") || d.HasChange("secondary_dns") || d.HasChange("dns_domain") { + props := make([]*era.Properties, 0) + if vlanGateway, ok := d.GetOk("gateway"); ok { + props = append(props, &era.Properties{ + Name: utils.StringPtr("VLAN_GATEWAY"), + Value: utils.StringPtr(vlanGateway.(string)), + }) + } + + if vlanSubnetMask, ok := d.GetOk("subnet_mask"); ok { + props = append(props, &era.Properties{ + Name: utils.StringPtr("VLAN_SUBNET_MASK"), + Value: utils.StringPtr(vlanSubnetMask.(string)), + }) + } + + if vlanPrimaryDNS, ok := d.GetOk("primary_dns"); ok { + props = append(props, &era.Properties{ + Name: utils.StringPtr("VLAN_PRIMARY_DNS"), + Value: utils.StringPtr(vlanPrimaryDNS.(string)), + }) + } + + if vlanSecDNS, ok := d.GetOk("secondary_dns"); ok { + props = append(props, &era.Properties{ + Name: utils.StringPtr("VLAN_SECONDARY_DNS"), + Value: utils.StringPtr(vlanSecDNS.(string)), + }) + } + + if vlanDNSDomain, ok := d.GetOk("dns_domain"); ok { + props = append(props, &era.Properties{ + Name: utils.StringPtr("VLAN_DNS_DOMAIN"), + Value: utils.StringPtr(vlanDNSDomain.(string)), + }) + } + + updateReq.Properties = props + } + + // API to update + + _, er := conn.Service.UpdateNetwork(ctx, updateReq, d.Id()) + if er != nil { + return diag.FromErr(er) + } + log.Printf("NDB Network with %s id is updated successfully", d.Id()) + return resourceNutanixNDBNetworkRead(ctx, d, meta) +} + +func resourceNutanixNDBNetworkDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.DeleteNetwork(ctx, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if resp == utils.StringPtr("vLAN Successfully 
Removed.") { + log.Printf("NDB Network with %s id is deleted successfully", d.Id()) + d.SetId("") + } + return nil +} + +func flattenPropertiesMap(pm *era.NetworkPropertiesmap) []interface{} { + if pm != nil { + propMap := []interface{}{} + prop := map[string]interface{}{} + + prop["vlan_gateway"] = pm.VLANGateway + prop["vlan_primary_dns"] = pm.VLANPrimaryDNS + prop["vlan_secondary_dns"] = pm.VLANSecondaryDNS + prop["vlan_subnet_mask"] = pm.VLANSubnetMask + + propMap = append(propMap, prop) + return propMap + } + return nil +} + +func flattenIPPools(pools []*era.IPPools) []interface{} { + if len(pools) > 0 { + ipList := make([]interface{}, 0) + + for _, v := range pools { + ips := map[string]interface{}{} + + ips["id"] = v.ID + ips["modified_by"] = v.ModifiedBy + ips["start_ip"] = v.StartIP + ips["end_ip"] = v.EndIP + if v.IPAddresses != nil { + ipAdd := make([]interface{}, 0) + + for _, v := range v.IPAddresses { + adds := map[string]interface{}{} + + adds["ip"] = v.IP + adds["status"] = v.Status + + ipAdd = append(ipAdd, adds) + } + ips["addresses"] = ipAdd + } + + ipList = append(ipList, ips) + } + return ipList + } + return nil +} diff --git a/nutanix/resource_nutanix_ndb_profiles.go b/nutanix/resource_nutanix_ndb_profiles.go index 54deb73c..03a0163c 100644 --- a/nutanix/resource_nutanix_ndb_profiles.go +++ b/nutanix/resource_nutanix_ndb_profiles.go @@ -720,7 +720,7 @@ func resourceNutanixNDBProfileCreate(ctx context.Context, d *schema.ResourceData return diag.FromErr(er) } } - log.Printf("NDB Profile with %s id created successfully", d.Id()) + log.Printf("NDB Profile with %s id is created successfully", d.Id()) return resourceNutanixNDBProfileRead(ctx, d, meta) } @@ -895,7 +895,7 @@ func resourceNutanixNDBProfileUpdate(ctx context.Context, d *schema.ResourceData if er != nil { return diag.FromErr(er) } - log.Printf("NDB Profile with %s id updated successfully", d.Id()) + log.Printf("NDB Profile with %s id is updated successfully", d.Id()) return 
resourceNutanixNDBProfileRead(ctx, d, meta) } @@ -908,7 +908,7 @@ func resourceNutanixNDBProfileDelete(ctx context.Context, d *schema.ResourceData } if resp == utils.StringPtr("Profile Successfully Deleted.") { - log.Printf("NDB Profile with %s id deleted successfully", d.Id()) + log.Printf("NDB Profile with %s id is deleted successfully", d.Id()) d.SetId("") } return nil diff --git a/nutanix/resource_nutanix_ndb_register_database.go b/nutanix/resource_nutanix_ndb_register_database.go index a2a17bf4..509b7c65 100644 --- a/nutanix/resource_nutanix_ndb_register_database.go +++ b/nutanix/resource_nutanix_ndb_register_database.go @@ -17,6 +17,14 @@ func resourceNutanixNDBRegisterDatabase() *schema.Resource { ReadContext: resourceNutanixNDBRegisterDatabaseRead, UpdateContext: resourceNutanixNDBRegisterDatabaseUpdate, DeleteContext: resourceNutanixNDBRegisterDatabaseDelete, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(EraProvisionTimeout), + Update: schema.DefaultTimeout(EraProvisionTimeout), + Delete: schema.DefaultTimeout(EraProvisionTimeout), + }, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, Schema: map[string]*schema.Schema{ "database_type": { Type: schema.TypeString, @@ -142,6 +150,37 @@ func resourceNutanixNDBRegisterDatabase() *schema.Resource { }, }, + // delete values + "delete": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "remove": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "soft_remove": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "forced": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "delete_time_machine": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "delete_logical_cluster": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, // computed values "name": { @@ -168,10 +207,6 @@ func resourceNutanixNDBRegisterDatabase() *schema.Resource { }, }, }, 
- "owner_id": { - Type: schema.TypeString, - Computed: true, - }, "time_machine": dataSourceEraTimeMachine(), "date_created": { Type: schema.TypeString, @@ -185,18 +220,6 @@ func resourceNutanixNDBRegisterDatabase() *schema.Resource { Type: schema.TypeBool, Computed: true, }, - "era_created": { - Type: schema.TypeBool, - Computed: true, - }, - "internal": { - Type: schema.TypeBool, - Computed: true, - }, - "placeholder": { - Type: schema.TypeBool, - Computed: true, - }, "type": { Type: schema.TypeString, Computed: true, @@ -237,7 +260,6 @@ func resourceNutanixNDBRegisterDatabase() *schema.Resource { Type: schema.TypeString, }, }, - "metadata": dataSourceEraDBInstanceMetadata(), "metric": { Type: schema.TypeMap, Computed: true, @@ -305,7 +327,7 @@ func resourceNutanixNDBRegisterDatabaseCreate(ctx context.Context, d *schema.Res if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { return diag.Errorf("error waiting for db register (%s) to create: %s", resp.Entityid, errWaitTask) } - log.Printf("NDB register database with %s id created successfully", d.Id()) + log.Printf("NDB register database with %s id is created successfully", d.Id()) return resourceNutanixNDBRegisterDatabaseRead(ctx, d, meta) } @@ -327,15 +349,33 @@ func resourceNutanixNDBRegisterDatabaseDelete(ctx context.Context, d *schema.Res dbID := d.Id() - req := era.DeleteDatabaseRequest{ - Delete: false, - Remove: true, - Softremove: false, - Forced: false, - Deletetimemachine: true, - Deletelogicalcluster: true, + req := &era.DeleteDatabaseRequest{} + + if delete, ok := d.GetOk("delete"); ok { + req.Delete = delete.(bool) } - res, err := conn.Service.DeleteDatabase(ctx, &req, dbID) + + if remove, ok := d.GetOk("remove"); ok { + req.Remove = remove.(bool) + } + + if softremove, ok := d.GetOk("soft_remove"); ok { + req.Softremove = softremove.(bool) + } + + if forced, ok := d.GetOk("forced"); ok { + req.Forced = forced.(bool) + } + + if deltms, ok := d.GetOk("delete_time_machine"); ok 
{ + req.Deletetimemachine = deltms.(bool) + } + + if dellogicalcls, ok := d.GetOk("delete_logical_cluster"); ok { + req.Deletelogicalcluster = dellogicalcls.(bool) + } + + res, err := conn.Service.DeleteDatabase(ctx, req, dbID) if err != nil { return diag.FromErr(err) } @@ -356,14 +396,14 @@ func resourceNutanixNDBRegisterDatabaseDelete(ctx context.Context, d *schema.Res Pending: []string{"PENDING"}, Target: []string{"COMPLETED", "FAILED"}, Refresh: eraRefresh(ctx, conn, opReq), - Timeout: d.Timeout(schema.TimeoutCreate), + Timeout: d.Timeout(schema.TimeoutDelete), Delay: eraDelay, } if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil { return diag.Errorf("error waiting for unregister db Instance (%s) to delete: %s", res.Entityid, errWaitTask) } - log.Printf("NDB register database with %s id deleted successfully", d.Id()) + log.Printf("NDB register database with %s id is deleted successfully", d.Id()) return nil } diff --git a/nutanix/resource_nutanix_ndb_register_database_test.go b/nutanix/resource_nutanix_ndb_register_database_test.go deleted file mode 100644 index e626a202..00000000 --- a/nutanix/resource_nutanix_ndb_register_database_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package nutanix - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" -) - -const resourceRegisterDB = "nutanix_ndb_database.acctest-managed" - -func TestAccEra_Registerbasic(t *testing.T) { - t.Skip() - name := "test-pg-inst-tf" - desc := "this is desc" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccEraPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccEraDatabaseRegisterConfig(name, desc), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceRegisterDB, "name", name), - resource.TestCheckResourceAttr(resourceRegisterDB, "description", desc), - ), - }, - }, - }) -} - -func testAccEraDatabaseRegisterConfig(name, desc string) string 
{ - return fmt.Sprintf(` - data "nutanix_ndb_profiles" "p"{ - } - data "nutanix_ndb_slas" "slas"{} - data "nutanix_ndb_clusters" "clusters"{} - - locals { - slas = { - for p in data.nutanix_ndb_slas.slas.slas: p.name => p - } - clusters = { - for p in data.nutanix_ndb_clusters.clusters.clusters: p.name => p - } - } - - resource "nutanix_ndb_register_database" "name" { - database_type = "postgres_database" - database_name= "%[1]s" - description = "%[2]s" - vm_username = "era" - vm_password = "pass" - vm_ip = "10.51.144.226" - nx_cluster_id = local.clusters.EraCluster.id - time_machine_info { - name= "test-pg-inst-regis" - description="tms by terraform" - slaid=local.slas["DEFAULT_OOB_BRONZE_SLA"].id - schedule { - snapshottimeofday{ - hours= 13 - minutes= 0 - seconds= 0 - } - continuousschedule{ - enabled=true - logbackupinterval= 30 - snapshotsperday=1 - } - weeklyschedule{ - enabled=true - dayofweek= "WEDNESDAY" - } - monthlyschedule{ - enabled = true - dayofmonth= "27" - } - quartelyschedule{ - enabled=true - startmonth="JANUARY" - dayofmonth= 27 - } - yearlyschedule{ - enabled= false - dayofmonth= 31 - month="DECEMBER" - } - } - } - postgress_info{ - listener_port= "5432" - db_user= "postgres" - // postgres_software_home= "/usr/pgsql-10.4" - // software_home= "/usr/pgsql-10.4" - db_password ="pass" - db_name= "testdb1" - } - } - `, name, desc) -} diff --git a/nutanix/resource_nutanix_ndb_sla.go b/nutanix/resource_nutanix_ndb_sla.go index 0edec793..56d94f57 100644 --- a/nutanix/resource_nutanix_ndb_sla.go +++ b/nutanix/resource_nutanix_ndb_sla.go @@ -129,7 +129,7 @@ func resourceNutanixNDBSlaCreate(ctx context.Context, d *schema.ResourceData, me } d.SetId(*resp.ID) - log.Printf("NDB SLA with %s id created successfully", d.Id()) + log.Printf("NDB SLA with %s id is created successfully", d.Id()) return resourceNutanixNDBSlaRead(ctx, d, meta) } @@ -259,7 +259,7 @@ func resourceNutanixNDBSlaUpdate(ctx context.Context, d *schema.ResourceData, me if err != nil { return 
diag.FromErr(err) } - log.Printf("NDB SLA with %s id updated successfully", d.Id()) + log.Printf("NDB SLA with %s id is updated successfully", d.Id()) return resourceNutanixNDBSlaRead(ctx, d, meta) } @@ -272,7 +272,7 @@ func resourceNutanixNDBSlaDelete(ctx context.Context, d *schema.ResourceData, me } if resp.Status == utils.StringPtr("success") { - log.Printf("NDB SLA with %s id deleted successfully", d.Id()) + log.Printf("NDB SLA with %s id is deleted successfully", d.Id()) d.SetId("") } return nil diff --git a/nutanix/resource_nutanix_ndb_software_version_profile.go b/nutanix/resource_nutanix_ndb_software_version_profile.go index 2f2e9584..22e797e3 100644 --- a/nutanix/resource_nutanix_ndb_software_version_profile.go +++ b/nutanix/resource_nutanix_ndb_software_version_profile.go @@ -3,6 +3,7 @@ package nutanix import ( "context" "log" + "time" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -12,12 +13,19 @@ import ( "github.com/terraform-providers/terraform-provider-nutanix/utils" ) +var ( + SoftwareVersionProfileTimeout = 15 * time.Minute +) + func resourceNutanixNDBSoftwareVersionProfile() *schema.Resource { return &schema.Resource{ CreateContext: resourceNutanixNDBSoftwareVersionProfileCreate, ReadContext: resourceNutanixNDBSoftwareVersionProfileRead, UpdateContext: resourceNutanixNDBSoftwareVersionProfileUpdate, DeleteContext: resourceNutanixNDBSoftwareVersionProfileDelete, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(SoftwareVersionProfileTimeout), + }, Schema: map[string]*schema.Schema{ "profile_id": { Type: schema.TypeString, @@ -287,7 +295,7 @@ func resourceNutanixNDBSoftwareVersionProfileCreate(ctx context.Context, d *sche if er != nil { return diag.FromErr(er) } - log.Printf("NDB Software Version Profile with %s id created successfully", d.Id()) + log.Printf("NDB Software Version Profile with %s id is created successfully", d.Id()) return 
resourceNutanixNDBSoftwareVersionProfileRead(ctx, d, meta) } @@ -392,7 +400,7 @@ func resourceNutanixNDBSoftwareVersionProfileUpdate(ctx context.Context, d *sche if er != nil { return diag.FromErr(er) } - log.Printf("NDB Software Version Profile with %s id updated successfully", d.Id()) + log.Printf("NDB Software Version Profile with %s id is updated successfully", d.Id()) return resourceNutanixNDBSoftwareVersionProfileRead(ctx, d, meta) } @@ -405,7 +413,7 @@ func resourceNutanixNDBSoftwareVersionProfileDelete(ctx context.Context, d *sche } if resp == utils.StringPtr("Profile Successfully Deleted.") { - log.Printf("NDB Software Version Profile with %s id deleted successfully", d.Id()) + log.Printf("NDB Software Version Profile with %s id is deleted successfully", d.Id()) d.SetId("") } return nil diff --git a/nutanix/resource_nutanix_ndb_stretched_vlans.go b/nutanix/resource_nutanix_ndb_stretched_vlans.go new file mode 100644 index 00000000..de75edfd --- /dev/null +++ b/nutanix/resource_nutanix_ndb_stretched_vlans.go @@ -0,0 +1,342 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBStretchedVlan() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBStretchedVlanCreate, + ReadContext: resourceNutanixNDBStretchedVlanRead, + UpdateContext: resourceNutanixNDBStretchedVlanUpdate, + DeleteContext: resourceNutanixNDBStretchedVlanDelete, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + }, + "vlan_ids": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: 
schema.TypeString, + }, + }, + "metadata": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gateway": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "subnet_mask": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + //computed field + "vlans_list": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + "managed": { + Type: schema.TypeBool, + Computed: true, + }, + "stretched_vlan_id": { + Type: schema.TypeString, + Computed: true, + }, + "properties": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "secure": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "properties_map": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vlan_subnet_mask": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_primary_dns": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_secondary_dns": { + Type: schema.TypeString, + Computed: true, + }, + "vlan_gateway": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func resourceNutanixNDBStretchedVlanCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.StretchedVlansInput{} + + if name, ok := d.GetOk("name"); ok { + req.Name = utils.StringPtr(name.(string)) + } + if desc, ok := 
d.GetOk("description"); ok { + req.Description = utils.StringPtr(desc.(string)) + } + if netType, ok := d.GetOk("type"); ok { + req.Type = utils.StringPtr(netType.(string)) + } + if vlanIDs, ok := d.GetOk("vlan_ids"); ok { + res := make([]*string, 0) + vlanList := vlanIDs.([]interface{}) + for _, v := range vlanList { + res = append(res, utils.StringPtr(v.(string))) + } + + req.VlanIDs = res + } + + // api to stretched vlan + + resp, err := conn.Service.CreateStretchedVlan(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(*resp.ID) + log.Printf("NDB Stretched Vlan with %s id is created successfully", d.Id()) + return resourceNutanixNDBStretchedVlanRead(ctx, d, meta) +} + +func resourceNutanixNDBStretchedVlanRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.GetStretchedVlan(ctx, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("name", resp.Name); err != nil { + return diag.FromErr(err) + } + if err := d.Set("type", resp.Type); err != nil { + return diag.FromErr(err) + } + if err := d.Set("description", resp.Description); err != nil { + return diag.FromErr(err) + } + if err := d.Set("metadata", flattenStretchedVlanMetadata(resp.Metadata)); err != nil { + return diag.FromErr(err) + } + if err := d.Set("vlans_list", flattenStretchedVlans(resp.Vlans)); err != nil { + return diag.FromErr(err) + } + return nil +} + +func resourceNutanixNDBStretchedVlanUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + updateReq := &era.StretchedVlansInput{} + metadata := &era.StretchedVlanMetadata{} + // api to network api + + resp, err := conn.Service.GetStretchedVlan(ctx, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if resp != nil { + updateReq.Name = resp.Name + updateReq.Description = resp.Name + // updateReq.Metadata = resp.Metadata + updateReq.Type = 
resp.Type + + // get the vlans ids + if resp.Vlans != nil { + vlans := make([]*string, 0) + for _, v := range resp.Vlans { + vlans = append(vlans, v.ID) + } + updateReq.VlanIDs = vlans + } + + if resp.Metadata != nil { + metadata = resp.Metadata + updateReq.Metadata = metadata + } + } + + if d.HasChange("name") { + updateReq.Name = utils.StringPtr(d.Get("name").(string)) + } + if d.HasChange("description") { + updateReq.Description = utils.StringPtr(d.Get("description").(string)) + } + if d.HasChange("type") { + updateReq.Type = utils.StringPtr(d.Get("type").(string)) + } + if d.HasChange("vlan_ids") { + res := make([]*string, 0) + vlanList := d.Get("vlan_ids").([]interface{}) + for _, v := range vlanList { + res = append(res, utils.StringPtr(v.(string))) + } + + updateReq.VlanIDs = res + } + if d.HasChange("metadata") { + metadataList := d.Get("metadata").([]interface{}) + for _, v := range metadataList { + val := v.(map[string]interface{}) + + if gateway, ok := val["gateway"]; ok && len(gateway.(string)) > 0 { + metadata.Gateway = utils.StringPtr(gateway.(string)) + } + if subnetMask, ok := val["subnet_mask"]; ok && len(subnetMask.(string)) > 0 { + metadata.SubnetMask = utils.StringPtr(subnetMask.(string)) + } + } + updateReq.Metadata = metadata + } + + updateResp, er := conn.Service.UpdateStretchedVlan(ctx, d.Id(), updateReq) + if er != nil { + return diag.FromErr(er) + } + + if updateResp != nil { + log.Printf("NDB Stretched Vlan with %s id is updated successfully", d.Id()) + } + return resourceNutanixNDBStretchedVlanRead(ctx, d, meta) +} + +func resourceNutanixNDBStretchedVlanDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.DeleteStretchedVlan(ctx, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if resp == utils.StringPtr("vLAN Successfully Removed.") { + log.Printf("NDB Stretched Vlan with %s id is deleted successfully", d.Id()) + d.SetId("") + } + 
return nil +} + +func flattenStretchedVlans(net []*era.NetworkIntentResponse) []interface{} { + if len(net) > 0 { + netList := make([]interface{}, len(net)) + + for _, v := range net { + nwt := map[string]interface{}{} + nwt["id"] = v.ID + nwt["name"] = v.Name + nwt["type"] = v.Type + nwt["cluster_id"] = v.ClusterID + nwt["managed"] = v.Managed + if v.Properties != nil { + props := []interface{}{} + for _, prop := range v.Properties { + props = append(props, map[string]interface{}{ + "name": prop.Name, + "value": prop.Value, + "secure": prop.Secure, + }) + } + nwt["properties"] = props + } + if v.PropertiesMap != nil { + nwt["properties_map"] = flattenPropertiesMap(v.PropertiesMap) + } + if v.StretchedVlanID != nil { + nwt["stretched_vlan_id"] = v.StretchedVlanID + } + netList = append(netList, nwt) + } + return netList + } + return nil +} + +func flattenStretchedVlanMetadata(pr *era.StretchedVlanMetadata) []interface{} { + if pr != nil { + metaList := make([]interface{}, 0) + + meta := map[string]interface{}{} + + meta["gateway"] = pr.Gateway + meta["subnet_mask"] = pr.SubnetMask + + metaList = append(metaList, meta) + return metaList + } + return nil +} diff --git a/nutanix/resource_nutanix_ndb_tags.go b/nutanix/resource_nutanix_ndb_tags.go new file mode 100644 index 00000000..2b10a2b7 --- /dev/null +++ b/nutanix/resource_nutanix_ndb_tags.go @@ -0,0 +1,215 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBTags() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBTagsCreate, + ReadContext: resourceNutanixNDBTagsRead, + UpdateContext: resourceNutanixNDBTagsUpdate, + DeleteContext: 
resourceNutanixNDBTagsDelete, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "required": { + Type: schema.TypeBool, + Optional: true, + }, + "entity_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"DATABASE", "TIME_MACHINE", + "CLONE", "DATABASE_SERVER"}, false), + }, + "status": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"ENABLED", "DEPRECATED"}, false), + }, + //computed values + + "owner": { + Type: schema.TypeString, + Computed: true, + }, + "values": { + Type: schema.TypeInt, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceNutanixNDBTagsCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.CreateTagsInput{} + tagName := "" + entityType := "" + + if name, ok := d.GetOk("name"); ok { + req.Name = utils.StringPtr(name.(string)) + tagName = name.(string) + } + + if desc, ok := d.GetOk("description"); ok { + req.Description = utils.StringPtr(desc.(string)) + } + + if require, ok := d.GetOk("required"); ok { + req.Required = utils.BoolPtr(require.(bool)) + } + + if entity, ok := d.GetOk("entity_type"); ok { + req.EntityType = utils.StringPtr(entity.(string)) + entityType = entity.(string) + } + + _, err := conn.Service.CreateTags(ctx, req) + if err != nil { + return diag.FromErr(err) + } + + uniqueID := "" + // fetch all the tags + tagsListResp, er := conn.Service.ListTags(ctx) + if er != nil { + return diag.FromErr(er) + } + + for _, v := range *tagsListResp { + if tagName == utils.StringValue(v.Name) && entityType == utils.StringValue(v.EntityType) { + uniqueID = *v.ID + } + } + d.SetId(uniqueID) + 
log.Printf("NDB Tag with %s id is created successfully", uniqueID) + return resourceNutanixNDBTagsRead(ctx, d, meta) +} + +func resourceNutanixNDBTagsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.ReadTags(ctx, d.Id()) + + if err != nil { + return diag.FromErr(err) + } + + if err = d.Set("name", resp.Name); err != nil { + return diag.Errorf("error setting name for tag %s: %s", d.Id(), err) + } + + if err = d.Set("description", resp.Description); err != nil { + return diag.Errorf("error setting description for tag %s: %s", d.Id(), err) + } + if err = d.Set("date_created", resp.DateCreated); err != nil { + return diag.Errorf("error setting date created for tag %s: %s", d.Id(), err) + } + + if err = d.Set("date_modified", resp.DateModified); err != nil { + return diag.Errorf("error setting date modified for tag %s: %s", d.Id(), err) + } + if err = d.Set("owner", resp.Owner); err != nil { + return diag.Errorf("error setting owner id for tag %s: %s", d.Id(), err) + } + + if err = d.Set("required", resp.Required); err != nil { + return diag.Errorf("error setting required for tag %s: %s", d.Id(), err) + } + if err = d.Set("status", resp.Status); err != nil { + return diag.Errorf("error setting status for tag %s: %s", d.Id(), err) + } + + if err = d.Set("entity_type", resp.EntityType); err != nil { + return diag.Errorf("error setting entity type for tag %s: %s", d.Id(), err) + } + if err = d.Set("values", resp.Values); err != nil { + return diag.Errorf("error setting values for tag %s: %s", d.Id(), err) + } + return nil +} + +func resourceNutanixNDBTagsUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + updateReq := &era.GetTagsResponse{} + + // read the tag + + resp, err := conn.Service.ReadTags(ctx, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if resp != nil { + updateReq.Name = resp.Name 
+ updateReq.Description = resp.Description + updateReq.DateCreated = resp.DateCreated + updateReq.DateModified = resp.DateModified + updateReq.Owner = resp.Owner + updateReq.Required = resp.Required + updateReq.Status = resp.Status + updateReq.EntityType = resp.EntityType + updateReq.Values = resp.Values + } + + if d.HasChange("name") { + updateReq.Name = utils.StringPtr(d.Get("name").(string)) + } + if d.HasChange("description") { + updateReq.Description = utils.StringPtr(d.Get("description").(string)) + } + if d.HasChange("required") { + updateReq.Required = utils.BoolPtr(d.Get("required").(bool)) + } + if d.HasChange("status") { + updateReq.Status = utils.StringPtr(d.Get("status").(string)) + } + + updateResp, er := conn.Service.UpdateTags(ctx, updateReq, d.Id()) + if er != nil { + return diag.FromErr(er) + } + log.Printf("NDB Tag with %s id updated successfully", *updateResp.ID) + return resourceNutanixNDBTagsRead(ctx, d, meta) +} + +func resourceNutanixNDBTagsDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + resp, err := conn.Service.DeleteTags(ctx, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if resp == utils.StringPtr("Tag Successfully Deleted.") { + log.Printf("NDB Tag with %s id is deleted successfully", d.Id()) + d.SetId("") + } + return nil +} diff --git a/nutanix/resource_nutanix_ndb_tags_test.go b/nutanix/resource_nutanix_ndb_tags_test.go new file mode 100644 index 00000000..ba37069e --- /dev/null +++ b/nutanix/resource_nutanix_ndb_tags_test.go @@ -0,0 +1,92 @@ +package nutanix + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const resourceNameTag = "nutanix_ndb_tag.acctest-managed" + +func TestAccEraTag_basic(t *testing.T) { + name := "test-tag-tf" + desc := "this is tag desc" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: 
[]resource.TestStep{ + { + Config: testAccEraTagConfig(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameTag, "name", name), + resource.TestCheckResourceAttr(resourceNameTag, "description", desc), + resource.TestCheckResourceAttr(resourceNameTag, "entity_type", "DATABASE"), + resource.TestCheckResourceAttr(resourceNameTag, "required", "false"), + resource.TestCheckResourceAttr(resourceNameTag, "status", "ENABLED"), + resource.TestCheckResourceAttrSet(resourceNameTag, "date_created"), + resource.TestCheckResourceAttrSet(resourceNameTag, "date_modified"), + ), + }, + }, + }) +} + +func TestAccEraTag_WithUpdate(t *testing.T) { + name := "test-tag-tf" + updateName := "test-tag-updated" + desc := "this is tag desc" + updatedDesc := "this is updated tag desc" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccEraPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccEraTagConfig(name, desc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameTag, "name", name), + resource.TestCheckResourceAttr(resourceNameTag, "description", desc), + resource.TestCheckResourceAttr(resourceNameTag, "entity_type", "DATABASE"), + resource.TestCheckResourceAttr(resourceNameTag, "required", "false"), + resource.TestCheckResourceAttr(resourceNameTag, "status", "ENABLED"), + resource.TestCheckResourceAttrSet(resourceNameTag, "date_created"), + resource.TestCheckResourceAttrSet(resourceNameTag, "date_modified"), + ), + }, + { + Config: testAccEraTagUpdatedConfig(updateName, updatedDesc), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceNameTag, "name", updateName), + resource.TestCheckResourceAttr(resourceNameTag, "description", updatedDesc), + resource.TestCheckResourceAttr(resourceNameTag, "entity_type", "DATABASE"), + resource.TestCheckResourceAttr(resourceNameTag, "required", "true"), + 
resource.TestCheckResourceAttr(resourceNameTag, "status", "ENABLED"), + resource.TestCheckResourceAttrSet(resourceNameTag, "date_created"), + resource.TestCheckResourceAttrSet(resourceNameTag, "date_modified"), + ), + }, + }, + }) +} + +func testAccEraTagConfig(name, desc string) string { + return fmt.Sprintf(` + resource "nutanix_ndb_tag" "acctest-managed" { + name= "%[1]s" + description = "%[2]s" + entity_type = "DATABASE" + required = false + } + `, name, desc) +} + +func testAccEraTagUpdatedConfig(name, desc string) string { + return fmt.Sprintf(` + resource "nutanix_ndb_tag" "acctest-managed" { + name= "%[1]s" + description = "%[2]s" + entity_type = "DATABASE" + required = true + } + `, name, desc) +} diff --git a/nutanix/resource_nutanix_ndb_time_machine_cluster.go b/nutanix/resource_nutanix_ndb_time_machine_cluster.go new file mode 100644 index 00000000..f977828c --- /dev/null +++ b/nutanix/resource_nutanix_ndb_time_machine_cluster.go @@ -0,0 +1,234 @@ +package nutanix + +import ( + "context" + "log" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-nutanix/client/era" + "github.com/terraform-providers/terraform-provider-nutanix/utils" +) + +func resourceNutanixNDBTmsCluster() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceNutanixNDBTmsClusterCreate, + ReadContext: resourceNutanixNDBTmsClusterRead, + UpdateContext: resourceNutanixNDBTmsClusterUpdate, + DeleteContext: resourceNutanixNDBTmsClusterDelete, + Schema: map[string]*schema.Schema{ + "time_machine_id": { + Type: schema.TypeString, + Required: true, + }, + "nx_cluster_id": { + Type: schema.TypeString, + Required: true, + }, + "sla_id": { + Type: schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeString, + Optional: true, + Default: "OTHER", + }, + // computed + "status": { + Type: schema.TypeString, + 
Computed: true, + }, + "schedule_id": { + Type: schema.TypeString, + Computed: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "source_clusters": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "log_drive_status": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "date_modified": { + Type: schema.TypeString, + Computed: true, + }, + "log_drive_id": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "source": { + Type: schema.TypeBool, + Computed: true, + }, + }, + } +} + +func resourceNutanixNDBTmsClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.TmsClusterIntentInput{} + + tmsID := d.Get("time_machine_id") + + if nxcls, ok := d.GetOk("nx_cluster_id"); ok { + req.NxClusterID = utils.StringPtr(nxcls.(string)) + } + + if slaid, ok := d.GetOk("sla_id"); ok { + req.SLAID = utils.StringPtr(slaid.(string)) + } + + if clsType, ok := d.GetOk("type"); ok { + req.Type = utils.StringPtr(clsType.(string)) + } + + _, err := conn.Service.CreateTimeMachineCluster(ctx, tmsID.(string), req) + if err != nil { + return diag.FromErr(err) + } + + uuid, er := uuid.GenerateUUID() + + if er != nil { + return diag.Errorf("Error generating UUID for era clusters: %+v", err) + } + d.SetId(uuid) + log.Printf("NDB Time Machine Cluster with %s id is created successfully", d.Id()) + return resourceNutanixNDBTmsClusterRead(ctx, d, meta) +} + +func resourceNutanixNDBTmsClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + tmsID := d.Get("time_machine_id") + clsID := d.Get("nx_cluster_id") + resp, err := conn.Service.ReadTimeMachineCluster(ctx, tmsID.(string), clsID.(string)) + if err != nil { + return 
diag.FromErr(err) + } + if err = d.Set("log_drive_id", resp.LogDrive); err != nil { + return diag.Errorf("error occurred while setting log_drive_id for time machine cluster with id: %s : %s", d.Id(), err) + } + + if err = d.Set("log_drive_status", resp.LogDriveStatus); err != nil { + return diag.Errorf("error occurred while setting log_drive_status for time machine cluster with id: %s: %s", d.Id(), err) + } + + if err = d.Set("type", resp.Type); err != nil { + return diag.Errorf("error occurred while setting type for time machine cluster with id: %s: %s", d.Id(), err) + } + + if err = d.Set("description", resp.Description); err != nil { + return diag.Errorf("error occurred while setting description for time machine cluster with id: %s: %s", d.Id(), err) + } + + if err = d.Set("status", resp.Status); err != nil { + return diag.Errorf("error occurred while setting status for time machine cluster with id: %s: %s", d.Id(), err) + } + + if err = d.Set("schedule_id", resp.ScheduleID); err != nil { + return diag.Errorf("error occurred while setting schedule_id for time machine cluster with id: %s: %s", d.Id(), err) + } + + if err = d.Set("owner_id", resp.OwnerID); err != nil { + return diag.Errorf("error occurred while setting owner_id for time machine cluster with id: %s: %s", d.Id(), err) + } + + if err = d.Set("date_created", resp.DateCreated); err != nil { + return diag.Errorf("error occurred while setting date_created for time machine cluster with id: %s: %s", d.Id(), err) + } + + if err = d.Set("source", resp.Source); err != nil { + return diag.Errorf("error occurred while setting source for time machine cluster with id: %s: %s", d.Id(), err) + } + + if err = d.Set("date_modified", resp.DateModified); err != nil { + return diag.Errorf("error occurred while setting date_modified for time machine cluster with id: %s: %s", d.Id(), err) + } + if resp.SourceClusters != nil { + sourceCls := make([]*string, 0) + sourceCls = append(sourceCls, resp.SourceClusters...) 
+ + d.Set("source_clusters", sourceCls) + } + return nil +} + +func resourceNutanixNDBTmsClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + updateReq := &era.TmsClusterIntentInput{} + + tmsID := d.Get("time_machine_id") + clsID := d.Get("nx_cluster_id") + resp, err := conn.Service.ReadTimeMachineCluster(ctx, tmsID.(string), clsID.(string)) + if err != nil { + return diag.FromErr(err) + } + + if resp != nil { + updateReq.Type = resp.Type + updateReq.NxClusterID = resp.NxClusterID + } + + if d.HasChange("sla_id") { + updateReq.SLAID = utils.StringPtr(d.Get("sla_id").(string)) + updateReq.ResetSLAID = utils.BoolPtr(true) + } + + if d.HasChange("nx_cluster_id") { + updateReq.NxClusterID = utils.StringPtr(d.Get("nx_cluster_id").(string)) + } + + // update Call for time machine cluster + + _, er := conn.Service.UpdateTimeMachineCluster(ctx, tmsID.(string), clsID.(string), updateReq) + if er != nil { + return diag.FromErr(er) + } + log.Printf("NDB Time Machine Cluster with %s id is updated successfully", d.Id()) + return resourceNutanixNDBTmsClusterRead(ctx, d, meta) +} + +func resourceNutanixNDBTmsClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*Client).Era + + req := &era.DeleteTmsClusterInput{ + DeleteReplicatedSnapshots: utils.BoolPtr(true), + DeleteReplicatedProtectionDomains: utils.BoolPtr(true), + } + + tmsID := d.Get("time_machine_id") + clsID := d.Get("nx_cluster_id") + + resp, er := conn.Service.DeleteTimeMachineCluster(ctx, tmsID.(string), clsID.(string), req) + if er != nil { + return diag.FromErr(er) + } + + if resp.Status == "" { + d.SetId("") + log.Printf("NDB Time Machine Cluster with %s id is deleted successfully", d.Id()) + } + return nil +} diff --git a/website/docs/d/ndb_clone.html.markdown b/website/docs/d/ndb_clone.html.markdown index f67feed8..df926485 100644 --- a/website/docs/d/ndb_clone.html.markdown +++ 
b/website/docs/d/ndb_clone.html.markdown @@ -45,16 +45,12 @@ Describes the clone present in Nutanix Database Service * `id`: cloned id * `name`: cloned name * `description`: cloned description -* `owner_id`: owner id * `date_created`: date created for clone * `date_modified`: last modified date for clone * `tags`: allows you to assign metadata to entities (clones, time machines, databases, and database servers) by using tags. * `properties`: properties of clone * `clustered`: clustered or not * `clone`: clone or not -* `era_created`: era created -* `internal`: internal or not -* `placeholder`: placeholder of clone * `database_name`: database name * `type`: type * `database_cluster_type`: database cluster type @@ -65,10 +61,7 @@ Describes the clone present in Nutanix Database Service * `parent_time_machine_id`: parent time machine id * `time_zone`: time zone * `info`: cloned info -* `group_info`: cloned group info -* `metadata`: metadata of clone * `metric`: Metric of clone -* `category`: category * `parent_database_id`: parent database id * `parent_source_database_id`: parent source database id * `lcm_config`: LCM Config @@ -77,6 +70,5 @@ Describes the clone present in Nutanix Database Service * `database_nodes`: database nodes associated with database instance * `linked_databases`: linked databases within database instance * `databases`: database for a cloned instance -* `database_group_state_info`: database group state info See detailed information in [NDB Clone](https://www.nutanix.dev/api_references/ndb/#/2f225874df95a-get-clone-by-value-type). 
\ No newline at end of file diff --git a/website/docs/d/ndb_clones.html.markdown b/website/docs/d/ndb_clones.html.markdown index fe0418ba..db327218 100644 --- a/website/docs/d/ndb_clones.html.markdown +++ b/website/docs/d/ndb_clones.html.markdown @@ -47,16 +47,12 @@ List all the clone present in Nutanix Database Service * `id`: cloned id * `name`: cloned name * `description`: cloned description -* `owner_id`: owner id * `date_created`: date created for clone * `date_modified`: last modified date for clone * `tags`: allows you to assign metadata to entities (clones, time machines, databases, and database servers) by using tags. * `properties`: properties of clone * `clustered`: clustered or not * `clone`: clone or not -* `era_created`: era created -* `internal`: internal or not -* `placeholder`: placeholder of clone * `database_name`: database name * `type`: type * `database_cluster_type`: database cluster type @@ -67,10 +63,7 @@ List all the clone present in Nutanix Database Service * `parent_time_machine_id`: parent time machine id * `time_zone`: time zone * `info`: cloned info -* `group_info`: cloned group info -* `metadata`: metadata of clone * `metric`: Metric of clone -* `category`: category * `parent_database_id`: parent database id * `parent_source_database_id`: parent source database id * `lcm_config`: LCM Config @@ -79,7 +72,6 @@ List all the clone present in Nutanix Database Service * `database_nodes`: database nodes associated with database instance * `linked_databases`: linked databases within database instance * `databases`: database for a cloned instance -* `database_group_state_info`: database group state info See detailed information in [NDB Clones](https://www.nutanix.dev/api_references/ndb/#/fc568988b42e5-get-a-list-of-all-clones). 
\ No newline at end of file diff --git a/website/docs/d/ndb_database.html.markdown b/website/docs/d/ndb_database.html.markdown index 7e288c15..9bb69bbc 100644 --- a/website/docs/d/ndb_database.html.markdown +++ b/website/docs/d/ndb_database.html.markdown @@ -38,36 +38,24 @@ The following attributes are exported: * `description`: - description * `date_created`: - creation date * `date_modified`: - date modified -* `owner_id`: - owner ID * `properties`: - properties * `tags`: - tags attached * `clustered`: - if clustered or not * `clone`: - if cloned -* `era_created`: - if era created -* `internal`: - if internal database -* `placeholder`: - NA * `database_name`: - database instance name * `type`: - database engine type * `status`: - status of database instance -* `database_status`: - NA * `dbserver_logical_cluster_id`: - NA * `time_machine_id`: - time machine ID -* `parent_time_machine_id`: - parent time machine ID * `time_zone`: - timezone * `info`: - info regarding disks, vm, storage, etc. -* `group_info`: - group info -* `metadata`: - metadata of database instance * `metric`: - metrics -* `category`: - category of instance * `parent_database_id`: - parent database ID -* `parent_source_database_id`: - parent source database ID * `lcm_config`: - lcm configuration * `time_machine`: - time machine related config info * `database_nodes`: - nodes info * `dbserver_logical_cluster`: - NA * `linked_databases`: - list of databases created in instance with info -* `databases`: - NA -* `database_group_state_info`: - NA See detailed information in [Database Instance](https://www.nutanix.dev/api_references/era/#/b3A6MjIyMjI1NDA-get-a-database-using-id). 
diff --git a/website/docs/d/ndb_databases.html.markdown b/website/docs/d/ndb_databases.html.markdown index d916f0cc..774e4196 100644 --- a/website/docs/d/ndb_databases.html.markdown +++ b/website/docs/d/ndb_databases.html.markdown @@ -36,36 +36,24 @@ The following attributes are exported for each database_instances: * `description`: - description * `date_created`: - creation date * `date_modified`: - date modified -* `owner_id`: - owner ID * `properties`: - properties * `tags`: - tags attached * `clustered`: - if clustered or not * `clone`: - if cloned -* `era_created`: - if era created -* `internal`: - if internal database -* `placeholder`: - NA * `database_name`: - database instance name * `type`: - database engine type * `status`: - status of database instance -* `database_status`: - NA * `dbserver_logical_cluster_id`: - NA * `time_machine_id`: - time machine ID -* `parent_time_machine_id`: - parent time machine ID * `time_zone`: - timezone * `info`: - info regarding disks, vm, storage, etc. -* `group_info`: - group info -* `metadata`: - metadata of database instance * `metric`: - metrics -* `category`: - category of instance * `parent_database_id`: - parent database ID -* `parent_source_database_id`: - parent source database ID * `lcm_config`: - lcm configuration * `time_machine`: - time machine related config info * `database_nodes`: - nodes info * `dbserver_logical_cluster`: - NA * `linked_databases`: - list of databases created in instance with info -* `databases`: - NA -* `database_group_state_info`: - NA See detailed information in [List Database Instances](https://www.nutanix.dev/api_references/era/#/b3A6MjIyMjI1MzY-get-all-source-databases). 
diff --git a/website/docs/d/ndb_dbserver.html.markdown b/website/docs/d/ndb_dbserver.html.markdown new file mode 100644 index 00000000..8dd20ce8 --- /dev/null +++ b/website/docs/d/ndb_dbserver.html.markdown @@ -0,0 +1,72 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_dbserver" +sidebar_current: "docs-nutanix-datasource-ndb-dbserver" +description: |- + Describes Database Server VM in Nutanix Database Service +--- + +# nutanix_ndb_dbserver + +Describes Database Server VM in Nutanix Database Service + +## Example Usage + +```hcl + data "nutanix_ndb_dbserver" "dbserver" { + id = "{{ dbserver_vm_id }}" + } + + data "nutanix_ndb_dbserver" "dbserver" { + name = "{{ dbserver_vm_name }}" + } + + data "nutanix_ndb_dbserver" "dbserver" { + ip = "{{ dbserver_vm_ip }}" + } +``` + +## Argument Reference + +The following arguments are supported: + +* `id`: (Optional) id of database server vm +* `name`: (Optional) name of database server vm +* `ip`: (Optional) ip of database server vm +* `vm_cluster_name`: (Optional) vm cluster name of database server +* `vm_cluster_id`: (Optional) vm cluster id of database server + +## Attribute Reference + +The following attributes are exported: + +* `description`: description of db server vm +* `date_created`: date created of db server vm +* `date_modified`: date modified of db server vm +* `access_level`: access level +* `properties`: properties of db server vm +* `tags`: tags for db server vm +* `vm_cluster_uuid`: clusetr uuid for dbserver vm +* `ip_addresses`: IP addresses of the dbserver vm +* `mac_addresses`: Mac addresses of dbserver vm +* `type`: Type of entity. i.e. Dbserver +* `status`: Status of Dbserver . Active or not. 
+* `client_id`: client id +* `era_drive_id`: era drive id +* `era_version`: era version +* `vm_timezone`: timezone of dbserver vm +* `vm_info`: info of dbserver vm +* `clustered`: clustered or not +* `is_server_driven`: is server driven or not +* `protection_domain_id`: protection domain id +* `query_count`: query count +* `database_type`: database type +* `dbserver_invalid_ea_state`: dbserver invalid ea state +* `working_directory`: working directory of db server vm +* `valid_diagnostic_bundle_state`: valid diagnostic bundle state +* `windows_db_server`: windows db server +* `associated_time_machine_ids`: associated time machines ids +* `access_key_id`: access key id of dbserver vm + + +See detailed information in [Database Server VM](https://www.nutanix.dev/api_references/ndb/#/c531f4158d5f5-get-a-database-server-by-value-type). diff --git a/website/docs/d/ndb_dbservers.html.markdown b/website/docs/d/ndb_dbservers.html.markdown new file mode 100644 index 00000000..bb1898ed --- /dev/null +++ b/website/docs/d/ndb_dbservers.html.markdown @@ -0,0 +1,60 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_dbservers" +sidebar_current: "docs-nutanix-datasource-ndb-dbservers" +description: |- + List of all Database Server VM in Nutanix Database Service +--- + +# nutanix_ndb_dbservers + +List of all Database Server VM in Nutanix Database Service + +## Example Usage + +```hcl + data "nutanix_ndb_dbservers" "dbservers" { } +``` + +## Argument Reference + +The following arguments are supported: + +* `dbservers`: - list of dbservers + +## Attribute Reference + +The following attributes are exported: + +* `name`: name of dbserver vm +* `description`: description of dbserver vm +* `description`: description of db server vm +* `date_created`: date created of db server vm +* `date_modified`: date modified of db server vm +* `access_level`: access level +* `properties`: properties of db server vm +* `tags`: tags for db server vm +* `vm_cluster_uuid`: cluster uuid for 
dbserver vm +* `ip_addresses`: IP addresses of the dbserver vm +* `mac_addresses`: Mac addresses of dbserver vm +* `type`: Type of entity. i.e. Dbserver +* `status`: Status of Dbserver . Active or not. +* `client_id`: client id +* `era_drive_id`: era drive id +* `era_version`: era version +* `vm_timezone`: timezone of dbserver vm +* `vm_info`: info of dbserver vm +* `clustered`: clustered or not +* `is_server_driven`: is server driven or not +* `protection_domain_id`: protection domain id +* `query_count`: query count +* `database_type`: database type +* `dbserver_invalid_ea_state`: dbserver invalid ea state +* `working_directory`: working directory of db server vm +* `valid_diagnostic_bundle_state`: valid diagnostic bundle state +* `windows_db_server`: windows db server +* `associated_time_machine_ids`: associated time machines ids +* `access_key_id`: access key id of dbserver vm + + +See detailed information in [List of Database Server VMs](https://www.nutanix.dev/api_references/ndb/#/e4deab7ef784b-get-list-of-all-database-servers). diff --git a/website/docs/d/ndb_maintenance_window.html.markdown b/website/docs/d/ndb_maintenance_window.html.markdown new file mode 100644 index 00000000..71a6aebf --- /dev/null +++ b/website/docs/d/ndb_maintenance_window.html.markdown @@ -0,0 +1,42 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_maintenance_window" +sidebar_current: "docs-nutanix-datasource-ndb-maintenance-window" +description: |- + Describes a maintenance window in Nutanix Database Service +--- + +# nutanix_ndb_maintenance_window + +Describes a maintenance window in Nutanix Database Service + +## Example Usage + +```hcl + data "nutanix_ndb_maintenance_window" "window"{ + id = "{{ maintenance_window_id }}" + } +``` + +## Argument Reference + +The following arguments are supported: + +* `id`: (Required) Maintenance window id. 
+ +## Attribute Reference + +The following attributes are exported: +* `name`: name of maintenance window +* `description`: description of maintenance window +* `schedule`: schedule of maintenance window +* `owner_id`: owner id of maintenance window +* `date_created`: created date of maintenance window +* `date_modified`: modified date of maintenance window +* `access_level`: access level +* `properties`: properties of maintenance window +* `tags`: tags of maintenance window +* `status`: status of maintenance window +* `next_run_time`: next run time for maintenance window to trigger +* `entity_task_assoc`: entity task association for maintenance window +* `timezone`: timezone \ No newline at end of file diff --git a/website/docs/d/ndb_maintenance_windows.html.markdown b/website/docs/d/ndb_maintenance_windows.html.markdown new file mode 100644 index 00000000..70adc8cd --- /dev/null +++ b/website/docs/d/ndb_maintenance_windows.html.markdown @@ -0,0 +1,40 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_maintenance_windows" +sidebar_current: "docs-nutanix-datasource-ndb-maintenance-windows" +description: |- + List of maintenance windows in Nutanix Database Service +--- + +# nutanix_ndb_maintenance_windows + + List of maintenance windows in Nutanix Database Service + +## Example Usage + +```hcl + data "nutanix_ndb_maintenance_windows" "windows"{ } +``` + + +## Attribute Reference + +The following attributes are exported: + +* `maintenance_windows`: List of maintenance windows + +### maintenance_windows + +* `name`: name of maintenance window +* `description`: description of maintenance window +* `schedule`: schedule of maintenance window +* `owner_id`: owner id of maintenance window +* `date_created`: created date of maintenance window +* `date_modified`: modified date of maintenance window +* `access_level`: access level +* `properties`: properties of maintenance window +* `tags`: tags of maintenance window +* `status`: status of maintenance window +* 
`next_run_time`: next run time for maintenance window to trigger +* `entity_task_assoc`: entity task association for maintenance window +* `timezone`: timezone \ No newline at end of file diff --git a/website/docs/d/ndb_network.html.markdown b/website/docs/d/ndb_network.html.markdown new file mode 100644 index 00000000..2fc6d462 --- /dev/null +++ b/website/docs/d/ndb_network.html.markdown @@ -0,0 +1,52 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_network" +sidebar_current: "docs-nutanix-datasource-ndb-network" +description: |- + Describes a network in Nutanix Database Service +--- + +# nutanix_ndb_network + +Describes a network in Nutanix Database Service + +## Example Usage + +```hcl + data "nutanix_ndb_network" "nw" { + name = "{{ name of network }}" + } + + data "nutanix_ndb_network" "nw" { + id = "{{ id of network }}" + } +``` + +## Argument Reference + +The following arguments are supported: + +* `name`: (Optional) name of network +* `id`: (Optional) id of network + + +## Attribute Reference + +The following attributes are exported: +* `id`: network id +* `name`: network name +* `managed`: network managed by NDB or not +* `type`: type of network +* `cluster_id`: cluster id where network is present +* `stretched_vlan_id`: stretched vlan id +* `properties`: properties of network +* `properties_map`: properties map of network + +### properties_map +* `vlan_subnet_mask`: subnet mask of vlan +* `vlan_primary_dns`: primary dns of vlan +* `vlan_secondary_dns`: secondary dns of vlan +* `vlan_gateway`: gateway of vlan + + +See detailed information in [NDB Network](https://www.nutanix.dev/api_references/ndb/#/283556b78730b-get-vlans). 
\ No newline at end of file diff --git a/website/docs/d/ndb_networks.html.markdown b/website/docs/d/ndb_networks.html.markdown new file mode 100644 index 00000000..44ac0408 --- /dev/null +++ b/website/docs/d/ndb_networks.html.markdown @@ -0,0 +1,42 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_networks" +sidebar_current: "docs-nutanix-datasource-ndb-networks" +description: |- + List of networks in Nutanix Database Service +--- + +# nutanix_ndb_networks + + List of networks in Nutanix Database Service + +## Example Usage + +```hcl + data "nutanix_ndb_networks" "nw" { } +``` + +## Attribute Reference +The following attributes are exported: + +* `networks`: List of networks in NDB + +### networks + +* `id`: network id +* `name`: network name +* `managed`: network managed by NDB or not +* `type`: type of network +* `cluster_id`: cluster id where network is present +* `stretched_vlan_id`: stretched vlan id +* `properties`: properties of network +* `properties_map`: properties map of network + +### properties_map +* `vlan_subnet_mask`: subnet mask of vlan +* `vlan_primary_dns`: primary dns of vlan +* `vlan_secondary_dns`: secondary dns of vlan +* `vlan_gateway`: gateway of vlan + + +See detailed information in [List of NDB Networks](https://www.nutanix.dev/api_references/ndb/#/283556b78730b-get-vlans). 
\ No newline at end of file diff --git a/website/docs/d/ndb_tag.html.markdown b/website/docs/d/ndb_tag.html.markdown new file mode 100644 index 00000000..c22dadac --- /dev/null +++ b/website/docs/d/ndb_tag.html.markdown @@ -0,0 +1,42 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_tag" +sidebar_current: "docs-nutanix-datasource-ndb-tag" +description: |- + Describes a tag in Nutanix Database Service +--- + +# nutanix_ndb_tag + +Describes a tag in Nutanix Database Service + +## Example Usage + +```hcl + data "nutanix_ndb_tag" "tag"{ + id = "{{ tag id }}" + } +``` + + +## Argument Reference + +The following arguments are supported: +* `id` : tag id + +## Attribute Reference + +The following attributes are exported: + +* `name`: name for the tag +* `description`: description for the tag +* `entity_type`: entity for the tag to be associated with. +* `required`: tag value for entities. +* `status`: Status of the tag +* `owner`: owner id of the tag +* `values`: value for the tag +* `date_created`: date created of the tag +* `date_modified`: modified date of the tag + + +See detailed information in [NDB Tag](https://www.nutanix.dev/api_references/ndb/#/0a7bf3bdeed86-get-list-of-all-tags). \ No newline at end of file diff --git a/website/docs/d/ndb_tags.html.markdown b/website/docs/d/ndb_tags.html.markdown new file mode 100644 index 00000000..2b868b19 --- /dev/null +++ b/website/docs/d/ndb_tags.html.markdown @@ -0,0 +1,37 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_tags" +sidebar_current: "docs-nutanix-datasource-ndb-tags" +description: |- + List of tags in Nutanix Database Service +--- + +# nutanix_ndb_tags + +List of tags in Nutanix Database Service + +## Example Usage + +```hcl + data "nutanix_ndb_tags" "tags"{ } +``` + +## Attribute Reference + +The following attributes are exported: + +* `tags`: List of tags present in NDB. 
+ +### tags +* `name`: name for the tag +* `description`: description for the tag +* `entity_type`: entity for the tag to be associated with. +* `required`: tag value for entities. +* `status`: Status of the tag +* `owner`: owner id of the tag +* `values`: value for the tag +* `date_created`: date created of the tag +* `date_modified`: modified date of the tag + + +See detailed information in [NDB Tags](https://www.nutanix.dev/api_references/ndb/#/0a7bf3bdeed86-get-list-of-all-tags). \ No newline at end of file diff --git a/website/docs/d/ndb_time_machine.html.markdown b/website/docs/d/ndb_time_machine.html.markdown index 3bcc159a..f6fb399c 100644 --- a/website/docs/d/ndb_time_machine.html.markdown +++ b/website/docs/d/ndb_time_machine.html.markdown @@ -30,7 +30,6 @@ description: |- * `id`: time machine id * `name`: time machine name * `description`: time machine description -* `owner_id`: owner id * `date_created`: date created * `date_modified`: date modified * `access_level`: access level to time machines @@ -38,7 +37,6 @@ description: |- * `tags`: tags * `clustered`: clustered or not * `clone`: clone time machine or not -* `internal`: internal * `database_id`: database id * `type`: type of time machine * `category`: category of time machine @@ -55,6 +53,5 @@ description: |- * `sla_update_metadata`: sla update metadata * `sla`: sla info * `schedule`: schedule info -* `metadata`: metadata info See detailed information in [NDB Time Machine](https://www.nutanix.dev/api_references/ndb/#/cb7ba8c0c3284-get-time-machine-by-value-type) . 
\ No newline at end of file diff --git a/website/docs/d/ndb_time_machines.html.markdown b/website/docs/d/ndb_time_machines.html.markdown index 6d0d122f..bd92348e 100644 --- a/website/docs/d/ndb_time_machines.html.markdown +++ b/website/docs/d/ndb_time_machines.html.markdown @@ -27,7 +27,6 @@ List all time machines present in Nutanix Database Service * `id`: time machine id * `name`: time machine name * `description`: time machine description -* `owner_id`: owner id * `date_created`: date created * `date_modified`: date modified * `access_level`: access level to time machines @@ -35,7 +34,6 @@ List all time machines present in Nutanix Database Service * `tags`: tags * `clustered`: clustered or not * `clone`: clone time machine or not -* `internal`: internal * `database_id`: database id * `type`: type of time machine * `category`: category of time machine @@ -52,7 +50,6 @@ List all time machines present in Nutanix Database Service * `sla_update_metadata`: sla update metadata * `sla`: sla info * `schedule`: schedule info -* `metadata`: metadata info See detailed information in [NDB Time Machines](https://www.nutanix.dev/api_references/ndb/#/256497800ee3c-get-list-of-all-time-machines) . \ No newline at end of file diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 1efd1236..2d8f8b96 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -114,9 +114,43 @@ Foundation based examples : https://github.com/nutanix/terraform-provider-nutani Foundation based modules & examples : https://github.com/nutanix/terraform-provider-nutanix/blob/master/modules/foundation/ +## Nutanix Database Service (NDB) (>=v1.8.0) + +Going from 1.8.0 release of nutanix provider, some params are added to provider configuration to support Nutanix Database Service (NDB) components : + +* `ndb_username` - (Optional) This is the username for the NDB instance. This can also be specified with the `NDB_USERNAME` environment variable. 
+* `ndb_password` - (Optional) This is the password for the NDB instance. This can also be specified with the `NDB_PASSWORD` environment variable. +* `ndb_endpoint` - (Optional) This is the endpoint for the NDB instance. This can also be specified with the `NDB_ENDPOINT` environment variable. + +```terraform +terraform { + required_providers { + nutanix = { + source = "nutanix/nutanix" + version = ">=1.8.0" + } + } +} + +provider "nutanix" { + username = var.nutanix_username + password = var.nutanix_password + endpoint = var.nutanix_endpoint + port = var.nutanix_port + insecure = true + wait_timeout = 10 + ndb_endpoint = var.ndb_endpoint + ndb_username = var.ndb_username + ndb_password = var.ndb_password +} +``` + +NDB based examples : https://github.com/nutanix/terraform-provider-nutanix/blob/master/examples/ndb/ + ## Provider configuration required details -Going from 1.5.0-beta release of nutanix provider, fields inside provider configuration would be mandatory as per the usecase : +Going from 1.8.0-beta release of nutanix provider, fields inside provider configuration would be mandatory as per the usecase : * `Prism Central & Karbon` : For prism central and karbon related resources and data sources, `username`, `password` & `endpoint` are manadatory. * `Foundation` : For foundation related resources and data sources, `foundation_endpoint` in manadatory. +* `NDB` : For Nutanix Database Service (NDB) related resources and data sources. diff --git a/website/docs/r/ndb_clone.html.markdown b/website/docs/r/ndb_clone.html.markdown index 294cd65c..e77ddc96 100644 --- a/website/docs/r/ndb_clone.html.markdown +++ b/website/docs/r/ndb_clone.html.markdown @@ -49,8 +49,8 @@ Provides a resource to perform the clone of database instance based on the input * `node_count`: Node count. 
Default is 1 for single instance * `nodes`: Nodes contain info about dbservers vm * `lcm_config`: LCM Config contains the expiry details and refresh details -* `name`: Clone name -* `description`: Clone description +* `name`: database instance name +* `description`: database instance description * `nx_cluster_id`: cluster id on where clone will be present * `ssh_public_key`: ssh public key * `compute_profile_id`: specify the compute profile id @@ -66,6 +66,14 @@ Provides a resource to perform the clone of database instance based on the input * `postgresql_info`: postgresql info for the clone * `actionarguments`: (Optional) if any action arguments is required +* `delete`:- (Optional) Delete the database clone from the VM. Default value is true +* `remove`:- (Optional) Unregister the database clone from NDB. Default value is false +* `soft_remove`:- (Optional) Soft remove. Default will be false +* `forced`:- (Optional) Force delete of instance. Default is false +* `delete_time_machine`:- (Optional) Delete the database's Time Machine (snapshots/logs) from the NDB. Default value is true +* `delete_logical_cluster`:- (Optional) Delete the logical cluster. Default is true +* `remove_logical_cluster`: (Optional) remove logical cluster. Default value is false + ### nodes * `vm_name`: name for the database server VM. @@ -94,26 +102,16 @@ Structure for each action argument in actionarguments list: ## Attributes Reference -* `owner_id`: owner id * `date_created`: date created for clone * `date_modified`: last modified date for clone * `tags`: allows you to assign metadata to entities (clones, time machines, databases, and database servers) by using tags. 
* `clone`: cloned or not -* `era_created`: era created or not -* `internal`: internal -* `placeholder`: placeholder * `database_name`: database name * `type`: type of clone * `database_cluster_type`: database cluster type * `status`: status of clone -* `database_status`: database status * `info`: info of clone -* `group_info`: group info of clone -* `metadata`: metadata about clone * `metric`: Stores storage info regarding size, allocatedSize, usedSize and unit of calculation that seems to have been fetched from PRISM. -* `category`:category of clone -* `parent_database_id`: parent database id -* `parent_source_database_id`: parent source database id * `dbserver_logical_cluster`: dbserver logical cluster * `database_nodes`: database nodes associated with database instance * `linked_databases`: linked databases within database instance diff --git a/website/docs/r/ndb_clone_refresh.html.markdown b/website/docs/r/ndb_clone_refresh.html.markdown new file mode 100644 index 00000000..70b432ba --- /dev/null +++ b/website/docs/r/ndb_clone_refresh.html.markdown @@ -0,0 +1,39 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_clone_refresh" +sidebar_current: "docs-nutanix-resource-ndb-clone-refresh" +description: |- + NDB allows you to create and refresh clones to a point in time either by using transactional logs or by using snapshots. This operation submits a request to perform refresh clone of the database in Nutanix database service (NDB). +--- + +# nutanix_ndb_clone_refresh + +Provides a resource to perform the refresh clone of database based on the input parameters. 
+ +## Example Usage + +### resource to refresh clone with snapshot id + +```hcl + resource "nutanix_ndb_clone_refresh" "acctest-managed"{ + clone_id = "{{ clone_id }}" + snapshot_id = "{{ snapshot_id }}" + timezone = "Asia/Calcutta" + } +``` + +### resource to refresh clone with user pitr timestamp + +```hcl + resource "nutanix_ndb_clone_refresh" "acctest-managed"{ + clone_id = "{{ clone_id }}" + user_pitr_stamp = "{{ timestamp }}" + timezone = "Asia/Calcutta" + } +``` + +## Argument Reference +* `clone_id`: (Required) clone id +* `snapshot_id`: (Optional) snapshot id where clone has to be refreshed +* `user_pitr_stamp`: (Optional) Point in time recovery where clone has to be refreshed +* `timezone`: (Optional) timezone. Default is Asia/Calcutta. \ No newline at end of file diff --git a/website/docs/r/ndb_cluster.html.markdown b/website/docs/r/ndb_cluster.html.markdown new file mode 100644 index 00000000..c3b0faf7 --- /dev/null +++ b/website/docs/r/ndb_cluster.html.markdown @@ -0,0 +1,102 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_cluster" +sidebar_current: "docs-nutanix-resource-ndb-cluster" +description: |- + This operation submits a request to add a Nutanix cluster to Nutanix database service (NDB). +--- + +# nutanix_ndb_cluster + +Provides a resource to add a Nutanix cluster based on the input parameters. 
+ +## Example Usage + +```hcl + resource "nutanix_ndb_cluster" "clsname" { + name= "{{ test-cluster }}" + description = "test description" + cluster_ip = "{{ cluster_ip }}" + username= "{{ username of cluster }}" + password = "{{ password of cluster }}" + storage_container = "{{ storage_container }}" + agent_network_info{ + dns = "{{ DNS servers available in the }}" + ntp = "{{ NTP servers available }}" + } + networks_info{ + type = "DHCP" + network_info{ + vlan_name = "vlan_static" + static_ip = "{{ static_ip }}" + gateway = "{{ gateway }}" + subnet_mask="{{ subnet_mask }}" + } + access_type = [ + "PRISM", + "DSIP", + "DBSERVER" + ] + } + } +``` + +## Argument Reference + +* `name`: (Required) name of the cluster to be registered +* `description`: (Optional) description of cluster +* `cluster_ip`: (Required) Prism Element IP address +* `username`: (Required) username of the Prism Element administrator +* `password`: (Required) Prism Element password +* `storage_container`: (Required) select a storage container which is used for performing database operations in the cluster +* `agent_network_info`: (Required) agent network info to register cluster +* `networks_info`: (Required) network segmentation to segment the network traffic of the agent VM. + + +### agent_network_info +* `dns` : string of DNS servers(comma separated). +* `ntp`: string of NTP servers(comma separated). + +### networks_info +* `type`: type of vlan. Supported [DHCP, Static, IPAM] +* `network_info`: network segmentation to segment the network traffic +* `access_type`: VLAN access types for which you want to configure network segmentation. Supports [PRISM, DSIP, DBSERVER ]. +Prism Element: Select this VLAN access type to configure a VLAN that the NDB agent VM can use to communicate with Prism. +Prism iSCSI Data Service. Select this VLAN access type to configure a VLAN that the agent VM can use to make connection requests to the iSCSI data services IP. +DBServer Access from NDB server. 
Select this VLAN access type to configure a VLAN that is used for communications between the NDB agent VM and the database server VM on the newly registered NDB server cluster. + +### network_info +* `vlan_name`: vlan name +* `static_ip`: static ip of agent network +* `gateway`: gateway of agent network +* `subnet_mask`: subnet mask of agent network + + + +## Attributes Reference +The following attributes are exported: + +* `id`: - id of cluster +* `name`: - name of cluster +* `unique_name`: - unique name of cluster +* `ip_addresses`: - IP address +* `fqdns`: - fqdn +* `nx_cluster_uuid`: - nutanix cluster uuid +* `description`: - description +* `cloud_type`: - cloud type +* `date_created`: - creation date +* `date_modified`: - date modified +* `version`: - version +* `owner_id`: - owner UUID +* `status`: - current status +* `hypervisor_type`: - hypervisor type +* `hypervisor_version`: - hypervisor version +* `properties`: - list of properties +* `reference_count`: - NA +* `username`: - username +* `password`: - password +* `cloud_info`: - cloud info +* `resource_config`: - resource related consumption info +* `management_server_info`: - NA +* `entity_counts`: - no. of entities related +* `healthy`: - if healthy status \ No newline at end of file diff --git a/website/docs/r/ndb_database.html.markdown b/website/docs/r/ndb_database.html.markdown index b134bfd0..904a1c44 100644 --- a/website/docs/r/ndb_database.html.markdown +++ b/website/docs/r/ndb_database.html.markdown @@ -290,6 +290,13 @@ The following arguments are supported: * `nodes`: - (Optional) nodes info * `postgresql_info`: - (Optional) action arguments for postgress type database. +* `delete`:- (Optional) Delete the database from the VM. Default value is true +* `remove`:- (Optional) Unregister the database from NDB. Default value is true +* `soft_remove`:- (Optional) Soft remove. Default will be false +* `forced`:- (Optional) Force delete of instance. 
Default is false +* `delete_time_machine`:- (Optional) Delete the database's Time Machine (snapshots/logs) from the NDB. Default value is true +* `delete_logical_cluster`:- (Optional) Delete the logical cluster. Default is true + ### actionarguments Structure for each action argument in actionarguments list: diff --git a/website/docs/r/ndb_database_restore.html.markdown b/website/docs/r/ndb_database_restore.html.markdown index 08c52395..4f1c611f 100644 --- a/website/docs/r/ndb_database_restore.html.markdown +++ b/website/docs/r/ndb_database_restore.html.markdown @@ -45,30 +45,20 @@ Provides a resource to restore the database instance based on the input paramete * `description`: description of database instance * `databasetype`: type of database * `properties`: properties of database created -* `owner_id`: owner id * `date_created`: date created for db instance * `date_modified`: date modified for instance * `tags`: allows you to assign metadata to entities (clones, time machines, databases, and database servers) by using tags. * `clone`: whether instance is cloned or not -* `era_created`: whether era created or not -* `internal`: is internal -* `placeholder`: placeholder * `database_name`: name of database * `type`: type of database * `database_cluster_type`: database cluster type * `status`: status of instance -* `database_status`: status of database * `dbserver_logical_cluster_id`: dbserver logical cluster id * `time_machine_id`: time machine id of instance -* `parent_time_machine_id`: parent time machine id * `time_zone`: timezone on which instance is created xw * `info`: info of instance -* `group_info`: group info of instance -* `metadata`: metadata of instance * `metric`: Stores storage info regarding size, allocatedSize, usedSize and unit of calculation that seems to have been fetched from PRISM. 
-* `category`: category of instance * `parent_database_id`: parent database id -* `parent_source_database_id`: parent source database id * `lcm_config`: LCM config of instance * `time_machine`: Time Machine details of instance * `dbserver_logical_cluster`: dbserver logical cluster diff --git a/website/docs/r/ndb_database_scale.html.markdown b/website/docs/r/ndb_database_scale.html.markdown index 0df8bb9a..7d258ffd 100644 --- a/website/docs/r/ndb_database_scale.html.markdown +++ b/website/docs/r/ndb_database_scale.html.markdown @@ -40,30 +40,20 @@ Provides a resource to scale the database instance based on the input parameters * `description`: description of database instance * `databasetype`: type of database * `properties`: properties of database created -* `owner_id`: owner id * `date_created`: date created for db instance * `date_modified`: date modified for instance * `tags`: allows you to assign metadata to entities (clones, time machines, databases, and database servers) by using tags. * `clone`: whether instance is cloned or not -* `era_created`: whether era created or not -* `internal`: is internal -* `placeholder`: placeholder * `database_name`: name of database * `type`: type of database * `database_cluster_type`: database cluster type * `status`: status of instance -* `database_status`: status of database * `dbserver_logical_cluster_id`: dbserver logical cluster id * `time_machine_id`: time machine id of instance -* `parent_time_machine_id`: parent time machine id * `time_zone`: timezone on which instance is created xw * `info`: info of instance -* `group_info`: group info of instance -* `metadata`: metadata of instance * `metric`: Stores storage info regarding size, allocatedSize, usedSize and unit of calculation that seems to have been fetched from PRISM. 
-* `category`: category of instance * `parent_database_id`: parent database id -* `parent_source_database_id`: parent source database id * `lcm_config`: LCM config of instance * `time_machine`: Time Machine details of instance * `dbserver_logical_cluster`: dbserver logical cluster diff --git a/website/docs/r/ndb_database_snapshot.html.markdown b/website/docs/r/ndb_database_snapshot.html.markdown index a9cf20a5..e1db8040 100644 --- a/website/docs/r/ndb_database_snapshot.html.markdown +++ b/website/docs/r/ndb_database_snapshot.html.markdown @@ -47,7 +47,6 @@ Provides a resource to perform the snapshot for database instance based on the i * `id`: name of snapshot * `description`: description of snapshot * `properties`: properties -* `owner_id`: owner id * `date_created`: created date * `date_modified`: modified date * `properties`: properties @@ -62,7 +61,6 @@ Provides a resource to perform the snapshot for database instance based on the i * `type`: type * `applicable_types`: Applicable types * `snapshot_timestamp`: snapshot timeStamp -* `metadata`: metadata of snapshot * `software_snapshot_id`: software snapshot id * `software_database_snapshot`: software database snapshot * `dbserver_storage_metadata_version`: dbserver storage metadata version diff --git a/website/docs/r/ndb_dbservervm.html.markdown b/website/docs/r/ndb_dbservervm.html.markdown new file mode 100644 index 00000000..f3d85187 --- /dev/null +++ b/website/docs/r/ndb_dbservervm.html.markdown @@ -0,0 +1,112 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_dbserver_vm" +sidebar_current: "docs-nutanix-resource-ndb-dbserver-vm" +description: |- + This operation submits a request to create, update and delete database server VMs in Nutanix database service (NDB). + Note: For 1.8.0 release, only postgress database type is qualified and officially supported. +--- + +# nutanix_ndb_dbserver_vm + +Provides a resource to create database server VMs based on the input parameters. 
For 1.8.0 release, only postgres database type is qualified and officially supported. + +## Example Usage + +### dbserver vm resource with software profile + +```hcl + resource nutanix_ndb_dbserver_vm acctest-managed { + database_type = "postgres_database" + nx_cluster_id = {{ nx_cluster_id }} + software_profile_id = {{ software_profile_id }} + software_profile_version_id = {{ software_profile_version_id }} + + // dbserver details + description = "{{ description }}" + compute_profile_id = {{ compute_profile_id }} + network_profile_id = {{ network_profile_id }} + vm_password = "{{ vm_password }}" + postgres_database { + vm_name = "test-vm" + client_public_key = "{{ public_key }}" + } + } +``` + +### dbserver vm resource with time machine +```hcl + resource nutanix_ndb_dbserver_vm acctest-managed { + database_type = "postgres_database" + nx_cluster_id = {{ nx_cluster_id }} + time_machine_id = {{ time_machine_id }} + + // dbserver details + description = "{{ description }}" + compute_profile_id = {{ compute_profile_id }} + network_profile_id = {{ network_profile_id }} + vm_password = "{{ vm_password }}" + postgres_database { + vm_name = "test-vm" + client_public_key = "{{ public_key }}" + } + } +``` + +## Argument Reference + +The following arguments are supported: + +* `database_type`: (Required) database type +* `software_profile_id`: (Optional) software profile id you want to provision a database server VM from an existing software profile. Required with software_profile_version_id. Conflicts with time_machine_id. +* `software_profile_version_id`: (Optional) Software Profile Version Id. +* `time_machine_id`: (Optional) Time Machine id you want to provision a database server VM by using the database and operating system software stored in a time machine. Conflicts with software_profile_id. +* `snapshot_id`: (Optional) Snapshot id. If not given, it will use latest snapshot to provision db server vm. 
+ +* `description`: (Optional) Type a description for the database server VM. +* `compute_profile_id`: (Optional) Compute profile id. +* `network_profile_id`: (Optional) Network profile id. +* `vm_password`: (Optional) password of the NDB drive user account. +* `postgres_database`: (Optional) Postgres database server vm +* `maintenance_tasks`: (Optional) maintenance window configured to enable automated patching. + + +* `delete`:- (Optional) Delete the VM and associated storage. Default value is true +* `remove`:- (Optional) Unregister the database from NDB. Default value is false +* `soft_remove`:- (Optional) Soft remove. Default will be false +* `delete_vgs`:- (Optional) Delete volume groups. Default value is true +* `delete_vm_snapshots`:- (Optional) Delete the vm snapshots. Default is true + + +### postgres_database +* `vm_name`: (Required) name for the database server VM. +* `client_public_key`: (Required) use SSH public keys to access the database server VM. + +### maintenance_tasks +* `maintenance_window_id`: Associate an existing maintenance window id. NDB starts OS patching or database patching as per the schedule defined in the maintenance window. +* `tasks`: Tasks for the maintenance. +* `tasks.task_type`: use this option if you want NDB to perform database patching or OS patching automatically. Supports [ OS_PATCHING, DB_PATCHING ]. +* `tasks.pre_command`: add pre (operating system and database patching) commands. +* `tasks.post_command`: add post (operating system and database patching) commands. + + +### actionarguments + +Structure for each action argument in actionarguments list: + +* `name`: name of the dbserver vm +* `properties`: Properties of dbserver vm +* `dbserver_cluster_id`: dbserver cluster id. +* `vm_cluster_name`: cluster name for dbserver vm +* `vm_cluster_uuid`: cluster uuid for dbserver vm +* `ip_addresses`: IP addresses of the dbserver vm +* `mac_addresses`: Mac addresses of dbserver vm +* `type`: Type of entity. i.e. 
Dbserver +* `status`: Status of Dbserver . Active or not. +* `client_id`: client id +* `era_drive_id`: era drive id +* `era_version`: era version +* `vm_timezone`: timezone of dbserver vm + + +See detailed information in [NDB Provision Database Server VM](https://www.nutanix.dev/api_references/ndb/#/c9126257bc0fc-provision-database-server). \ No newline at end of file diff --git a/website/docs/r/ndb_dbservervm_register.html.markdown b/website/docs/r/ndb_dbservervm_register.html.markdown new file mode 100644 index 00000000..eec12b6d --- /dev/null +++ b/website/docs/r/ndb_dbservervm_register.html.markdown @@ -0,0 +1,81 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_register_dbserver" +sidebar_current: "docs-nutanix-resource-ndb-dbservervm-register" +description: |- + This operation submits a request to register the database server VM in Nutanix database service (NDB). + Note: For 1.8.0 release, only postgress database type is qualified and officially supported. +--- + +# nutanix_ndb_register_dbserver + +Provides a resource to register database server VMs based on the input parameters. For 1.8.0 release, only postgress database type is qualified and officially supported. + +## Example Usage + +```hcl + resource "nutanix_ndb_register_dbserver" "name" { + database_type = "postgres_database" + vm_ip= "{{ vmip to register}}" + nxcluster_id = "{{ cluster_id }}" + username= "{{ username of the NDB drive user account }}" + password="{{ password of the NDB drive user account }}" + postgres_database{ + listener_port = {{ listner_port }} + postgres_software_home= "{{ path to the PostgreSQL home directory in which the PostgreSQL software is installed }}" + } + } +``` + + +## Argument Reference + +The following arguments are supported: +* `database_type`: (Required) database type i.e. postgres_database +* `vm_ip`: (Required) IP address of the database server VM +* `nxcluster_id`: (Required) cluster on which you want to register the database server VM. 
+* `username`: (Required) username of the NDB drive user account that has sudo access +* `password`: (Optional) password of the NDB drive user account. Conflicts with ssh_key. +* `ssh_key`: (Optional) the private key. Conflicts with password. +* `postgres_database`: (Optional) postgres info for dbserver + +* `name`: (Optional) Name of db server vm. Should be used in Update Method only. +* `description`: (Optional) description of db server vm. Should be used in Update Method only. +* `update_name_description_in_cluster`: (Optional) Updates the name and description in cluster. Should be used in Update Method only. +* `working_directory`: (Optional) working directory of postgres. Default is "/tmp" +* `forced_install`: (Optional) forced install the packages. Default is true + +* `delete`:- (Optional) Delete the VM and associated storage. Default value is false +* `remove`:- (Optional) Unregister the database from NDB. Default value is true +* `soft_remove`:- (Optional) Soft remove. Default will be false +* `delete_vgs`:- (Optional) Delete volume groups. Default value is true +* `delete_vm_snapshots`:- (Optional) Delete the vm snapshots. Default is true + + +### postgres_database +* `listener_port`: (Optional) listener port of db server +* `postgres_software_home`: (Required) path to the PostgreSQL home directory in which the PostgreSQL software is installed + + +### actionarguments + +Structure for each action argument in actionarguments list: + +* `name`: name of the dbserver vm +* `properties`: Properties of dbserver vm +* `era_created`: created by era or not. +* `internal`: is internal or not. +* `dbserver_cluster_id`: dbserver cluster id. +* `vm_cluster_name`: cluster name for dbserver vm +* `vm_cluster_uuid`: cluster uuid for dbserver vm +* `ip_addresses`: IP addresses of the dbserver vm +* `mac_addresses`: Mac addresses of dbserver vm +* `type`: Type of entity. i.e. Dbserver +* `status`: Status of Dbserver. Active or not. 
+* `client_id`: client id +* `era_drive_id`: era drive id +* `era_version`: era version +* `vm_timezone`: timezone of dbserver vm + + +See detailed information in [NDB Register Database Server VM](https://www.nutanix.dev/api_references/ndb/#/5bd6f03bd6ed7-register-an-existing-database-server). \ No newline at end of file diff --git a/website/docs/r/ndb_maintenance_task.html.markdown b/website/docs/r/ndb_maintenance_task.html.markdown new file mode 100644 index 00000000..b914c56d --- /dev/null +++ b/website/docs/r/ndb_maintenance_task.html.markdown @@ -0,0 +1,104 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_maintenance_task" +sidebar_current: "docs-nutanix-resource-ndb-maintenance_task" +description: |- + This operation submits a request to create, update and delete maintenance task association with database servers vms in Nutanix database service (NDB). +--- + +# nutanix_ndb_maintenance_task + +Provides a resource to associate a maintenance window with database server VM based on the input parameters. 
+ +## Example Usage + +### resource to associated maintenance window with OS_PATCHING +```hcl + resource "nutanix_ndb_maintenance_task" "name" { + dbserver_id = [ + "{{ dbserver_vm_id }}" + ] + maintenance_window_id = "{{ maintenance_window_id }}" + tasks{ + task_type = "OS_PATCHING" + } + } +``` + +### resource to associated maintenance window with DB_PATCHING +```hcl + resource "nutanix_ndb_maintenance_task" "name" { + dbserver_id = [ + "{{ dbserver_vm_id }}" + ] + maintenance_window_id = "{{ maintenance_window_id }}" + tasks { + task_type = "DB_PATCHING" + } + } +``` + +### resource to associated maintenance window with pre and post command on each task +```hcl + resource "nutanix_ndb_maintenance_task" "name" { + dbserver_id = [ + "{{ dbserver_vm_id }}" + ] + maintenance_window_id = "{{ maintenance_window_id }}" + tasks { + task_type = "DB_PATCHING" + pre_command = "{{ pre_command for db patching }}" + post_command = "{{ post_command for db patching }}" + } + tasks{ + task_type = "OS_PATCHING" + pre_command = "{{ pre_command for os patching}}" + post_command = "{{ post_command for os patching }}" + } + } +``` + +## Argument Reference + +The following arguments are supported: + +* `maintenance_window_id`: (Required) maintenance window id which has to be associated +* `dbserver_id`: (Optional) dbserver vm id. Conflicts with "dbserver_cluster" +* `dbserver_cluster`: (Optional) dbserver cluster ids. Conflicts with "dbserver_id" +* `tasks`: (Optional) task input for Operating System Patching or Database Patching or both + +### tasks +* `task_type`: (Required) type of task. Supports [ "OS_PATCHING", "DB_PATCHING" ] +* `pre_command`: (Optional) command that you want to run before patching the OS/DB +* `post_command`: (Optional) command that you want to run after patching the OS/DB + +## Attributes Reference + +The following attributes are exported: + +* `entity_task_association`: Entity Task Association List. 
+ + +### entity_task_association +* `id`: id of maintenance window +* `name`: name of of maintenance window +* `description`: description of maintenance window +* `owner_id`: owner id of task +* `date_created`: created date of task +* `date_modified`: modified date of task +* `access_level`: access level of tasks +* `properties`: properties of task +* `tags`: tags of task +* `maintenance_window_id`: maintenance window id +* `maintenance_window_owner_id`: maintenance window owner id +* `entity_id`: entity id +* `entity_type`: type of the entity. i.e. DBSERVER +* `status`: status of task +* `task_type`: type of the task. OS or DB +* `payload`: list of pre post commands of OS or DB task + + +### payload +* `pre_post_command`: Pre Post command of Task +* `pre_post_command.pre_command`: pre command of task +* `pre_post_command.post_command`: post command of task \ No newline at end of file diff --git a/website/docs/r/ndb_maintenance_window.html.markdown b/website/docs/r/ndb_maintenance_window.html.markdown new file mode 100644 index 00000000..d73f0305 --- /dev/null +++ b/website/docs/r/ndb_maintenance_window.html.markdown @@ -0,0 +1,66 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_maintenance_window" +sidebar_current: "docs-nutanix-resource-ndb-maintenance_window" +description: |- + A maintenance window allows you to set a schedule that is used to automate repeated maintenance tasks such as OS patching and database patching. NDB allows you to create a maintenance window and then associate the maintenance window with a list of database server VMs or an instance. This operation submits a request to create, update and delete maintenance window in Nutanix database service (NDB). +--- + +# nutanix_ndb_maintenance_window + +Provides a resource to create maintenance window based on the input parameters. 
+ +## Example Usage + +### resource to create weekly maintenance window +```hcl + resource nutanix_ndb_maintenance_window acctest-managed { + name = "test-maintenance" + description = "desc" + duration = 3 + recurrence = "WEEKLY" + day_of_week = "TUESDAY" + start_time = "17:04:47" + } +``` + +### resource to create monthly maintenance window +```hcl + resource nutanix_ndb_maintenance_window acctest-managed{ + name = "test-maintenance" + description = "description" + duration = 2 + recurrence = "MONTHLY" + day_of_week = "TUESDAY" + start_time = "17:04:47" + week_of_month = 4 + } +``` + + +## Argument Reference +* `name`: (Required) Name for the maintenance window. +* `description`: (Optional) Description for maintenance window +* `recurrence`: (Required) Supported values [ MONTHLY, WEEKLY ] +* `start_time`: (Required) start time for maintenance window to trigger +* `duration`: (Optional) duration in hours. Default is 2 +* `day_of_week`: (Optional) Day of the week to trigger maintenance window. Supports [ MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY ] +* `week_of_month`: (Optional) week of the month. Supports [1, 2, 3, 4] . +* `timezone`: timezone . Default is Asia/Calcutta . + +### a Weekly or Monthly schedule. +* If you select Weekly, select the day and time when the maintenance window triggers. +* If you select Monthly, select a week (1st, 2nd, 3rd, or 4th ), day of the week, and a time when the maintenance window triggers. 
+ + +## Attributes Reference +* `schedule`: schedule of maintenance window +* `owner_id`: owner id of maintenance window +* `date_created`: created date of maintenance window +* `date_modified`: modified date of maintenance window +* `access_level`: access level +* `properties`: properties of maintenance window +* `tags`: tags of maintenance window +* `status`: status of maintenance window +* `next_run_time`: next run time for maintenance window to trigger +* `entity_task_assoc`: entity task association for maintenance window diff --git a/website/docs/r/ndb_network.html.markdown b/website/docs/r/ndb_network.html.markdown new file mode 100644 index 00000000..6765eb37 --- /dev/null +++ b/website/docs/r/ndb_network.html.markdown @@ -0,0 +1,74 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_network" +sidebar_current: "docs-nutanix-resource-ndb-network" +description: |- + This operation submits a request to create, update and delete networks in Nutanix database service (NDB). +--- + +# nutanix_ndb_network + +Provides a resource to create VLANs and IP address pools that are managed both in NDB and outside NDB. 
+ +## Example Usage + +### resource to create network for NDB +```hcl + resource "nutanix_ndb_network" "name" { + name= "test-sub" + type="Static" + cluster_id = "{{ cluster_id }}" + gateway= "{{ gatway for the vlan }}" + subnet_mask = "{{ subnet mask for the vlan}}" + primary_dns = " {{ primary dns for the vlan }}" + secondary_dns= "{{secondary dns for the vlan }}" + ip_pools{ + start_ip = "{{ starting address range}}" + end_ip = "{{ ending address range }}" + } + } +``` + +### resource to create network for NDB with dns domain +```hcl + resource "nutanix_ndb_network" "name" { + name= "test-sub" + type="Static" + cluster_id = "{{ cluster_id }}" + gateway= "{{ gatway for the vlan }}" + subnet_mask = "{{ subnet mask for the vlan}}" + primary_dns = " {{ primary dns for the vlan }}" + secondary_dns= "{{secondary dns for the vlan }}" + ip_pools{ + start_ip = "{{ starting address range}}" + end_ip = "{{ ending address range }}" + } + dns_domain = {{ dns domain }} + } +``` + +## Argument Reference +* `name`: (Required) Name of the vlan to be attached in NDB +* `type`: (Required) Vlan type. Supports [DHCP, Static] +* `cluster_id`: (Required) Select the Nutanix cluster on which you want to add the VLAN. +* `ip_pools`: (Optional) Manage IP Address Pool in NDB option if you want to assign static IP addresses to your database server VMs +* `gateway`: (Optional) Gateway for vlan. Supports in Static IP address assignment only +* `subnet_mask`: (Optional) Subnet mask for vlan. (Static IP address assignment only) +* `primary_dns`: (Optional) primary dns for vlan. (Static IP address assignment only) +* `secondary_dns`: (Optional) secondary dns for vlan. (Static IP address assignment only) +* `dns_domain`: (Optional) dns domain for vlan. 
(Static IP address assignment only) + +### ip_pools +* `start_ip`: (Required) starting IP address range for new database servers +* `end_ip`: (Required) ending IP address range for new database servers + + +## Attributes Reference + +* `managed`: Managed by NDB or not +* `stretched_vlan_id`: stretched vlan id +* `properties`: properties of network +* `properties_map`: properties map of network + + +See detailed information in [NDB Network](https://www.nutanix.dev/api_references/ndb/#/4a4fc22c2843d-add-a-v-lan-to-ndb). \ No newline at end of file diff --git a/website/docs/r/ndb_register_database.html.markdown b/website/docs/r/ndb_register_database.html.markdown index 0fda836f..9fbdfcc4 100644 --- a/website/docs/r/ndb_register_database.html.markdown +++ b/website/docs/r/ndb_register_database.html.markdown @@ -177,6 +177,14 @@ Provides a resource to register the database based on the input parameters. * `actionarguments`: (Optional) action arguments * `postgress_info`: (Optional) Postgress_Info for registering. + +* `delete`:- (Optional) Delete the database from the VM. Default value is false +* `remove`:- (Optional) Unregister the database from NDB. Default value is true +* `soft_remove`:- (Optional) Soft remove. Default will be false +* `forced`:- (Optional) Force delete of instance. Default is false +* `delete_time_machine`:- (Optional) Delete the database's Time Machine (snapshots/logs) from the NDB. Default value is true +* `delete_logical_cluster`:- (Optional) Delete the logical cluster. 
Default is true + ### postgress_info * `listener_port`: (Required) listner port of database @@ -235,14 +243,10 @@ Structure for each action argument in actionarguments list: * `description`: description of database instance * `databasetype`: type of database * `properties`: properties of database created -* `owner_id`: owner id * `date_created`: date created for db instance * `date_modified`: date modified for instance * `tags`: allows you to assign metadata to entities (clones, time machines, databases, and database servers) by using tags. * `clone`: whether instance is cloned or not -* `era_created`: whether era created or not -* `internal`: is internal -* `placeholder`: placeholder * `database_name`: name of database * `type`: type of database * `database_cluster_type`: database cluster type @@ -253,8 +257,6 @@ Structure for each action argument in actionarguments list: * `parent_time_machine_id`: parent time machine id * `time_zone`: timezone on which instance is created xw * `info`: info of instance -* `group_info`: group info of instance -* `metadata`: metadata of instance * `metric`: Stores storage info regarding size, allocatedSize, usedSize and unit of calculation that seems to have been fetched from PRISM. * `category`: category of instance * `parent_database_id`: parent database id diff --git a/website/docs/r/ndb_stretched_vlans.html.markdown b/website/docs/r/ndb_stretched_vlans.html.markdown new file mode 100644 index 00000000..3bb4a4b4 --- /dev/null +++ b/website/docs/r/ndb_stretched_vlans.html.markdown @@ -0,0 +1,76 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_stretched_vlan" +sidebar_current: "docs-nutanix-resource-ndb-stretched-vlan" +description: |- + This operation submits a request to create, update and delete stretched vlans in Nutanix database service (NDB). We can add a stretched VLAN to NDB by selecting the existing VLANs from each Nutanix cluster. 
+--- + +# nutanix_ndb_stretched_vlan + +Provides a resource to create stretched vlans based on the input parameters. + +## Example Usage + +### resource to add stretched vlan in NDB +```hcl + resource "nutanix_ndb_stretched_vlan" "name" { + name = "test-stretcName" + description = "vlan desc updated" + type = "Static" + vlan_ids = [ + "{{ vlan_id_1 }}", + "{{ vlan_id_2 }}" + ] + } +``` + +### resource to update the strteched vlan with new gateway and subnet mask +```hcl + resource "nutanix_ndb_stretched_vlan" "name" { + name = "test-stretcName" + description = "vlan desc updated" + type = "Static" + vlan_ids = [ + "{{ vlan_id_1 }}", + "{{ vlan_id_2 }}" + ] + metadata{ + gateway = "{{ gateway of vlans }}" + subnet_mask = "{{ subnet mask of vlans }}" + } + } +``` + + +## Argument Reference + +* `name`: (Required) name for the stretched VLAN +* `description`: (Optional) Description of stretched vlan +* `type`: (Required) type of vlan. static VLANs that are managed in NDB can be added to a stretched VLAN. 
+* `vlan_ids`: (Required) list of vlan ids to be added in NDB + +* `metadata`: (Optional) Update the stretched VLAN Gateway and Subnet Mask IP address +* `metadata.gateway`: Update the gateway of stretched vlan +* `metadata.subnet_mask`: Update the subnet_mask of stretched vlan + +## Attributes Reference +The following attributes are exported: + +* `vlans_list`: properties of vlans + +### vlans_list +* `id`: network id +* `name`: network name +* `managed`: network managed by NDB or not +* `type`: type of network +* `cluster_id`: cluster id where network is present +* `stretched_vlan_id`: stretched vlan id +* `properties`: properties of network +* `properties_map`: properties map of network + +### properties_map +* `vlan_subnet_mask`: subnet mask of vlan +* `vlan_primary_dns`: primary dns of vlan +* `vlan_secondary_dns`: secondary dns of vlan +* `vlan_gateway`: gateway of vlan \ No newline at end of file diff --git a/website/docs/r/ndb_tag.html.markdown b/website/docs/r/ndb_tag.html.markdown new file mode 100644 index 00000000..ddeccaac --- /dev/null +++ b/website/docs/r/ndb_tag.html.markdown @@ -0,0 +1,52 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_tag" +sidebar_current: "docs-nutanix-resource-ndb-tag" +description: |- + NDB allows you to assign metadata to entities (clones, time machines, databases, and database servers) by using tags. When you are cloning a database, you can associate tags with the database that you are creating. This operation submits a request to create, update and delete tags in Nutanix database service (NDB). +--- + +# nutanix_ndb_tag + +Provides a resource to create tags based on the input parameters. 
+ +## Example Usage + +### resource to create tag +```hcl + resource "nutanix_ndb_tag" "name" { + name= "testst-up" + description = "this is desc ok" + entity_type = "DATABASE" + required=true + } +``` + +### resource to update tag with status +```hcl + resource "nutanix_ndb_tag" "name" { + name= "testst-up" + description = "this is desc ok" + entity_type = "DATABASE" + required=true + status = "DEPRECATED" + } +``` + +## Argument Reference +* `name`: (Required) name for the tag +* `description`: (Optional) description for the tag +* `entity_type`: (Required) entity for the tag to be associated with. Supported values [ DATABASE, TIME_MACHINE, CLONE, DATABASE_SERVER ]. +* `required`: (Optional) provide a tag value for entities. + +* `status`: (Optional)Status of the tag. Supported values are [ ENABLED, DEPRECATED ] + + +## Attributes Reference +* `owner`: owner id of the tag +* `values`: value for the tag +* `date_created`: date created of the tag +* `date_modified`: modified date of tha tag + + +See detailed information in [NDB Tag](https://www.nutanix.dev/api_references/ndb/#/5d6a2dc1bc153-create-a-tag). \ No newline at end of file diff --git a/website/docs/r/ndb_time_machine_cluster.html.markdown b/website/docs/r/ndb_time_machine_cluster.html.markdown new file mode 100644 index 00000000..bc1074bc --- /dev/null +++ b/website/docs/r/ndb_time_machine_cluster.html.markdown @@ -0,0 +1,46 @@ +--- +layout: "nutanix" +page_title: "NUTANIX: nutanix_ndb_tms_cluster" +sidebar_current: "docs-nutanix-resource-ndb-tms-cluster" +description: |- + NDB multi-cluster allows you to manage time machine data availability across all the registered Nutanix clusters in NDB. This operation submits a request to add, update and delete clusters in time machine data availability for Nutanix database service (NDB). +--- + +# nutanix_ndb_tms_cluster + +Provides a resource to manage time machine data availability across all the registered Nutanix clusters in NDB. 
+ +## Example Usage + +```hcl + resource "nutanix_ndb_tms_cluster" "cls" { + time_machine_id = "{{ tms_id }}" + nx_cluster_id = "{{ cluster_id }}" + sla_id = "{{ sla_id }}" + } +``` + +## Argument Reference + +The following arguments are supported: + +* `time_machine_id`: (Required) time machine id +* `nx_cluster_id`: (Required) Nutanix cluster id on the associated registered clusters. +* `sla_id`: (Required) SLA id for the associated cluster. + +* `type`: (Optional) Default value is "OTHER" + +## Attributes Reference + +The following attributes are exported: + +* `status`: status of the cluster associated with time machine +* `schedule_id`: schedule id of the data associated with time machine +* `owner_id`: owner id +* `source_clusters`: source clusters in time machines +* `log_drive_status`: log drive status of time machine +* `date_created`: created date of time machine associated with cluster +* `date_modified`: modified date of time machine associated with cluster +* `log_drive_id`: log drive id +* `description`: description of nutanix cluster associated with time machine +* `source`: source is present or not \ No newline at end of file diff --git a/website/nutanix.erb b/website/nutanix.erb index 6b057394..7d8689b1 100644 --- a/website/nutanix.erb +++ b/website/nutanix.erb @@ -208,6 +208,30 @@