From 488baf89d7abc74f9a5ccf03ca9e94fc5f5b6c52 Mon Sep 17 00:00:00 2001 From: graysonwu Date: Fri, 15 Sep 2023 15:00:57 -0700 Subject: [PATCH] Implement Host Transport Node Signed-off-by: graysonwu --- nsxt/provider.go | 1 + ...esource_nsxt_policy_host_transport_node.go | 308 ++++++++++++++++++ nsxt/resource_nsxt_policy_transport_zone.go | 8 +- nsxt/resource_nsxt_transport_node.go | 14 +- .../policy_host_transport_node.html.markdown | 164 ++++++++++ 5 files changed, 488 insertions(+), 7 deletions(-) create mode 100644 nsxt/resource_nsxt_policy_host_transport_node.go create mode 100644 website/docs/r/policy_host_transport_node.html.markdown diff --git a/nsxt/provider.go b/nsxt/provider.go index dfbef8b11..ce5fdcede 100644 --- a/nsxt/provider.go +++ b/nsxt/provider.go @@ -428,6 +428,7 @@ func Provider() *schema.Provider { "nsxt_transport_node": resourceNsxtTransportNode(), "nsxt_failure_domain": resourceNsxtFailureDomain(), "nsxt_cluster_virtual_ip": resourceNsxtClusterVirualIP(), + "nsxt_policy_host_transport_node": resourceNsxtPolicyHostTransportNode(), }, ConfigureFunc: providerConfigure, diff --git a/nsxt/resource_nsxt_policy_host_transport_node.go b/nsxt/resource_nsxt_policy_host_transport_node.go new file mode 100644 index 000000000..d9d647afb --- /dev/null +++ b/nsxt/resource_nsxt_policy_host_transport_node.go @@ -0,0 +1,308 @@ +/* Copyright © 2020 VMware, Inc. All Rights Reserved. 
+ SPDX-License-Identifier: MPL-2.0 */ + +package nsxt + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/vmware/vsphere-automation-sdk-go/runtime/protocol/client" + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/infra" + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/infra/sites/enforcement_points" + model2 "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + "golang.org/x/exp/maps" +) + +func resourceNsxtPolicyHostTransportNode() *schema.Resource { + return &schema.Resource{ + Create: resourceNsxtPolicyHostTransportNodeCreate, + Read: resourceNsxtPolicyHostTransportNodeRead, + Update: resourceNsxtPolicyHostTransportNodeUpdate, + Delete: resourceNsxtPolicyHostTransportNodeDelete, + Importer: &schema.ResourceImporter{ + State: resourceNsxtPolicyHostTransportNodeImporter, + }, + + Schema: map[string]*schema.Schema{ + "nsx_id": getNsxIDSchema(), + "path": getPathSchema(), + "display_name": getDisplayNameSchema(), + "description": getDescriptionSchema(), + "revision": getRevisionSchema(), + "tag": getTagsSchema(), + "site_path": { + Type: schema.TypeString, + Description: "Path to the site this Host Transport Node belongs to", + Optional: true, + ForceNew: true, + Default: defaultInfraSitePath, + ValidateFunc: validatePolicyPath(), + }, + "enforcement_point": { + Type: schema.TypeString, + Description: "ID of the enforcement point this Host Transport Node belongs to", + Optional: true, + ForceNew: true, + Computed: true, + }, + "discovered_node_id": { + Type: schema.TypeString, + Optional: true, + Description: "Discovered node id to create Host Transport Node", + }, + "node_deployment_info": getFabricHostNodeSchema(), + // host_switch_spec + "standard_host_switch": getStandardHostSwitchSchema(), + "preconfigured_host_switch": getPreconfiguredHostSwitchSchema(), + }, + } +} + +func getFabricHostNodeSchema() *schema.Schema { + elemSchema := map[string]*schema.Schema{ + "fqdn": 
{ + Type: schema.TypeString, + Computed: true, + Description: "Fully qualified domain name of the fabric node", + }, + "ip_addresses": { + Type: schema.TypeList, + Optional: true, + Description: "IP Addresses of the Node, version 4 or 6", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + } + maps.Copy(elemSchema, getHostNodeSchemaAddlElements()) + s := schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: elemSchema, + }, + } + return &s +} + +func resourceNsxtPolicyHostTransportNodeRead(d *schema.ResourceData, m interface{}) error { + connector := getPolicyConnector(m) + htnClient := enforcement_points.NewHostTransportNodesClient(connector) + + id, siteID, epID, err := policyIDSiteEPTuple(d, m) + if err != nil { + return err + } + + obj, err := htnClient.Get(siteID, epID, id) + if err != nil { + return handleReadError(d, "HostTransportNode", id, err) + } + sitePath, err := getSitePathFromChildResourcePath(*obj.ParentPath) + if err != nil { + return handleReadError(d, "HostTransportNode", id, err) + } + + d.Set("site_path", sitePath) + d.Set("enforcement_point", epID) + d.Set("display_name", obj.DisplayName) + d.Set("description", obj.Description) + setPolicyTagsInSchema(d, obj.Tags) + d.Set("nsx_id", id) + d.Set("path", obj.Path) + d.Set("revision", obj.Revision) + + err = setHostSwitchSpecInSchema(d, obj.HostSwitchSpec) + if err != nil { + return err + } + + fabricHostNode := obj.NodeDeploymentInfo + elem := make(map[string]interface{}) + elem["fqdn"] = fabricHostNode.Fqdn + elem["ip_addresses"] = fabricHostNode.IpAddresses + + elem["os_type"] = fabricHostNode.OsType + elem["os_version"] = fabricHostNode.OsVersion + elem["windows_install_location"] = fabricHostNode.WindowsInstallLocation + + d.Set("node_deployment_info", []map[string]interface{}{elem}) + + return nil +} + +func resourceNsxtPolicyHostTransportNodeExists(siteID, epID, tzID string, connector client.Connector) (bool, error) { + var 
err error + + // Check site existence first + siteClient := infra.NewSitesClient(connector) + _, err = siteClient.Get(siteID) + if err != nil { + msg := fmt.Sprintf("failed to read site %s", siteID) + return false, logAPIError(msg, err) + } + + // Check (ep, htn) existence. In case of ep not found, NSX returns BAD_REQUEST + htnClient := enforcement_points.NewHostTransportNodesClient(connector) + _, err = htnClient.Get(siteID, epID, tzID) + if err == nil { + return true, nil + } + + if isNotFoundError(err) { + return false, nil + } + + return false, logAPIError("Error retrieving resource", err) +} + +func getFabricHostNodeFromSchema(d *schema.ResourceData) *model2.FabricHostNode { + for _, ni := range d.Get("node_deployment_info").([]interface{}) { + nodeInfo := ni.(map[string]interface{}) + ipAddresses := interfaceListToStringList(nodeInfo["ip_addresses"].([]interface{})) + + var hostCredential *model2.HostNodeLoginCredential + for _, hci := range nodeInfo["host_credential"].([]interface{}) { + hc := hci.(map[string]interface{}) + password := hc["password"].(string) + thumbprint := hc["thumbprint"].(string) + username := hc["username"].(string) + hostCredential = &model2.HostNodeLoginCredential{ + Password: &password, + Thumbprint: &thumbprint, + Username: &username, + } + } + osType := nodeInfo["os_type"].(string) + osVersion := nodeInfo["os_version"].(string) + windowsInstallLocation := nodeInfo["windows_install_location"].(string) + + fabricHostNode := model2.FabricHostNode{ + IpAddresses: ipAddresses, + HostCredential: hostCredential, + OsType: &osType, + OsVersion: &osVersion, + WindowsInstallLocation: &windowsInstallLocation, + } + return &fabricHostNode + } + return nil +} + +func policyHostTransportNodePatch(siteID, epID, htnID string, d *schema.ResourceData, m interface{}) error { + connector := getPolicyConnector(m) + htnClient := enforcement_points.NewHostTransportNodesClient(connector) + + description := d.Get("description").(string) + displayName := 
d.Get("display_name").(string) + tags := getPolicyTagsFromSchema(d) + discoveredNodeID := d.Get("discovered_node_id").(string) + nodeDeploymentInfo := getFabricHostNodeFromSchema(d) + hostSwitchSpec, err := getHostSwitchSpecFromSchema(d) + if err != nil { + return fmt.Errorf("failed to create hostSwitchSpec of HostTransportNode: %v", err) + } + + obj := model2.HostTransportNode{ + Description: &description, + DisplayName: &displayName, + Tags: tags, + HostSwitchSpec: hostSwitchSpec, + NodeDeploymentInfo: nodeDeploymentInfo, + DiscoveredNodeIdForCreate: &discoveredNodeID, + } + + return htnClient.Patch(siteID, epID, htnID, obj, nil, nil, nil, nil, nil, nil, nil) +} + +func resourceNsxtPolicyHostTransportNodeCreate(d *schema.ResourceData, m interface{}) error { + connector := getPolicyConnector(m) + id := d.Get("nsx_id").(string) + if id == "" { + id = newUUID() + } + sitePath := d.Get("site_path").(string) + siteID := getResourceIDFromResourcePath(sitePath, "sites") + if siteID == "" { + return fmt.Errorf("error obtaining Site ID from site path %s", sitePath) + } + epID := d.Get("enforcement_point").(string) + if epID == "" { + epID = getPolicyEnforcementPoint(m) + } + exists, err := resourceNsxtPolicyHostTransportNodeExists(siteID, epID, id, connector) + if err != nil { + return err + } + if exists { + return fmt.Errorf("resource with ID %s already exists", id) + } + + // Create the resource using PATCH + log.Printf("[INFO] Creating HostTransportNode with ID %s under site %s enforcement point %s", id, siteID, epID) + err = policyHostTransportNodePatch(siteID, epID, id, d, m) + if err != nil { + return handleCreateError("HostTransportNode", id, err) + } + + d.SetId(id) + d.Set("nsx_id", id) + + return resourceNsxtPolicyHostTransportNodeRead(d, m) +} + +func resourceNsxtPolicyHostTransportNodeUpdate(d *schema.ResourceData, m interface{}) error { + id, siteID, epID, err := policyIDSiteEPTuple(d, m) + if err != nil { + return err + } + + log.Printf("[INFO] Updateing 
HostTransportNode with ID %s", id) + err = policyHostTransportNodePatch(siteID, epID, id, d, m) + if err != nil { + return handleUpdateError("HostTransportNode", id, err) + } + + return resourceNsxtPolicyHostTransportNodeRead(d, m) +} + +func resourceNsxtPolicyHostTransportNodeDelete(d *schema.ResourceData, m interface{}) error { + connector := getPolicyConnector(m) + htnClient := enforcement_points.NewHostTransportNodesClient(connector) + + id, siteID, epID, err := policyIDSiteEPTuple(d, m) + if err != nil { + return err + } + + log.Printf("[INFO] Deleting HostTransportNode with ID %s", id) + err = htnClient.Delete(siteID, epID, id, nil, nil) + if err != nil { + return handleDeleteError("HostTransportNode", id, err) + } + + return nil +} + +func resourceNsxtPolicyHostTransportNodeImporter(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + importID := d.Id() + rd, err := nsxtPolicyPathResourceImporterHelper(d, m) + if err != nil { + return rd, err + } + + epID, err := getParameterFromPolicyPath("/enforcement-points/", "/host-transport-nodes/", importID) + if err != nil { + return nil, err + } + d.Set("enforcement_point", epID) + sitePath, err := getSitePathFromChildResourcePath(importID) + if err != nil { + return rd, err + } + d.Set("site_path", sitePath) + return rd, nil +} diff --git a/nsxt/resource_nsxt_policy_transport_zone.go b/nsxt/resource_nsxt_policy_transport_zone.go index 55730d7ef..e734e6368 100644 --- a/nsxt/resource_nsxt_policy_transport_zone.go +++ b/nsxt/resource_nsxt_policy_transport_zone.go @@ -147,7 +147,7 @@ func policyTransportZonePatch(siteID, epID, tzID string, d *schema.ResourceData, return err } -func policyTransportZoneIDTuple(d *schema.ResourceData, m interface{}) (id, siteID, epID string, err error) { +func policyIDSiteEPTuple(d *schema.ResourceData, m interface{}) (id, siteID, epID string, err error) { id = d.Id() if id == "" { err = fmt.Errorf("error obtaining TransportZone ID") @@ -207,7 +207,7 @@ func 
resourceNsxtPolicyTransportZoneRead(d *schema.ResourceData, m interface{}) connector := getPolicyConnector(m) tzClient := enforcement_points.NewTransportZonesClient(connector) - id, siteID, epID, err := policyTransportZoneIDTuple(d, m) + id, siteID, epID, err := policyIDSiteEPTuple(d, m) if err != nil { return err } @@ -238,7 +238,7 @@ func resourceNsxtPolicyTransportZoneRead(d *schema.ResourceData, m interface{}) } func resourceNsxtPolicyTransportZoneUpdate(d *schema.ResourceData, m interface{}) error { - id, siteID, epID, err := policyTransportZoneIDTuple(d, m) + id, siteID, epID, err := policyIDSiteEPTuple(d, m) if err != nil { return err } @@ -256,7 +256,7 @@ func resourceNsxtPolicyTransportZoneDelete(d *schema.ResourceData, m interface{} connector := getPolicyConnector(m) tzClient := enforcement_points.NewTransportZonesClient(connector) - id, siteID, epID, err := policyTransportZoneIDTuple(d, m) + id, siteID, epID, err := policyIDSiteEPTuple(d, m) if err != nil { return err } diff --git a/nsxt/resource_nsxt_transport_node.go b/nsxt/resource_nsxt_transport_node.go index 516619b31..dee205438 100644 --- a/nsxt/resource_nsxt_transport_node.go +++ b/nsxt/resource_nsxt_transport_node.go @@ -489,8 +489,8 @@ func getEdgeNodeSchema() *schema.Schema { return getNodeSchema(s, false) } -func getHostNodeSchema() *schema.Schema { - s := map[string]*schema.Schema{ +func getHostNodeSchemaAddlElements() map[string]*schema.Schema { + return map[string]*schema.Schema{ "host_credential": { Type: schema.TypeList, MaxItems: 1, @@ -535,6 +535,10 @@ func getHostNodeSchema() *schema.Schema { Description: "Install location of Windows Server on baremetal being managed by NSX", }, } +} + +func getHostNodeSchema() *schema.Schema { + s := getHostNodeSchemaAddlElements() return getNodeSchema(s, false) } @@ -1393,6 +1397,10 @@ func getHostSwitchSpecFromSchema(d *schema.ResourceData) (*data.StructValue, err return nil, fmt.Errorf("error parsing HostSwitchSpec schema %v", err) } portGroupTZID 
:= swData["portgroup_transport_zone_id"].(string) + portGroupTZIDPtr := &portGroupTZID + if portGroupTZID == "" { + portGroupTZIDPtr = nil + } transportNodeSubProfileCfg := getTransportNodeSubProfileCfg(swData["transport_node_profile_sub_configs"]) transportZoneEndpoints := getTransportZoneEndpointsFromSchema(swData["transport_zone_endpoint"].([]interface{})) uplinks := getUplinksFromSchema(swData["uplink"].([]interface{})) @@ -1406,7 +1414,7 @@ func getHostSwitchSpecFromSchema(d *schema.ResourceData) (*data.StructValue, err IpAssignmentSpec: iPAssignmentSpec, IsMigratePnics: &isMigratePNics, Pnics: pNics, - PortgroupTransportZoneId: &portGroupTZID, + PortgroupTransportZoneId: portGroupTZIDPtr, TransportNodeProfileSubConfigs: transportNodeSubProfileCfg, TransportZoneEndpoints: transportZoneEndpoints, Uplinks: uplinks, diff --git a/website/docs/r/policy_host_transport_node.html.markdown b/website/docs/r/policy_host_transport_node.html.markdown new file mode 100644 index 000000000..0a2ae3dd3 --- /dev/null +++ b/website/docs/r/policy_host_transport_node.html.markdown @@ -0,0 +1,164 @@ +--- +subcategory: "Fabric" +layout: "nsxt" +page_title: "NSXT: nsxt_policy_host_transport_node" +description: A resource to configure a HostTransportNode. +--- + +# nsxt_policy_host_transport_node + +This resource provides a method for the management of a HostTransportNode. +This resource is supported with NSX 4.1.0 onwards. 
+ +## Example Usage + +```hcl +resource "nsxt_policy_host_transport_node" "test" { + description = "Terraform-deployed host transport node" + display_name = "tf_host_transport_node" + site_path = "/infra/sites/default" + enforcement_point = "default" + + node_deployment_info { + ip_addresses = ["10.168.186.150"] + + os_type = "ESXI" + os_version = "7.0.3" + + host_credential { + username = "user1" + password = "password1" + thumbprint = "thumbprint1" + } + } + + standard_host_switch { + host_switch_mode = "STANDARD" + host_switch_type = "NVDS" + host_switch_profile_id = ["/infra/host-switch-profiles/2812c1e9-00e6-471a-9678-873c101d8f85"] + + ip_assignment { + assigned_by_dhcp = true + } + + transport_zone_endpoint { + transport_zone_id = "/infra/sites/default/enforcement-points/default/transport-zones/1b3a2f36-bfd1-443e-a0f6-4de01abc963e" + } + + pnic { + device_name = "fp-eth0" + uplink_name = "uplink1" + } + } + + tag { + scope = "app" + tag = "web" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `display_name` - (Required) Display name of the resource. +* `description` - (Optional) Description of the resource. +* `tag` - (Optional) A list of scope + tag pairs to associate with this resource. +* `site_path` - (Optional) The path of the site which the Host Transport Node belongs to. `path` field of the existing `nsxt_policy_site` can be used here. +* `enforcement_point` - (Optional) The ID of enforcement point under given `site_path` to manage the Host Transport Node. +* `discovered_node_id` - (Optional) Discovered node id to create Host Transport Node. Specify discovered node id to create Host Transport Node for Discovered Node. This field is required during Host Transport Node create from vCenter server managing the ESXi type HostNode. No need to provide node_deployment_info while creating Host Transport Node for Discovered Node. 
If both node_deployment_info and discovered_node_id_for_create provided during Host TransportNode create payload then it will create Host Transport Node from the discovered node id provided in this field. +* `node_deployment_info` - (Optional) +* `fqdn` - (Optional) Fully qualified domain name of the fabric node. +* `ip_addresses` - (Required) IP Addresses of the Node, version 4 or 6. +* `host_credential` - (Optional) Host login credentials. + * `password` - (Required) The authentication password of the host node. + * `thumbprint` - (Optional) ESXi thumbprint or SSH key fingerprint of the host node. + * `username` - (Required) The username of the account on the host node. +* `os_type` - (Required) Hypervisor OS type. Accepted values - 'ESXI', 'RHELSERVER', 'WINDOWSSERVER', 'RHELCONTAINER', 'UBUNTUSERVER', 'HYPERV', 'CENTOSSERVER', 'CENTOSCONTAINER', 'SLESSERVER' or 'OELSERVER'. +* `os_version` - (Optional) Hypervisor OS version. +* `windows_install_location` - (Optional) Install location of Windows Server on baremetal being managed by NSX. Defaults to 'C:\Program Files\VMware\NSX\'. +* `standard_host_switch` - (Optional) Standard host switch specification. + * `cpu_config` - (Optional) Enhanced Networking Stack enabled HostSwitch CPU configuration. + * `num_lcores` - (Required) Number of Logical cpu cores (Lcores) to be placed on a specified NUMA node. + * `numa_node_index` - (Required) Unique index of the Non Uniform Memory Access (NUMA) node. + * `host_switch_id` - (Optional) The host switch id. This ID will be used to reference a host switch. + * `host_switch_mode` - (Optional) Operational mode of a HostSwitch. Accepted values - 'STANDARD', 'ENS', 'ENS_INTERRUPT' or 'LEGACY'. The default value is 'STANDARD'. + * `host_switch_profile_id` - (Optional) Identifiers of host switch profiles to be associated with this host switch. + * `host_switch_type` - (Optional) Type of HostSwitch. Accepted values - 'NVDS' or 'VDS'. The default value is 'NVDS'. 
+  * `ip_assignment` - (Required) Specification for IPs to be used with host switch virtual tunnel endpoints. Should contain exactly one of the below:
+    * `assigned_by_dhcp` - (Optional) Enables DHCP assignment. Should be set to true.
+    * `static_ip` - (Optional) IP assignment specification for Static IP List.
+      * `ip_addresses` - (Required) List of IPs for transport node host switch virtual tunnel endpoints.
+      * `subnet_mask` - (Required) Subnet mask.
+      * `default_gateway` - (Required) Gateway IP.
+    * `static_ip_mac` - (Optional) IP and MAC assignment specification for Static IP List.
+      * `default_gateway` - (Required) Gateway IP.
+      * `subnet_mask` - (Required) Subnet mask.
+      * `ip_mac_pair` - (Required) List of IPs and MACs for transport node host switch virtual tunnel endpoints.
+        * `ip` - (Required) IP address.
+        * `mac` - (Required) MAC address.
+    * `static_ip_pool_id` - (Optional) IP assignment specification for Static IP Pool.
+  * `is_migrate_pnics` - (Optional) Migrate any pnics which are in use.
+  * `pnic` - (Optional) Physical NICs connected to the host switch.
+    * `device_name` - (Required) Device name or key.
+    * `uplink_name` - (Required) Uplink name for this Pnic.
+  * `portgroup_transport_zone_id` - (Optional) Transport Zone ID representing the DVS used in NSX on DVPG.
+  * `transport_node_profile_sub_config` - (Optional) Transport Node Profile sub-configuration Options.
+    * `host_switch_config_option` - (Required) Subset of the host switch configuration.
+      * `host_switch_id` - (Optional) The host switch id. This ID will be used to reference a host switch.
+      * `host_switch_profile_id` - (Optional) Identifiers of host switch profiles to be associated with this host switch.
+      * `ip_assignment` - (Required) Specification for IPs to be used with host switch virtual tunnel endpoints. Should contain exactly one of the below:
+        * `assigned_by_dhcp` - (Optional) Enables DHCP assignment. Should be set to true.
+ * `static_ip` - (Optional) IP assignment specification for Static IP List. + * `ip_addresses` - (Required) List of IPs for transport node host switch virtual tunnel endpoints. + * `subnet_mask` - (Required) Subnet mask. + * `default_gateway` - (Required) Gateway IP. + * `static_ip_mac` - (Optional) IP and MAC assignment specification for Static IP List. + * `default_gateway` - (Required) Gateway IP. + * `subnet_mask` - (Required) Subnet mask. + * `ip_mac_pair` - (Required) List of IPs and MACs for transport node host switch virtual tunnel endpoints. + * `ip` - (Required) IP address. + * `mac` - (Required) MAC address. + * `static_ip_pool_id` - (Optional) IP assignment specification for Static IP Pool. + * `uplink` - (Optional) Uplink/LAG of VMware vSphere Distributed Switch connected to the HostSwitch. + * `uplink_name` - (Required) Uplink name from UplinkHostSwitch profile. + * `vds_lag_name` - (Optional) Link Aggregation Group (LAG) name of Virtual Distributed Switch. + * `vds_uplink_name` - (Optional) Uplink name of VMware vSphere Distributed Switch (VDS). + * `name` - (Required) Name of the transport node profile config option. + * `transport_zone_endpoint` - (Optional) Transport zone endpoints + * `transport_zone_id` - (Required) Unique ID identifying the transport zone for this endpoint. + * `transport_zone_profile_id` - (Optional) Identifiers of the transport zone profiles associated with this transport zone endpoint on this transport node. + * `uplink` - (Optional) Uplink/LAG of VMware vSphere Distributed Switch connected to the HostSwitch. + * `uplink_name` - (Required) Uplink name from UplinkHostSwitch profile. + * `vds_lag_name` - (Optional) Link Aggregation Group (LAG) name of Virtual Distributed Switch. + * `vds_uplink_name` - (Optional) Uplink name of VMware vSphere Distributed Switch (VDS). + * `vmk_install_migration` - (Optional) The vmknic and logical switch mappings. 
+ * `destination_network` - (Required) The network id to which the ESX vmk interface will be migrated. + * `device_name` - (Required) ESX vmk interface name. +* `preconfigured_host_switch` - (Optional) Preconfigured host switch. + * `endpoint` - (Optional) Name of the virtual tunnel endpoint which is preconfigured on this host switch. + * `host_switch_id` - (Required) External Id of the preconfigured host switch. + * `transport_zone_endpoint` - (Optional) Transport zone endpoints + * `transport_zone_id` - (Required) Unique ID identifying the transport zone for this endpoint. + * `transport_zone_profile_id` - (Optional) Identifiers of the transport zone profiles associated with this transport zone endpoint on this transport node. + +**NOTE:** Resource should contain either `standard_host_switch` or `preconfigured_host_switch` + +## Attributes Reference + +In addition to arguments listed above, the following attributes are exported: + +* `id` - ID of the resource. +* `revision` - Indicates current revision number of the object as seen by NSX-T API server. This attribute can be useful for debugging. + +## Importing + +An existing Transport Node can be [imported][docs-import] into this resource, via the following command: + +[docs-import]: https://www.terraform.io/cli/import + +``` +terraform import nsxt_policy_host_transport_node.test POLICY_PATH +``` +The above command imports Host Transport Node named `test` with the policy path `POLICY_PATH`.