From ffd784d6521b67fcbeda11c9f2857a484978b56d Mon Sep 17 00:00:00 2001 From: ShiChangkuo Date: Tue, 3 Nov 2020 14:45:05 +0800 Subject: [PATCH] support to log into mrs cluster node with password --- docs/resources/mrs_cluster.md | 230 +++++++++--------- go.mod | 2 +- go.sum | 2 + .../resource_huaweicloud_mrs_cluster_v1.go | 101 ++++---- .../openstack/mrs/v1/cluster/requests.go | 18 +- vendor/modules.txt | 2 +- 6 files changed, 180 insertions(+), 175 deletions(-) diff --git a/docs/resources/mrs_cluster.md b/docs/resources/mrs_cluster.md index f4d82fba84..254015f871 100644 --- a/docs/resources/mrs_cluster.md +++ b/docs/resources/mrs_cluster.md @@ -7,7 +7,7 @@ subcategory: "MapReduce Service (MRS)" Manages resource cluster within HuaweiCloud MRS. This is an alternative to `huaweicloud_mrs_cluster_v1` -## Example Usage: Creating a MRS cluster +## Example Usage: Creating an MRS cluster ```hcl resource "huaweicloud_mrs_cluster" "cluster1" { @@ -26,8 +26,8 @@ resource "huaweicloud_mrs_cluster" "cluster1" { safe_mode = 0 cluster_admin_secret = var.admin_secret node_public_cert_name = var.keypair - vpc_id = "51edfb75-f9f0-4bbc-b4dc-21466b93f60d" - subnet_id = "1d7a8646-43ee-455a-a3ab-40da87a1304c" + vpc_id = var.vpc_id + subnet_id = var.subnet_id component_list { component_name = "Hadoop" @@ -45,133 +45,129 @@ resource "huaweicloud_mrs_cluster" "cluster1" { The following arguments are supported: -* `region` - (Optional) The region in which to obtain the msr cluster resource. If omitted, the provider-level region will work as default. Changing this creates a new msr cluster resource. +* `region` - (Optional) The region in which to obtain the msr cluster resource. + If omitted, the provider-level region will work as default. + Changing this creates a new msr cluster resource. * `billing_type` - (Required) The value is 12, indicating on-demand payment. -* `region` - (Required) Cluster region information. Obtain the value from - Regions and Endpoints. 
- * `cluster_name` - (Required) Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_). * `cluster_version` - (Optional) Version of the clusters. Currently, MRS 1.8.10, MRS 1.9.2 and MRS 2.1.0 are supported. -* `cluster_type` - (Optional) Type of clusters 0: analysis cluster 1: streaming - cluster The default value is 0. +* `cluster_type` - (Optional) Type of clusters. 0: analysis cluster 1: streaming + cluster. The default value is 0. -* `master_node_num` - (Required) Number of Master nodes The value is 2. +* `master_node_num` - (Required) Number of Master nodes. Set this parameter to 2 to enable cluster HA, + set this parameter to 1 to enable cluster HA. * `master_node_size` - (Required) Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disks space. MRS supports instance specifications detailed in [MRS specifications](https://support.huaweicloud.com/en-us/api-mrs/mrs_01_9006.html) -* `core_node_num` - (Required) Number of Core nodes Value range: 3 to 100 A - maximum of 100 Core nodes are supported by default. If more than 100 Core nodes - are required, contact technical support engineers or invoke background APIs - to modify the database. +* `core_node_num` - (Required) Number of Core nodes. Value range: 1 to 500. * `core_node_size` - (Required) Instance specification of a Core node Configuration method of this parameter is identical to that of master_node_size. -* `available_zone_id` - (Required) ID of an available zone. Obtain the value - from Regions and Endpoints. 
- North China AZ1 (cn-north-1a): ae04cf9d61544df3806a3feeb401b204, - North China AZ2 (cn-north-1b): d573142f24894ef3bd3664de068b44b0, - East China AZ1 (cn-east-2a): 72d50cedc49846b9b42c21495f38d81c, - East China AZ2 (cn-east-2b): 38b0f7a602344246bcb0da47b5d548e7, - East China AZ3 (cn-east-2c): 5547fd6bf8f84bb5a7f9db062ad3d015, - South China AZ1(cn-south-1a): 34f5ff4865cf4ed6b270f15382ebdec5, - South China AZ2(cn-south-2b): 043c7e39ecb347a08dc8fcb6c35a274e, - South China AZ3(cn-south-1c): af1687643e8c4ec1b34b688e4e3b8901, - -* `vpc_id` - (Required) ID of the VPC where the subnet locates Obtain the VPC - ID from the management console as follows: Register an account and log in to - the management console. Click Virtual Private Cloud and select Virtual Private - Cloud from the left list. On the Virtual Private Cloud page, obtain the VPC - ID from the list. - -* `subnet_id` - (Required) Subnet ID Obtain the subnet ID from the management - console as follows: Register an account and log in to the management console. - Click Virtual Private Cloud and select Virtual Private Cloud from the left list. - On the Virtual Private Cloud page, obtain the subnet ID from the list. - -* `volume_type` - (Required) Type of disks SATA and SSD are supported. SATA: - common I/O SSD: super high-speed I/O - -* `volume_size` - (Required) Data disk storage space of a Core node Users can - add disks to expand storage capacity when creating a cluster. There are the - following scenarios: Separation of data storage and computing: Data is stored - in the OBS system. Costs of clusters are relatively low but computing performance - is poor. The clusters can be deleted at any time. It is recommended when data - computing is not frequently performed. Integration of data storage and computing: - Data is stored in the HDFS system. Costs of clusters are relatively high but - computing performance is good. The clusters cannot be deleted in a short term. 
- It is recommended when data computing is frequently performed. Value range: - 100 GB to 32000 GB - -* `node_public_cert_name` - (Required) Name of a key pair You can use a key +* `available_zone_id` - (Required) ID of an available zone. The value as follows: + + CN North-Beijing1 AZ1 (cn-north-1a): ae04cf9d61544df3806a3feeb401b204
+ CN North-Beijing1 AZ2 (cn-north-1b): d573142f24894ef3bd3664de068b44b0
+ CN North-Beijing4 AZ1 (cn-north-4a): effdcbc7d4d64a02aa1fa26b42f56533
+ CN North-Beijing4 AZ2 (cn-north-4b): a0865121f83b41cbafce65930a22a6e8
+ CN North-Beijing4 AZ3 (cn-north-4c): 2dcb154ac2724a6d92e9bcc859657c1e
+ CN East-Shanghai1 AZ1 (cn-east-3a): e7afd64502d64fe3bfb60c2c82ec0ec6
+ CN East-Shanghai1 AZ2 (cn-east-3b): d90ff6d692954373bf53be49cf3900cb
+ CN East-Shanghai1 AZ3 (cn-east-3c): 2dafb4c708da4d509d0ad24864ae1c6d
+ CN East-Shanghai2 AZ1 (cn-east-2a): 72d50cedc49846b9b42c21495f38d81c
+ CN East-Shanghai2 AZ2 (cn-east-2b): 38b0f7a602344246bcb0da47b5d548e7
+ CN East-Shanghai2 AZ3 (cn-east-2c): 5547fd6bf8f84bb5a7f9db062ad3d015
+ CN South-Guangzhou AZ1 (cn-south-1a): 34f5ff4865cf4ed6b270f15382ebdec5
+ CN South-Guangzhou AZ2 (cn-south-1b): 043c7e39ecb347a08dc8fcb6c35a274e&#13;
+ CN South-Guangzhou AZ3 (cn-south-1c): af1687643e8c4ec1b34b688e4e3b8901
+ +* `vpc_id` - (Required) Specifies the VPC ID. Changing this parameter will create a new resource. + +* `subnet_id` - (Required) Specifies the network id of a subnet. Changing this parameter will create a new resource. + +* `volume_type` - (Required) Type of disks SATA and SSD are supported. SATA: common I/O; + SSD: super high-speed I/O. + +* `volume_size` - (Required) Data disk storage space of a Core node. Value range: 100 GB to 32000 GB + +* `safe_mode` - (Required) running mode of an MRS cluster. + - 0: indicates that the Kerberos authentication is disabled. + Users can use all functions provided by the cluster. + - 1: indicates that the Kerberos authentication is enabled. + Common users cannot use the file management or job management functions of an MRS cluster + and cannot view cluster resource usage or the job records of Hadoop and Spark. + To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator. + +* `cluster_admin_secret` - (Required) Indicates the password of the MRS Manager administrator. + This parameter must meet the following requirements: + - Must contain 8 to 26 characters. + - Must contain at least three of the following: uppercase letters, lowercase letters, + digits, and special characters: `~!@#$%^&*()-_=+\|[{}];:'",<.>/? and space. + - Cannot be the username or the username spelled backwards. + +* `node_password` - (Optional) Password of user **root** for logging in to a cluster node. + This parameter and `node_public_cert_name` are alternative. A password must meet the following requirements: + - Must be 8 to 26 characters. + - Must contain at least three of the following: uppercase letters, lowercase letters, + digits, and special characters (!@$%^-_=+[{}]:,./?), but must not contain spaces. + - Cannot be the username or the username spelled backwards. + +* `node_public_cert_name` - (Optional) Name of a key pair. You can use a key to log in to the Master node in the cluster. 
- -* `safe_mode` - (Required) MRS cluster running mode 0: common mode The value - indicates that the Kerberos authentication is disabled. Users can use all functions - provided by the cluster. 1: safe mode The value indicates that the Kerberos - authentication is enabled. Common users cannot use the file management or job - management functions of an MRS cluster and cannot view cluster resource usage - or the job records of Hadoop and Spark. To use these functions, the users must - obtain the relevant permissions from the MRS Manager administrator. The request - has the cluster_admin_secret parameter only when safe_mode is set to 1. - -* `cluster_admin_secret` - (Optional) Indicates the password of the MRS Manager - administrator. The password for MRS 1.5.0: Must contain 6 to 32 characters. - Must contain at least two types of the following: Lowercase letters Uppercase - letters Digits Special characters of `~!@#$%^&*()-_=+\|[{}];:'",<.>/? Spaces - Must be different from the username. Must be different from the username written - in reverse order. The password for MRS 1.3.0: Must contain 8 to 64 characters. - Must contain at least four types of the following: Lowercase letters Uppercase - letters Digits Special characters of `~!@#$%^&*()-_=+\|[{}];:'",<.>/? Spaces - Must be different from the username. Must be different from the username written - in reverse order. This parameter needs to be configured only when safe_mode - is set to 1. + This parameter and `node_password` are alternative. * `log_collection` - (Optional) Indicates whether logs are collected when cluster - installation fails. 0: not collected 1: collected The default value is 0. If - log_collection is set to 1, OBS buckets will be created to collect the MRS logs. - These buckets will be charged. + installation fails. 0: not collected; 1: collected. The default value is 1, + indicating that OBS buckets will be created and only used to collect logs that + record MRS cluster creation failures. 
-* `component_list` - (Required) Service component list. +* `component_list` - (Required) List of service components to be installed. + Structure is documented below. -* `add_jobs` - (Optional) You can submit a job when you create a cluster to - save time and use MRS easily. Only one job can be added. +* `add_jobs` - (Optional) Jobs can be submitted when a cluster is created. + Currently, only one job can be created. Structure is documented below. * `tags` - (Optional) The key/value pairs to associate with the cluster. The `component_list` block supports: -* `component_name` - (Required) Component name Currently, Hadoop, Spark, HBase, - Hive, Hue, Loader, Flume, Kafka and Storm are supported. +* `component_name` - (Required) Component name. + - MRS 2.1.0 supports: Presto, Hadoop, Spark, HBase, Hive, Tez, Hue, Loader, Flink, Impala, Kudu, Flume, Kafka, and Storm; + - MRS 1.9.2 supports: Presto, Hadoop, Spark, HBase, OpenTSDB, Hive, Hue, Loader, Tez, Flink, Alluxio, Ranger, Flume, Kafka, KafkaManager, and Storm; + - MRS 1.8.10 supports: Presto, Hadoop, Spark, HBase, OpenTSDB, Hive, Hue, Loader, Flink, Flume, Kafka, KafkaManager, and Storm; The `add_jobs` block supports: -* `job_type` - (Required) Job type 1: MapReduce 2: Spark 3: Hive Script 4: HiveQL - (not supported currently) 5: DistCp, importing and exporting data (not supported - in this API currently). 6: Spark Script 7: Spark SQL, submitting Spark SQL statements - (not supported in this API currently). NOTE: Spark and Hive jobs can be added - to only clusters including Spark and Hive components. +* `job_type` - (Required) Job type code. 1: MapReduce; 2: Spark; 3: Hive Script; 4: HiveQL + (not supported currently); 5: DistCp, importing and exporting data (not supported + currently); 6: Spark Script; 7: Spark SQL, submitting Spark SQL statements + (not supported currently). + NOTE: Spark and Hive jobs can be added to only clusters including Spark and Hive components. 
-* `job_name` - (Required) Job name It contains only 1 to 64 letters, digits, - hyphens (-), and underscores (_). NOTE: Identical job names are allowed but - not recommended. +* `job_name` - (Required) Job name. It contains 1 to 64 characters. Only letters, digits, + hyphens (-), and underscores (_) are allowed. + NOTE: Identical job names are allowed but not recommended. * `jar_path` - (Required) Path of the .jar file or .sql file for program execution - The parameter must meet the following requirements: Contains a maximum of 1023 - characters, excluding special characters such as ;|&><'$. The address cannot - be empty or full of spaces. Starts with / or s3a://. Spark Script must end with - .sql; while MapReduce and Spark Jar must end with .jar. sql and jar are case-insensitive. - -* `arguments` - (Optional) Key parameter for program execution The parameter + The parameter must meet the following requirements: + - Contains a maximum of 1,023 characters, excluding special characters such as ;|&><'$. + The parameter value cannot be empty or full of spaces. + - Files can be stored in HDFS or OBS. The path varies depending on the file system. + OBS: The path must start with s3a://. Files or programs encrypted by KMS are not supported. + HDFS: The path starts with a slash (/). + - Spark Script must end with .sql while MapReduce and Spark Jar must end with .jar. + sql and jar are case-insensitive. + +* `arguments` - (Optional) Key parameter for program execution. The parameter is specified by the function of the user's program. MRS is only responsible for loading the parameter. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty. @@ -191,15 +187,14 @@ The `add_jobs` block supports: ;|&>'<$, and can be empty. * `shutdown_cluster` - (Optional) Whether to delete the cluster after the jobs - are complete true: Yes false: No + are complete. 
-* `file_action` - (Optional) Data import and export import export +* `file_action` - (Optional) Data import and export. Valid values include: import, export. -* `submit_job_once_cluster_run` - (Required) true: A job is submitted when a - cluster is created. false: A job is submitted separately. The parameter is set - to true in this example. +* `submit_job_once_cluster_run` - (Required) Whether the job is submitted during the cluster + creation or after the cluster is created. -* `hql` - (Optional) HiveQL statement +* `hql` - (Optional) HiveQL statement. * `hive_script_path` - (Optional) SQL program path This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: @@ -211,30 +206,31 @@ The `add_jobs` block supports: The following attributes are exported: +* `id` - Indicates the MRS cluster ID. +* `region` - See Argument Reference above. +* `available_zone_id` - See Argument Reference above. +* `available_zone_name` - Indicates the name of an availability zone. * `billing_type` - See Argument Reference above. -* `data_center` - See Argument Reference above. +* `cluster_name` - See Argument Reference above. +* `cluster_version` - See Argument Reference above. +* `cluster_type` - See Argument Reference above. * `master_node_num` - See Argument Reference above. * `master_node_size` - See Argument Reference above. * `core_node_num` - See Argument Reference above. * `core_node_size` - See Argument Reference above. -* `available_zone_id` - See Argument Reference above. -* `cluster_name` - See Argument Reference above. -* `vpc_id` - See Argument Reference above. -* `subnet_id` - See Argument Reference above. -* `cluster_version` - See Argument Reference above. -* `cluster_type` - See Argument Reference above. * `volume_type` - See Argument Reference above. * `volume_size` - See Argument Reference above. -* `node_public_cert_name` - See Argument Reference above. * `safe_mode` - See Argument Reference above. 
* `cluster_admin_secret` - See Argument Reference above. +* `node_password` - See Argument Reference above. +* `node_public_cert_name` - See Argument Reference above. * `log_collection` - See Argument Reference above. +* `vpc_id` - See Argument Reference above. +* `subnet_id` - See Argument Reference above. * `component_list` - See Argument Reference below. * `add_jobs` - See Argument Reference above. * `tags` - See Argument Reference above. * `order_id` - Order ID for creating clusters. -* `cluster_id` - Cluster ID. -* `available_zone_name` - Name of an availability zone. * `instance_id` - Instance ID. * `hadoop_version` - Hadoop version. * `master_node_ip` - IP address of a Master node. @@ -252,16 +248,16 @@ The following attributes are exported: * `vnc` - URI address for remote login of the elastic cloud server. * `fee` - Cluster creation fee, which is automatically calculated. * `deployment_id` - Deployment ID of a cluster. -* `cluster_state` - Cluster status Valid values include: existing history starting - running terminated failed abnormal terminating rebooting shutdown frozen scaling-out - scaling-in scaling-error. -* `tenant_id` - Project ID. +* `cluster_state` - Cluster status. Valid values include: starting, running, terminating, terminated, + failed, abnormal, frozen, scaling-out, scaling-in. * `create_at` - Cluster creation time. * `update_at` - Cluster update time. * `error_info` - Error information. * `charging_start_time` - Time when charging starts. * `remark` - Remarks of a cluster. -The component_list attributes: -* `component_name` - (Required) Component name Currently, Hadoop, Spark, HBase, - Hive, Hue, Loader, Flume, Kafka and Storm are supported. +The `component_list` attributes supports: +* `component_id` - Indicates the component ID. +* `component_name` - Indicates the component name. +* `component_version` - Indicates the component version. +* `component_desc` - Indicates the component description. 
diff --git a/go.mod b/go.mod index db7955e469..979534c344 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/terraform-plugin-sdk v1.13.0 - github.com/huaweicloud/golangsdk v0.0.0-20201030072716-cb31520416ba + github.com/huaweicloud/golangsdk v0.0.0-20201103063720-d2a60b0922dd github.com/jen20/awspolicyequivalence v0.0.0-20170831201602-3d48364a137a github.com/mitchellh/go-homedir v1.1.0 github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa // indirect diff --git a/go.sum b/go.sum index 457434e6d6..127ded44bd 100644 --- a/go.sum +++ b/go.sum @@ -131,6 +131,8 @@ github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/huaweicloud/golangsdk v0.0.0-20201030072716-cb31520416ba h1:HYHfyOIFOG8Y3y1XZoiyFhsTUxkg56IxPn+D9ME60pE= github.com/huaweicloud/golangsdk v0.0.0-20201030072716-cb31520416ba/go.mod h1:fcOI5u+0f62JtJd7zkCch/Z57BNC6bhqb32TKuiF4r0= +github.com/huaweicloud/golangsdk v0.0.0-20201103063720-d2a60b0922dd h1:RBbFW2weIB9TXSLu2nAqtW13T86TafHZxEOglqRbHc4= +github.com/huaweicloud/golangsdk v0.0.0-20201103063720-d2a60b0922dd/go.mod h1:fcOI5u+0f62JtJd7zkCch/Z57BNC6bhqb32TKuiF4r0= github.com/jen20/awspolicyequivalence v0.0.0-20170831201602-3d48364a137a h1:FyS/ubzBR5xJlnJGRTwe7GUHpJOR4ukYK3y+LFNffuA= github.com/jen20/awspolicyequivalence v0.0.0-20170831201602-3d48364a137a/go.mod h1:uoIMjNxUfXi48Ci40IXkPRbghZ1vbti6v9LCbNqRgHY= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= diff --git a/huaweicloud/resource_huaweicloud_mrs_cluster_v1.go b/huaweicloud/resource_huaweicloud_mrs_cluster_v1.go index edec46280c..508f6242c6 100644 --- a/huaweicloud/resource_huaweicloud_mrs_cluster_v1.go +++ 
b/huaweicloud/resource_huaweicloud_mrs_cluster_v1.go @@ -66,9 +66,10 @@ func resourceMRSClusterV1() *schema.Resource { ForceNew: true, }, "master_node_num": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1, 2), }, "master_node_size": { Type: schema.TypeString, @@ -78,8 +79,8 @@ func resourceMRSClusterV1() *schema.Resource { "core_node_num": { Type: schema.TypeInt, Required: true, - ValidateFunc: validation.IntBetween(1, 500), ForceNew: true, + ValidateFunc: validation.IntBetween(1, 500), }, "core_node_size": { Type: schema.TypeString, @@ -117,15 +118,25 @@ func resourceMRSClusterV1() *schema.Resource { ForceNew: true, }, "cluster_admin_secret": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + Sensitive: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(8, 26), + }, + "node_password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ForceNew: true, + ExactlyOneOf: []string{"node_public_cert_name"}, + ValidateFunc: validation.StringLenBetween(8, 26), }, "node_public_cert_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"node_password"}, }, "log_collection": { Type: schema.TypeInt, @@ -241,10 +252,6 @@ func resourceMRSClusterV1() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "cluster_id": { - Type: schema.TypeString, - Computed: true, - }, "available_zone_name": { Type: schema.TypeString, Computed: true, @@ -329,10 +336,6 @@ func resourceMRSClusterV1() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "tenant_id": { - Type: schema.TypeString, - Computed: true, - }, "update_at": { Type: schema.TypeString, Computed: true, @@ -411,11 +414,13 @@ func ClusterStateRefreshFunc(client 
*golangsdk.ServiceClient, clusterID string) func resourceClusterV1Create(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - client, err := config.MrsV1Client(GetRegion(d, config)) + region := GetRegion(d, config) + + client, err := config.MrsV1Client(region) if err != nil { return fmt.Errorf("Error creating HuaweiCloud MRS client: %s", err) } - vpcClient, err := config.NetworkingV1Client(GetRegion(d, config)) + vpcClient, err := config.NetworkingV1Client(region) if err != nil { return fmt.Errorf("Error creating HuaweiCloud Vpc client: %s", err) } @@ -431,30 +436,36 @@ func resourceClusterV1Create(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error retrieving HuaweiCloud Subnet: %s", err) } + loginMode := 0 + if _, ok := d.GetOk("node_public_cert_name"); ok { + loginMode = 1 + } + createOpts := &cluster.CreateOpts{ - BillingType: d.Get("billing_type").(int), - DataCenter: d.Get("region").(string), - MasterNodeNum: d.Get("master_node_num").(int), - MasterNodeSize: d.Get("master_node_size").(string), - CoreNodeNum: d.Get("core_node_num").(int), - CoreNodeSize: d.Get("core_node_size").(string), - AvailableZoneID: d.Get("available_zone_id").(string), - ClusterName: d.Get("cluster_name").(string), - ClusterVersion: d.Get("cluster_version").(string), - ClusterType: d.Get("cluster_type").(int), - VpcID: d.Get("vpc_id").(string), - SubnetID: d.Get("subnet_id").(string), - Vpc: vpc.Name, - SubnetName: subnet.Name, - VolumeType: d.Get("volume_type").(string), - VolumeSize: d.Get("volume_size").(int), - SafeMode: d.Get("safe_mode").(int), - LoginMode: 1, - NodePublicCertName: d.Get("node_public_cert_name").(string), - ClusterAdminSecret: d.Get("cluster_admin_secret").(string), - LogCollection: d.Get("log_collection").(int), - ComponentList: getAllClusterComponents(d), - AddJobs: getAllClusterJobs(d), + DataCenter: region, + BillingType: d.Get("billing_type").(int), + MasterNodeNum: d.Get("master_node_num").(int), + 
MasterNodeSize: d.Get("master_node_size").(string), + CoreNodeNum: d.Get("core_node_num").(int), + CoreNodeSize: d.Get("core_node_size").(string), + AvailableZoneID: d.Get("available_zone_id").(string), + ClusterName: d.Get("cluster_name").(string), + ClusterVersion: d.Get("cluster_version").(string), + ClusterType: d.Get("cluster_type").(int), + VpcID: d.Get("vpc_id").(string), + SubnetID: d.Get("subnet_id").(string), + Vpc: vpc.Name, + SubnetName: subnet.Name, + VolumeType: d.Get("volume_type").(string), + VolumeSize: d.Get("volume_size").(int), + SafeMode: d.Get("safe_mode").(int), + LoginMode: loginMode, + NodePublicCertName: d.Get("node_public_cert_name").(string), + ClusterMasterSecret: d.Get("node_password").(string), + ClusterAdminSecret: d.Get("cluster_admin_secret").(string), + LogCollection: d.Get("log_collection").(int), + ComponentList: getAllClusterComponents(d), + AddJobs: getAllClusterJobs(d), } log.Printf("[DEBUG] Create Options: %#v", createOpts) @@ -514,7 +525,6 @@ func resourceClusterV1Read(d *schema.ResourceData, meta interface{}) error { d.SetId(clusterGet.Clusterid) d.Set("region", GetRegion(d, config)) d.Set("order_id", clusterGet.Orderid) - d.Set("cluster_id", clusterGet.Clusterid) d.Set("available_zone_name", clusterGet.Azname) d.Set("available_zone_id", clusterGet.Azid) d.Set("cluster_name", clusterGet.Clustername) @@ -563,7 +573,6 @@ func resourceClusterV1Read(d *schema.ResourceData, meta interface{}) error { d.Set("cluster_state", clusterGet.Clusterstate) d.Set("error_info", clusterGet.Errorinfo) d.Set("remark", clusterGet.Remark) - d.Set("tenant_id", clusterGet.Tenantid) updateAt, err := strconv.ParseInt(clusterGet.Updateat, 10, 64) if err != nil { diff --git a/vendor/github.com/huaweicloud/golangsdk/openstack/mrs/v1/cluster/requests.go b/vendor/github.com/huaweicloud/golangsdk/openstack/mrs/v1/cluster/requests.go index a1e7ed4d93..438bd9e9c9 100644 --- a/vendor/github.com/huaweicloud/golangsdk/openstack/mrs/v1/cluster/requests.go +++ 
b/vendor/github.com/huaweicloud/golangsdk/openstack/mrs/v1/cluster/requests.go @@ -1,12 +1,8 @@ package cluster -import ( - "log" +import "github.com/huaweicloud/golangsdk" - "github.com/huaweicloud/golangsdk" -) - -var RequestOpts golangsdk.RequestOpts = golangsdk.RequestOpts{ +var requestOpts golangsdk.RequestOpts = golangsdk.RequestOpts{ MoreHeaders: map[string]string{"Content-Type": "application/json", "X-Language": "en-us"}, } @@ -88,7 +84,7 @@ func Create(c *golangsdk.ServiceClient, opts CreateOptsBuilder) (r CreateResult) r.Err = err return } - log.Printf("[DEBUG] create url:%q, body=%#v", createURL(c), b) + reqOpt := &golangsdk.RequestOpts{OkCodes: []int{200}} _, r.Err = c.Post(createURL(c), b, &r.Body, reqOpt) return @@ -97,14 +93,16 @@ func Create(c *golangsdk.ServiceClient, opts CreateOptsBuilder) (r CreateResult) func Get(c *golangsdk.ServiceClient, id string) (r GetResult) { _, r.Err = c.Get(getURL(c, id), &r.Body, &golangsdk.RequestOpts{ OkCodes: []int{200}, - MoreHeaders: RequestOpts.MoreHeaders, JSONBody: nil, + MoreHeaders: requestOpts.MoreHeaders, JSONBody: nil, }) return } func Delete(c *golangsdk.ServiceClient, id string) (r DeleteResult) { - reqOpt := &golangsdk.RequestOpts{OkCodes: []int{204}, - MoreHeaders: RequestOpts.MoreHeaders} + reqOpt := &golangsdk.RequestOpts{ + OkCodes: []int{204}, + MoreHeaders: requestOpts.MoreHeaders, + } _, r.Err = c.Delete(deleteURL(c, id), reqOpt) return } diff --git a/vendor/modules.txt b/vendor/modules.txt index a0a9240418..cc87b6a426 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -186,7 +186,7 @@ github.com/hashicorp/terraform-svchost/auth github.com/hashicorp/terraform-svchost/disco # github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d github.com/hashicorp/yamux -# github.com/huaweicloud/golangsdk v0.0.0-20201030072716-cb31520416ba +# github.com/huaweicloud/golangsdk v0.0.0-20201103063720-d2a60b0922dd ## explicit github.com/huaweicloud/golangsdk github.com/huaweicloud/golangsdk/internal