From 4ebea10dce2ab319b59678ed2ac6c99bb975fa49 Mon Sep 17 00:00:00 2001 From: Yun Liu Date: Mon, 19 Sep 2022 12:02:03 +0800 Subject: [PATCH 01/12] update property `source_port_ranges` --- internal/services/batch/batch_pool.go | 22 +++++++++++++++++++ .../services/batch/batch_pool_data_source.go | 7 ++++++ .../services/batch/batch_pool_resource.go | 11 ++++++++++ .../batch/batch_pool_resource_test.go | 2 ++ website/docs/d/batch_pool.html.markdown | 2 ++ 5 files changed, 44 insertions(+) diff --git a/internal/services/batch/batch_pool.go b/internal/services/batch/batch_pool.go index 6725f9cbd530..bcbabf577d11 100644 --- a/internal/services/batch/batch_pool.go +++ b/internal/services/batch/batch_pool.go @@ -1164,6 +1164,21 @@ func expandPoolNetworkSecurityGroupRule(list []interface{}) []batch.NetworkSecur sourceAddressPrefix := groupRuleMap["source_address_prefix"].(string) access := batch.NetworkSecurityGroupRuleAccess(groupRuleMap["access"].(string)) + networkSecurityGroupRuleObject := batch.NetworkSecurityGroupRule{ + Priority: &priority, + SourceAddressPrefix: &sourceAddressPrefix, + Access: access, + } + + portRanges := groupRuleMap["source_port_ranges"].([]interface{}) + if len(portRanges) > 0 { + portRangesResult := make([]string, 0) + for _, v := range portRanges { + portRangesResult = append(portRangesResult, v.(string)) + } + networkSecurityGroupRuleObject.SourcePortRanges = &portRangesResult + } + networkSecurityGroupRule = append(networkSecurityGroupRule, batch.NetworkSecurityGroupRule{ Priority: &priority, SourceAddressPrefix: &sourceAddressPrefix, @@ -1220,10 +1235,17 @@ func flattenBatchPoolNetworkConfiguration(input *batch.NetworkConfiguration) []i if networkSecurity.SourceAddressPrefix != nil { sourceAddressPrefix = *networkSecurity.SourceAddressPrefix } + sourcePortRanges := make([]interface{}, 0) + if networkSecurity.SourcePortRanges != nil { + for _, sourcePortRange := range *networkSecurity.SourcePortRanges { + sourcePortRanges = 
append(sourcePortRanges, sourcePortRange) + } + } networkSecurities = append(networkSecurities, map[string]interface{}{ "access": string(networkSecurity.Access), "priority": priority, "source_address_prefix": sourceAddressPrefix, + "source_port_ranges": sourcePortRanges, }) } } diff --git a/internal/services/batch/batch_pool_data_source.go b/internal/services/batch/batch_pool_data_source.go index ed78db7b0cc8..6cda35d1fcb7 100644 --- a/internal/services/batch/batch_pool_data_source.go +++ b/internal/services/batch/batch_pool_data_source.go @@ -537,6 +537,13 @@ func dataSourceBatchPool() *pluginsdk.Resource { Type: pluginsdk.TypeString, Computed: true, }, + "source_port_ranges": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, }, }, }, diff --git a/internal/services/batch/batch_pool_resource.go b/internal/services/batch/batch_pool_resource.go index e2d94d49d34a..6f0ba20135d3 100644 --- a/internal/services/batch/batch_pool_resource.go +++ b/internal/services/batch/batch_pool_resource.go @@ -591,6 +591,17 @@ func resourceBatchPool() *pluginsdk.Resource { string(batch.StorageAccountTypePremiumLRS), }, false), }, + "source_port_ranges": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Default: "*", + ValidateFunc: validation.StringIsNotEmpty, + }, + }, }, }, }, diff --git a/internal/services/batch/batch_pool_resource_test.go b/internal/services/batch/batch_pool_resource_test.go index 3eea329b4236..4771f9f12818 100644 --- a/internal/services/batch/batch_pool_resource_test.go +++ b/internal/services/batch/batch_pool_resource_test.go @@ -476,6 +476,7 @@ func TestAccBatchPool_frontEndPortRanges(t *testing.T) { check.That(data.ResourceName).Key("network_configuration.#").HasValue("1"), check.That(data.ResourceName).Key("network_configuration.0.subnet_id").Exists(), 
check.That(data.ResourceName).Key("network_configuration.0.public_ips.#").HasValue("1"), + check.That(data.ResourceName).Key("network_configuration.0.endpoint_configuration.0.network_security_group_rules.0.source_port_ranges.0").HasValue("*"), ), }, data.ImportStep("stop_pending_resize_operation"), @@ -1934,6 +1935,7 @@ resource "azurerm_batch_pool" "test" { access = "Deny" priority = 1001 source_address_prefix = "*" + source_port_ranges = ["*"] } } } diff --git a/website/docs/d/batch_pool.html.markdown b/website/docs/d/batch_pool.html.markdown index 9719c89a17ba..b497520a1fe5 100644 --- a/website/docs/d/batch_pool.html.markdown +++ b/website/docs/d/batch_pool.html.markdown @@ -324,6 +324,8 @@ A `network_security_group_rules` block exports the following: * `source_address_prefix` - The source address prefix or tag to match for the rule. +* `source_port_ranges` - The source port ranges to match for the rule. + --- A `task_scheduling_policy` block exports the following: From 0fbfce52565f10194485e20ff343ce0c3b153607 Mon Sep 17 00:00:00 2001 From: Yun Liu Date: Mon, 19 Sep 2022 13:21:44 +0800 Subject: [PATCH 02/12] update property `dynamic_vnet_assignment_scope` --- internal/services/batch/batch_pool.go | 5 +++ .../services/batch/batch_pool_data_source.go | 4 +++ .../services/batch/batch_pool_resource.go | 33 ++++++++++++------- .../batch/batch_pool_resource_test.go | 2 ++ website/docs/d/batch_pool.html.markdown | 6 ++++ website/docs/r/batch_pool.html.markdown | 10 ++++-- 6 files changed, 46 insertions(+), 14 deletions(-) diff --git a/internal/services/batch/batch_pool.go b/internal/services/batch/batch_pool.go index bcbabf577d11..ddbd00de9599 100644 --- a/internal/services/batch/batch_pool.go +++ b/internal/services/batch/batch_pool.go @@ -1060,6 +1060,10 @@ func ExpandBatchPoolNetworkConfiguration(list []interface{}) (*batch.NetworkConf networkConfigValue := list[0].(map[string]interface{}) networkConfiguration := &batch.NetworkConfiguration{} + if v, ok := 
networkConfigValue["dynamic_vnet_assignment_scope"]; ok { + networkConfiguration.DynamicVNetAssignmentScope = batch.DynamicVNetAssignmentScope(v.(string)) + } + if v, ok := networkConfigValue["subnet_id"]; ok { if value := v.(string); value != "" { networkConfiguration.SubnetID = &value @@ -1262,6 +1266,7 @@ func flattenBatchPoolNetworkConfiguration(input *batch.NetworkConfiguration) []i return []interface{}{ map[string]interface{}{ + "dynamic_vnet_assignment_scope": string(input.DynamicVNetAssignmentScope), "endpoint_configuration": endpointConfigs, "public_address_provisioning_type": publicAddressProvisioningType, "public_ips": pluginsdk.NewSet(pluginsdk.HashString, publicIPAddressIds), diff --git a/internal/services/batch/batch_pool_data_source.go b/internal/services/batch/batch_pool_data_source.go index 6cda35d1fcb7..676053737492 100644 --- a/internal/services/batch/batch_pool_data_source.go +++ b/internal/services/batch/batch_pool_data_source.go @@ -484,6 +484,10 @@ func dataSourceBatchPool() *pluginsdk.Resource { Computed: true, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ + "dynamic_vnet_assignment_scope": { + Type: pluginsdk.TypeString, + Computed: true, + }, "subnet_id": { Type: pluginsdk.TypeString, Computed: true, diff --git a/internal/services/batch/batch_pool_resource.go b/internal/services/batch/batch_pool_resource.go index 6f0ba20135d3..04900bb8a38f 100644 --- a/internal/services/batch/batch_pool_resource.go +++ b/internal/services/batch/batch_pool_resource.go @@ -461,6 +461,17 @@ func resourceBatchPool() *pluginsdk.Resource { MaxItems: 1, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ + "dynamic_vnet_assignment_scope": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + Default: string(batch.DynamicVNetAssignmentScopeNone), + DiffSuppressFunc: suppress.CaseDifference, + ValidateFunc: validation.StringInSlice([]string{ + string(batch.DynamicVNetAssignmentScopeNone), + 
string(batch.DynamicVNetAssignmentScopeJob), + }, false), + }, "subnet_id": { Type: pluginsdk.TypeString, Required: true, @@ -548,6 +559,17 @@ func resourceBatchPool() *pluginsdk.Resource { ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, + "source_port_ranges": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Default: "*", + ValidateFunc: validation.StringIsNotEmpty, + }, + }, }, }, }, @@ -591,17 +613,6 @@ func resourceBatchPool() *pluginsdk.Resource { string(batch.StorageAccountTypePremiumLRS), }, false), }, - "source_port_ranges": { - Type: pluginsdk.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &pluginsdk.Schema{ - Type: pluginsdk.TypeString, - Default: "*", - ValidateFunc: validation.StringIsNotEmpty, - }, - }, }, }, }, diff --git a/internal/services/batch/batch_pool_resource_test.go b/internal/services/batch/batch_pool_resource_test.go index 4771f9f12818..cc491ac23ede 100644 --- a/internal/services/batch/batch_pool_resource_test.go +++ b/internal/services/batch/batch_pool_resource_test.go @@ -474,6 +474,7 @@ func TestAccBatchPool_frontEndPortRanges(t *testing.T) { check.That(data.ResourceName).Key("fixed_scale.0.target_dedicated_nodes").HasValue("1"), check.That(data.ResourceName).Key("start_task.#").HasValue("0"), check.That(data.ResourceName).Key("network_configuration.#").HasValue("1"), + check.That(data.ResourceName).Key("network_configuration.0.dynamic_vnet_assignment_scope").HasValue("None"), check.That(data.ResourceName).Key("network_configuration.0.subnet_id").Exists(), check.That(data.ResourceName).Key("network_configuration.0.public_ips.#").HasValue("1"), check.That(data.ResourceName).Key("network_configuration.0.endpoint_configuration.0.network_security_group_rules.0.source_port_ranges.0").HasValue("*"), @@ -1921,6 +1922,7 @@ resource "azurerm_batch_pool" "test" { } network_configuration { + 
dynamic_vnet_assignment_scope = "None" public_address_provisioning_type = "UserManaged" public_ips = [azurerm_public_ip.test.id] subnet_id = azurerm_subnet.test.id diff --git a/website/docs/d/batch_pool.html.markdown b/website/docs/d/batch_pool.html.markdown index b497520a1fe5..85ad3f063205 100644 --- a/website/docs/d/batch_pool.html.markdown +++ b/website/docs/d/batch_pool.html.markdown @@ -298,8 +298,14 @@ A `network_configuration` block exports the following: * `subnet_id` - The ARM resource identifier of the virtual network subnet which the compute nodes of the pool are joined too. +* `dynamic_vnet_assignment_scope` - The scope of dynamic vnet assignment. + * `endpoint_configuration` - The inbound NAT pools that are used to address specific ports on the individual compute node externally. +* `public_ips` - A list of public IP ids that will be allocated to nodes. + +* `public_address_provisioning_type` - Type of public IP address provisioning. + --- A `endpoint_configuration` block exports the following: diff --git a/website/docs/r/batch_pool.html.markdown b/website/docs/r/batch_pool.html.markdown index 12c97e664b8c..03b72eac460a 100644 --- a/website/docs/r/batch_pool.html.markdown +++ b/website/docs/r/batch_pool.html.markdown @@ -432,6 +432,8 @@ A `network_configuration` block supports the following: * `subnet_id` - (Required) The ARM resource identifier of the virtual network subnet which the compute nodes of the pool will join. Changing this forces a new resource to be created. +* `dynamic_vnet_assignment_scope` - (Optional) The scope of dynamic vnet assignment. Allowed values: `None`, `Job`. + * `public_ips` - (Optional) A list of public IP ids that will be allocated to nodes. Changing this forces a new resource to be created. * `endpoint_configuration` - (Optional) A list of inbound NAT pools that can be used to address specific ports on an individual compute node externally. Set as documented in the inbound_nat_pools block below. 
Changing this forces a new resource to be created. @@ -456,11 +458,13 @@ A `endpoint_configuration` block supports the following: A `network_security_group_rules` block supports the following: -* `access` - The action that should be taken for a specified IP address, subnet range or tag. Acceptable values are `Allow` and `Deny`. Changing this forces a new resource to be created. +* `access` - (Required) The action that should be taken for a specified IP address, subnet range or tag. Acceptable values are `Allow` and `Deny`. Changing this forces a new resource to be created. + +* `priority` - (Required) The priority for this rule. The value must be at least `150`. Changing this forces a new resource to be created. -* `priority` - The priority for this rule. The value must be at least `150`. Changing this forces a new resource to be created. +* `source_address_prefix` - (Required) The source address prefix or tag to match for the rule. Changing this forces a new resource to be created. -* `source_address_prefix` - The source address prefix or tag to match for the rule. Changing this forces a new resource to be created. +* `source_port_ranges` - (Optional) The source port ranges to match for the rule. Valid values are `*` (for all ports 0 - 65535) or arrays of ports or port ranges (e.g. `100-200`). The ports should be in the range of 0 to 65535 and the port ranges or ports can't overlap. If any other values are provided the request fails with HTTP status code 400. Default value will be `*`. --- From fa420f9700284fedb2a9f304e35fb9e4ce6fc005 Mon Sep 17 00:00:00 2001 From: Yun Liu Date: Mon, 19 Sep 2022 15:50:32 +0800 Subject: [PATCH 03/12] update property `node_deallocation_option` and `container_settings`.
--- internal/services/batch/batch_pool.go | 51 +++++++- .../services/batch/batch_pool_data_source.go | 75 ++++++++---- .../services/batch/batch_pool_resource.go | 114 +++++++++++++----- .../batch/batch_pool_resource_test.go | 30 ++++- website/docs/r/batch_pool.html.markdown | 16 +++ 5 files changed, 227 insertions(+), 59 deletions(-) diff --git a/internal/services/batch/batch_pool.go b/internal/services/batch/batch_pool.go index ddbd00de9599..f98fe6d2210f 100644 --- a/internal/services/batch/batch_pool.go +++ b/internal/services/batch/batch_pool.go @@ -35,7 +35,7 @@ func flattenBatchPoolAutoScaleSettings(settings *batch.AutoScaleSettings) []inte } // flattenBatchPoolFixedScaleSettings flattens the fixed scale settings for a Batch pool -func flattenBatchPoolFixedScaleSettings(settings *batch.FixedScaleSettings) []interface{} { +func flattenBatchPoolFixedScaleSettings(d *pluginsdk.ResourceData, settings *batch.FixedScaleSettings) []interface{} { results := make([]interface{}, 0) if settings == nil { @@ -45,6 +45,11 @@ func flattenBatchPoolFixedScaleSettings(settings *batch.FixedScaleSettings) []in result := make(map[string]interface{}) + // for now, this is a writeOnly property, so we treat this as secret. 
+ if v, ok := d.GetOk("fixed_scale.0.node_deallocation_option"); ok { + result["node_deallocation_option"] = v.(string) + } + if settings.TargetDedicatedNodes != nil { result["target_dedicated_nodes"] = *settings.TargetDedicatedNodes } @@ -89,7 +94,7 @@ func flattenBatchPoolImageReference(image *batch.ImageReference) []interface{} { } // flattenBatchPoolStartTask flattens a Batch pool start task -func flattenBatchPoolStartTask(startTask *batch.StartTask) []interface{} { +func flattenBatchPoolStartTask(oldConfig *pluginsdk.ResourceData, startTask *batch.StartTask) []interface{} { results := make([]interface{}, 0) if startTask == nil { @@ -104,6 +109,25 @@ func flattenBatchPoolStartTask(startTask *batch.StartTask) []interface{} { } result["command_line"] = commandLine + if startTask.ContainerSettings != nil { + containerSettings := make(map[string]interface{}) + containerSettings["image_name"] = *startTask.ContainerSettings.ImageName + containerSettings["working_directory"] = string(startTask.ContainerSettings.WorkingDirectory) + if startTask.ContainerSettings.ContainerRunOptions != nil { + containerSettings["container_run_options"] = *startTask.ContainerSettings.ContainerRunOptions + } + if startTask.ContainerSettings.Registry != nil { + tmpReg := flattenBatchPoolContainerRegistry(oldConfig, startTask.ContainerSettings.Registry) + containerSettings["registry"] = []interface{}{ + tmpReg, + } + } + + result["container_settings"] = []interface{}{ + containerSettings, + } + } + waitForSuccess := false if startTask.WaitForSuccess != nil { waitForSuccess = *startTask.WaitForSuccess @@ -676,6 +700,29 @@ func ExpandBatchPoolStartTask(list []interface{}) (*batch.StartTask, error) { startTask.EnvironmentSettings = expandCommonEnvironmentProperties(v) } + if startTaskValue["container_settings"] != nil && len(startTaskValue["container_settings"].([]interface{})) > 0 { + var containerSettings batch.TaskContainerSettings + containerSettingsList := 
startTaskValue["container_settings"].([]interface{}) + + if len(containerSettingsList) > 0 && containerSettingsList[0] != nil { + settingMap := containerSettingsList[0].(map[string]interface{}) + containerSettings.ImageName = utils.String(settingMap["image_name"].(string)) + if containerRunOptions, ok := settingMap["container_run_options"]; ok { + containerSettings.ContainerRunOptions = utils.String(containerRunOptions.(string)) + } + if settingMap["registry"].([]interface{})[0] != nil { + containerRegMap := settingMap["registry"].([]interface{})[0].(map[string]interface{}) + if containerRegistryRef, err := expandBatchPoolContainerRegistry(containerRegMap); err == nil { + containerSettings.Registry = containerRegistryRef + } + } + if workingDir, ok := settingMap["working_directory"]; ok { + containerSettings.WorkingDirectory = batch.ContainerWorkingDirectory(workingDir.(string)) + } + } + startTask.ContainerSettings = &containerSettings + } + return startTask, nil } diff --git a/internal/services/batch/batch_pool_data_source.go b/internal/services/batch/batch_pool_data_source.go index 676053737492..b9e186f36cfe 100644 --- a/internal/services/batch/batch_pool_data_source.go +++ b/internal/services/batch/batch_pool_data_source.go @@ -2,6 +2,7 @@ package batch import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "time" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" @@ -139,25 +140,7 @@ func dataSourceBatchPool() *pluginsdk.Resource { Type: pluginsdk.TypeList, Computed: true, Elem: &pluginsdk.Resource{ - Schema: map[string]*pluginsdk.Schema{ - "registry_server": { - Type: pluginsdk.TypeString, - Computed: true, - }, - "user_assigned_identity_id": { - Type: pluginsdk.TypeString, - Computed: true, - }, - "user_name": { - Type: pluginsdk.TypeString, - Computed: true, - }, - "password": { - Type: pluginsdk.TypeString, - Computed: true, - Sensitive: true, - }, - }, + Schema: batchPoolDataContainerRegistry(), }, }, }, @@ -592,6 
+575,34 @@ func startTaskDSSchema() map[string]*pluginsdk.Schema { Computed: true, }, + "container_settings": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*schema.Schema{ + "container_run_options": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "image_name": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "registry": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: batchPoolDataContainerRegistry(), + }, + }, + "working_directory": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + }, + "task_retry_maximum": { Type: pluginsdk.TypeInt, Computed: true, @@ -680,6 +691,28 @@ func startTaskDSSchema() map[string]*pluginsdk.Schema { return s } +func batchPoolDataContainerRegistry() map[string]*schema.Schema { + return map[string]*pluginsdk.Schema{ + "registry_server": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "user_assigned_identity_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "user_name": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "password": { + Type: pluginsdk.TypeString, + Computed: true, + Sensitive: true, + }, + } +} + func dataSourceBatchPoolRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Batch.PoolClient subscriptionId := meta.(*clients.Client).Account.SubscriptionId @@ -711,7 +744,7 @@ func dataSourceBatchPoolRead(d *pluginsdk.ResourceData, meta interface{}) error if err := d.Set("auto_scale", flattenBatchPoolAutoScaleSettings(scaleSettings.AutoScale)); err != nil { return fmt.Errorf("flattening `auto_scale`: %+v", err) } - if err := d.Set("fixed_scale", flattenBatchPoolFixedScaleSettings(scaleSettings.FixedScale)); err != nil { + if err := d.Set("fixed_scale", flattenBatchPoolFixedScaleSettings(d, scaleSettings.FixedScale)); err != nil { return fmt.Errorf("flattening `fixed_scale `: %+v", err) } } @@ -827,7 +860,7 @@ func dataSourceBatchPoolRead(d 
*pluginsdk.ResourceData, meta interface{}) error return fmt.Errorf("setting `certificate`: %v", err) } - d.Set("start_task", flattenBatchPoolStartTask(props.StartTask)) + d.Set("start_task", flattenBatchPoolStartTask(d, props.StartTask)) d.Set("metadata", FlattenBatchMetaData(props.Metadata)) if err := d.Set("network_configuration", flattenBatchPoolNetworkConfiguration(props.NetworkConfiguration)); err != nil { diff --git a/internal/services/batch/batch_pool_resource.go b/internal/services/batch/batch_pool_resource.go index 04900bb8a38f..307f6fa02c0c 100644 --- a/internal/services/batch/batch_pool_resource.go +++ b/internal/services/batch/batch_pool_resource.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "log" "strings" "time" @@ -83,6 +84,19 @@ func resourceBatchPool() *pluginsdk.Resource { MaxItems: 1, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ + // Property `node_deallocation_option` is set to be a writeOnly property by service team + // It can only perform on PUT operation and is not able to perform GET operation + // Here we treat `node_deallocation_option` the same as a secret value. 
+ "node_deallocation_option": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + string(batch.ComputeNodeDeallocationOptionRequeue), + string(batch.ComputeNodeDeallocationOptionRetainedData), + string(batch.ComputeNodeDeallocationOptionTaskCompletion), + string(batch.ComputeNodeDeallocationOptionTerminate), + }, false), + }, "target_dedicated_nodes": { Type: pluginsdk.TypeInt, Optional: true, @@ -153,34 +167,7 @@ func resourceBatchPool() *pluginsdk.Resource { ForceNew: true, ConfigMode: pluginsdk.SchemaConfigModeAttr, Elem: &pluginsdk.Resource{ - Schema: map[string]*pluginsdk.Schema{ - "registry_server": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringIsNotEmpty, - }, - "user_assigned_identity_id": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: commonids.ValidateUserAssignedIdentityID, - Description: "The User Assigned Identity to use for Container Registry access.", - }, - "user_name": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringIsNotEmpty, - }, - "password": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - ValidateFunc: validation.StringIsNotEmpty, - }, - }, + Schema: containerRegistry(), }, AtLeastOneOf: []string{"container_configuration.0.type", "container_configuration.0.container_image_names", "container_configuration.0.container_registries"}, }, @@ -1102,7 +1089,7 @@ func resourceBatchPoolRead(d *pluginsdk.ResourceData, meta interface{}) error { if err := d.Set("auto_scale", flattenBatchPoolAutoScaleSettings(scaleSettings.AutoScale)); err != nil { return fmt.Errorf("flattening `auto_scale`: %+v", err) } - if err := d.Set("fixed_scale", flattenBatchPoolFixedScaleSettings(scaleSettings.FixedScale)); err != nil { + if err := d.Set("fixed_scale", flattenBatchPoolFixedScaleSettings(d, scaleSettings.FixedScale)); err != nil { 
return fmt.Errorf("flattening `fixed_scale `: %+v", err) } } @@ -1220,7 +1207,7 @@ func resourceBatchPoolRead(d *pluginsdk.ResourceData, meta interface{}) error { return fmt.Errorf("flattening `certificate`: %+v", err) } - d.Set("start_task", flattenBatchPoolStartTask(props.StartTask)) + d.Set("start_task", flattenBatchPoolStartTask(d, props.StartTask)) d.Set("metadata", FlattenBatchMetaData(props.Metadata)) if props.MountConfiguration != nil { @@ -1299,11 +1286,13 @@ func expandBatchPoolScaleSettings(d *pluginsdk.ResourceData) (*batch.ScaleSettin fixedScaleSettings := fixedScale[0].(map[string]interface{}) + nodeDeallocationOption := batch.ComputeNodeDeallocationOption(fixedScaleSettings["node_deallocation_option"].(string)) targetDedicatedNodes := int32(fixedScaleSettings["target_dedicated_nodes"].(int)) targetLowPriorityNodes := int32(fixedScaleSettings["target_low_priority_nodes"].(int)) resizeTimeout := fixedScaleSettings["resize_timeout"].(string) scaleSettings.FixedScale = &batch.FixedScaleSettings{ + NodeDeallocationOption: nodeDeallocationOption, ResizeTimeout: &resizeTimeout, TargetDedicatedNodes: &targetDedicatedNodes, TargetLowPriorityNodes: &targetLowPriorityNodes, @@ -1437,6 +1426,40 @@ func startTaskSchema() map[string]*pluginsdk.Schema { ValidateFunc: validation.StringIsNotEmpty, }, + "container_settings": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "container_run_options": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "image_name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "registry": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: containerRegistry(), + }, + }, + "working_directory": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + 
string(batch.ContainerWorkingDirectoryTaskWorkingDirectory), + string(batch.ContainerWorkingDirectoryContainerImageDefault), + }, false), + }, + }, + }, + }, + "task_retry_maximum": { Type: pluginsdk.TypeInt, Optional: true, @@ -1537,3 +1560,34 @@ func startTaskSchema() map[string]*pluginsdk.Schema { }, } } + +func containerRegistry() map[string]*schema.Schema { + return map[string]*pluginsdk.Schema{ + "registry_server": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "user_assigned_identity_id": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: commonids.ValidateUserAssignedIdentityID, + Description: "The User Assigned Identity to use for Container Registry access.", + }, + "user_name": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "password": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + } +} diff --git a/internal/services/batch/batch_pool_resource_test.go b/internal/services/batch/batch_pool_resource_test.go index cc491ac23ede..d43ef0a1fcef 100644 --- a/internal/services/batch/batch_pool_resource_test.go +++ b/internal/services/batch/batch_pool_resource_test.go @@ -148,13 +148,14 @@ func TestAccBatchPool_fixedScale_complete(t *testing.T) { check.That(data.ResourceName).Key("storage_image_reference.0.offer").HasValue("UbuntuServer"), check.That(data.ResourceName).Key("auto_scale.#").HasValue("0"), check.That(data.ResourceName).Key("fixed_scale.#").HasValue("1"), + check.That(data.ResourceName).Key("fixed_scale.0.node_deallocation_option").HasValue("Terminate"), check.That(data.ResourceName).Key("fixed_scale.0.target_dedicated_nodes").HasValue("2"), check.That(data.ResourceName).Key("fixed_scale.0.resize_timeout").HasValue("PT15M"), 
check.That(data.ResourceName).Key("fixed_scale.0.target_low_priority_nodes").HasValue("0"), check.That(data.ResourceName).Key("start_task.#").HasValue("0"), ), }, - data.ImportStep("stop_pending_resize_operation"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_option"), }) } @@ -179,7 +180,7 @@ func TestAccBatchPool_autoScale_complete(t *testing.T) { check.That(data.ResourceName).Key("start_task.#").HasValue("0"), ), }, - data.ImportStep("stop_pending_resize_operation"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_option"), }) } @@ -206,7 +207,7 @@ func TestAccBatchPool_completeUpdated(t *testing.T) { check.That(data.ResourceName).Key("start_task.#").HasValue("0"), ), }, - data.ImportStep("stop_pending_resize_operation"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_option"), { Config: r.autoScale_complete(data), Check: acceptance.ComposeTestCheckFunc( @@ -271,9 +272,14 @@ func TestAccBatchPool_startTask_complete(t *testing.T) { Config: r.startTask_complete(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("start_task.0.container_configuration.0.container_registries.0.registry_server").HasValue("myContainerRegistry.azurecr.io"), + check.That(data.ResourceName).Key("start_task.0.container_configuration.0.container_registries.0.user_name").HasValue("myUserName"), + check.That(data.ResourceName).Key("start_task.0.container_configuration.0.container_registries.0.password").HasValue("myPassword"), ), }, - data.ImportStep("stop_pending_resize_operation"), + data.ImportStep("stop_pending_resize_operation", + "container_configuration.0.container_registries.0.password", + "start_task.0.container_settings.0.registry.0.password"), }) } @@ -493,16 +499,17 @@ func TestAccBatchPool_fixedScaleUpdate(t *testing.T) { Config: r.fixedScale_complete(data), Check: 
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("fixed_scale.0.node_deallocation_option").HasValue("Terminate"), ), }, - data.ImportStep("stop_pending_resize_operation"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_option"), { Config: r.fixedScale_completeUpdate(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, - data.ImportStep("stop_pending_resize_operation"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_option"), }) } @@ -758,6 +765,7 @@ resource "azurerm_batch_pool" "test" { node_agent_sku_id = "batch.node.ubuntu 18.04" fixed_scale { + node_deallocation_option = "Terminate" target_dedicated_nodes = 2 resize_timeout = "PT15M" target_low_priority_nodes = 0 @@ -1115,6 +1123,16 @@ resource "azurerm_batch_pool" "test" { bu = "Research&Dev" } + container_configuration { + type = "DockerCompatible" + container_image_names = ["centos7"] + container_registries { + registry_server = "myContainerRegistry.azurecr.io" + user_name = "myUserName" + password = "myPassword" + } + } + user_identity { auto_user { elevation_level = "NonAdmin" diff --git a/website/docs/r/batch_pool.html.markdown b/website/docs/r/batch_pool.html.markdown index 03b72eac460a..621820bf395e 100644 --- a/website/docs/r/batch_pool.html.markdown +++ b/website/docs/r/batch_pool.html.markdown @@ -252,6 +252,8 @@ To provision a Custom Image, the following fields are applicable: A `fixed_scale` block supports the following: +* `node_deallocation_option` - (Optional) Write-only property. It determines what to do with a node and its running task(s) if the pool size is decreasing. Values are `Requeue`, `RetainedData`, `TaskCompletion` and `Terminate`. + * `target_dedicated_nodes` - (Optional) The number of nodes in the Batch pool. Defaults to `1`. 
* `target_low_priority_nodes` - (Optional) The number of low priority nodes in the Batch pool. Defaults to `0`. @@ -272,6 +274,8 @@ A `start_task` block supports the following: * `command_line` - (Required) The command line executed by the start task. +* `container_settings` - (Optional) A `container_settings` block is the settings for the container under which the start task runs. When this is specified, all directories recursively below the `AZ_BATCH_NODE_ROOT_DIR` (the root of Azure Batch directories on the node) are mapped into the container, all task environment variables are mapped into the container, and the task command line is executed in the container. + * `task_retry_maximum` - (Optional) The number of retry count. Defaults to `1`. * `wait_for_success` - (Optional) A flag that indicates if the Batch pool should wait for the start task to be completed. Default to `false`. @@ -284,6 +288,18 @@ A `start_task` block supports the following: --- +A `container_settings` block supports the following: + +* `image_name` - (Required) The image to use to create the container in which the task will run. This is the full image reference, as would be specified to "docker pull". If no tag is provided as part of the image name, the tag ":latest" is used as a default. + +* `container_run_options` - (Optional) Additional options to the container create command. These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service. + +* `registry` - (Optional) The same reference as `container_registries` block defined as follows. + +* `working_directory` - (Optional) A flag to indicate where the container task working directory is. The default is "TaskWorkingDirectory", an alternative value is "ContainerImageDefault". + +--- + A `user_identity` block supports the following: * `user_name` - (Optional) The username to be used by the Batch pool start task. 
From 867d86508560e4568c0015ffbc9f5630046f44b2 Mon Sep 17 00:00:00 2001 From: Yun Liu Date: Mon, 19 Sep 2022 16:02:50 +0800 Subject: [PATCH 04/12] update on `container_settings` --- website/docs/d/batch_pool.html.markdown | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/website/docs/d/batch_pool.html.markdown b/website/docs/d/batch_pool.html.markdown index 85ad3f063205..163d26091a10 100644 --- a/website/docs/d/batch_pool.html.markdown +++ b/website/docs/d/batch_pool.html.markdown @@ -140,6 +140,8 @@ A `start_task` block exports the following: * `command_line` - The command line executed by the start task. +* `container_settings` - The settings for the container under which the start task runs. + * `task_retry_maximum` - The number of retry count * `wait_for_success` - A flag that indicates if the Batch pool should wait for the start task to be completed. @@ -152,6 +154,18 @@ A `start_task` block exports the following: --- +A `container_settings` block exports the following: + +* `image_name` - The image to use to create the container in which the task will run. + +* `container_run_options` - Additional options to the container create command. + +* `registry` - The same reference as `container_registries` block defined as follows. + +* `working_directory` - A flag to indicate where the container task working directory is. + +--- + A `user_identity` block exports the following: * `user_name` - The username to be used by the Batch pool start task. 
From 55a90bae4eaf85c288d751f2d36e31dfdba8294f Mon Sep 17 00:00:00 2001 From: Yun Liu Date: Mon, 19 Sep 2022 16:32:19 +0800 Subject: [PATCH 05/12] fix a test case error --- internal/services/batch/batch_pool_resource_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/services/batch/batch_pool_resource_test.go b/internal/services/batch/batch_pool_resource_test.go index d43ef0a1fcef..a5ee12479922 100644 --- a/internal/services/batch/batch_pool_resource_test.go +++ b/internal/services/batch/batch_pool_resource_test.go @@ -1123,7 +1123,7 @@ resource "azurerm_batch_pool" "test" { bu = "Research&Dev" } - container_configuration { + container_settings { type = "DockerCompatible" container_image_names = ["centos7"] container_registries { From 5842378deafa06606390510ff57fbd0cbee41267 Mon Sep 17 00:00:00 2001 From: Yun Liu Date: Mon, 19 Sep 2022 17:33:18 +0800 Subject: [PATCH 06/12] fix test case errors and update doc --- internal/services/batch/batch_pool_data_source.go | 2 +- internal/services/batch/batch_pool_resource_test.go | 2 +- website/docs/r/batch_pool.html.markdown | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/services/batch/batch_pool_data_source.go b/internal/services/batch/batch_pool_data_source.go index b9e186f36cfe..39aeb441e8b5 100644 --- a/internal/services/batch/batch_pool_data_source.go +++ b/internal/services/batch/batch_pool_data_source.go @@ -2,10 +2,10 @@ package batch import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "time" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/services/batch/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/services/batch/validate" diff --git a/internal/services/batch/batch_pool_resource_test.go 
b/internal/services/batch/batch_pool_resource_test.go index a5ee12479922..2fbebebc5830 100644 --- a/internal/services/batch/batch_pool_resource_test.go +++ b/internal/services/batch/batch_pool_resource_test.go @@ -1940,7 +1940,7 @@ resource "azurerm_batch_pool" "test" { } network_configuration { - dynamic_vnet_assignment_scope = "None" + dynamic_vnet_assignment_scope = "none" public_address_provisioning_type = "UserManaged" public_ips = [azurerm_public_ip.test.id] subnet_id = azurerm_subnet.test.id diff --git a/website/docs/r/batch_pool.html.markdown b/website/docs/r/batch_pool.html.markdown index 621820bf395e..30a26de5ad48 100644 --- a/website/docs/r/batch_pool.html.markdown +++ b/website/docs/r/batch_pool.html.markdown @@ -448,7 +448,7 @@ A `network_configuration` block supports the following: * `subnet_id` - (Required) The ARM resource identifier of the virtual network subnet which the compute nodes of the pool will join. Changing this forces a new resource to be created. -* `dynamic_vnet_assignment_scope` - (Optional) The scope of dynamic vnet assignment. Allowed values: `None`, `Job`. +* `dynamic_vnet_assignment_scope` - (Optional) The scope of dynamic vnet assignment. Allowed values: `none`, `job`. * `public_ips` - (Optional) A list of public IP ids that will be allocated to nodes. Changing this forces a new resource to be created. 
From 363f789a575580f8fbe52f0649584e31282f5155 Mon Sep 17 00:00:00 2001 From: Yun Liu Date: Mon, 19 Sep 2022 18:24:31 +0800 Subject: [PATCH 07/12] fix test case errors and update doc --- internal/services/batch/batch_pool_resource.go | 2 +- .../services/batch/batch_pool_resource_test.go | 16 ++++++++++------ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/internal/services/batch/batch_pool_resource.go b/internal/services/batch/batch_pool_resource.go index 307f6fa02c0c..dd1e855db2df 100644 --- a/internal/services/batch/batch_pool_resource.go +++ b/internal/services/batch/batch_pool_resource.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "log" "strings" "time" @@ -14,6 +13,7 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" diff --git a/internal/services/batch/batch_pool_resource_test.go b/internal/services/batch/batch_pool_resource_test.go index 2fbebebc5830..372f60966568 100644 --- a/internal/services/batch/batch_pool_resource_test.go +++ b/internal/services/batch/batch_pool_resource_test.go @@ -272,9 +272,12 @@ func TestAccBatchPool_startTask_complete(t *testing.T) { Config: r.startTask_complete(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("start_task.0.container_configuration.0.container_registries.0.registry_server").HasValue("myContainerRegistry.azurecr.io"), - 
check.That(data.ResourceName).Key("start_task.0.container_configuration.0.container_registries.0.user_name").HasValue("myUserName"), - check.That(data.ResourceName).Key("start_task.0.container_configuration.0.container_registries.0.password").HasValue("myPassword"), + check.That(data.ResourceName).Key("start_task.0.container_configuration.0.registry.0.user_name").HasValue("myUserName"), + check.That(data.ResourceName).Key("start_task.0.container_configuration.0.registry.0.registry_server").HasValue("myContainerRegistry.azurecr.io"), + check.That(data.ResourceName).Key("start_task.0.container_configuration.0.registry.0.user_name").HasValue("myUserName"), + check.That(data.ResourceName).Key("start_task.0.container_configuration.0.container_run_options").HasValue("cat /proc/cpuinfo"), + check.That(data.ResourceName).Key("start_task.0.container_configuration.0.image_name").HasValue("centos7"), + check.That(data.ResourceName).Key("start_task.0.container_configuration.0.working_directory ").HasValue("ContainerImageDefault"), ), }, data.ImportStep("stop_pending_resize_operation", @@ -1124,13 +1127,14 @@ resource "azurerm_batch_pool" "test" { } container_settings { - type = "DockerCompatible" - container_image_names = ["centos7"] - container_registries { + container_run_options = "cat /proc/cpuinfo" + image_name = "centos7" + registry { registry_server = "myContainerRegistry.azurecr.io" user_name = "myUserName" password = "myPassword" } + working_directory = "ContainerImageDefault" } user_identity { From 71ee85affcfc78e4207a168be8f0c4b4f42309da Mon Sep 17 00:00:00 2001 From: Yun Liu Date: Mon, 19 Sep 2022 20:30:49 +0800 Subject: [PATCH 08/12] fix test case errors and update doc --- internal/services/batch/batch_pool_resource_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/internal/services/batch/batch_pool_resource_test.go b/internal/services/batch/batch_pool_resource_test.go index 372f60966568..d49074de6ffe 100644 --- 
a/internal/services/batch/batch_pool_resource_test.go +++ b/internal/services/batch/batch_pool_resource_test.go @@ -1117,6 +1117,16 @@ resource "azurerm_batch_pool" "test" { version = "latest" } + container_configuration { + type = "DockerCompatible" + container_image_names = ["centos7"] + container_registries { + registry_server = "myContainerRegistry.azurecr.io" + user_name = "myUserName" + password = "myPassword" + } + } + start_task { command_line = "echo 'Hello World from $env'" wait_for_success = true From b945bcf108e367af8fe25eb13fc75f72074a7e3e Mon Sep 17 00:00:00 2001 From: Yun Liu Date: Tue, 20 Sep 2022 11:31:56 +0800 Subject: [PATCH 09/12] fix test case errors in startTask_complete --- .../batch/batch_pool_resource_test.go | 32 ++++++++----------- website/docs/r/batch_pool.html.markdown | 2 +- 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/internal/services/batch/batch_pool_resource_test.go b/internal/services/batch/batch_pool_resource_test.go index d49074de6ffe..7657b0c58a8d 100644 --- a/internal/services/batch/batch_pool_resource_test.go +++ b/internal/services/batch/batch_pool_resource_test.go @@ -272,12 +272,12 @@ func TestAccBatchPool_startTask_complete(t *testing.T) { Config: r.startTask_complete(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("start_task.0.container_configuration.0.registry.0.user_name").HasValue("myUserName"), - check.That(data.ResourceName).Key("start_task.0.container_configuration.0.registry.0.registry_server").HasValue("myContainerRegistry.azurecr.io"), - check.That(data.ResourceName).Key("start_task.0.container_configuration.0.registry.0.user_name").HasValue("myUserName"), - check.That(data.ResourceName).Key("start_task.0.container_configuration.0.container_run_options").HasValue("cat /proc/cpuinfo"), - check.That(data.ResourceName).Key("start_task.0.container_configuration.0.image_name").HasValue("centos7"), - 
check.That(data.ResourceName).Key("start_task.0.container_configuration.0.working_directory ").HasValue("ContainerImageDefault"), + check.That(data.ResourceName).Key("start_task.0.container_settings.0.registry.0.user_name").HasValue("myUserName"), + check.That(data.ResourceName).Key("start_task.0.container_settings.0.registry.0.registry_server").HasValue("myContainerRegistry.azurecr.io"), + check.That(data.ResourceName).Key("start_task.0.container_settings.0.registry.0.user_name").HasValue("myUserName"), + check.That(data.ResourceName).Key("start_task.0.container_settings.0.container_run_options").HasValue("cat /proc/cpuinfo"), + check.That(data.ResourceName).Key("start_task.0.container_settings.0.image_name").HasValue("centos7"), + check.That(data.ResourceName).Key("start_task.0.container_settings.0.working_directory").HasValue("ContainerImageDefault"), ), }, data.ImportStep("stop_pending_resize_operation", @@ -1083,15 +1083,9 @@ resource "azurerm_batch_pool" "test" { } func (BatchPoolResource) startTask_complete(data acceptance.TestData) string { + template := BatchPoolResource{}.template(data) return fmt.Sprintf(` -provider "azurerm" { - features {} -} - -resource "azurerm_resource_group" "test" { - name = "testaccRG-batch-%d" - location = "%s" -} +%s resource "azurerm_batch_account" "test" { name = "testaccbatch%s" @@ -1103,7 +1097,7 @@ resource "azurerm_batch_pool" "test" { name = "testaccpool%s" resource_group_name = azurerm_resource_group.test.name account_name = azurerm_batch_account.test.name - node_agent_sku_id = "batch.node.ubuntu 18.04" + node_agent_sku_id = "batch.node.ubuntu 20.04" vm_size = "Standard_A1" fixed_scale { @@ -1111,9 +1105,9 @@ resource "azurerm_batch_pool" "test" { } storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "18.04-lts" + publisher = "microsoft-azure-batch" + offer = "ubuntu-server-container" + sku = "20-04-lts" version = "latest" } @@ -1160,7 +1154,7 @@ resource "azurerm_batch_pool" "test" { } 
} } -`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString) +`, template, data.RandomString, data.RandomString) } func (BatchPoolResource) startTask_userIdentity(data acceptance.TestData) string { diff --git a/website/docs/r/batch_pool.html.markdown b/website/docs/r/batch_pool.html.markdown index 30a26de5ad48..55f9bd7068c6 100644 --- a/website/docs/r/batch_pool.html.markdown +++ b/website/docs/r/batch_pool.html.markdown @@ -296,7 +296,7 @@ A `container_settings` block supports the following: * `registry` - (Optional) The same reference as `container_registries` block defined as follows. -* `working_directory` - (Optional) A flag to indicate where the container task working directory is. The default is "TaskWorkingDirectory", an alternative value is "ContainerImageDefault". +* `working_directory` - (Optional) A flag to indicate where the container task working directory is. The default is `TaskWorkingDirectory`, an alternative value is `ContainerImageDefault`. 
--- From d5eb3971c5925dd7d45e2e8a236799aade93f989 Mon Sep 17 00:00:00 2001 From: Yun Liu Date: Tue, 27 Sep 2022 16:43:59 +0800 Subject: [PATCH 10/12] update by review comments --- internal/services/batch/batch_pool.go | 6 +++--- .../services/batch/batch_pool_data_source.go | 2 +- internal/services/batch/batch_pool_resource.go | 2 +- .../services/batch/batch_pool_resource_test.go | 16 ++++++++-------- website/docs/d/batch_pool.html.markdown | 4 ++-- website/docs/r/batch_pool.html.markdown | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/internal/services/batch/batch_pool.go b/internal/services/batch/batch_pool.go index f98fe6d2210f..f3d9d3951d05 100644 --- a/internal/services/batch/batch_pool.go +++ b/internal/services/batch/batch_pool.go @@ -123,7 +123,7 @@ func flattenBatchPoolStartTask(oldConfig *pluginsdk.ResourceData, startTask *bat } } - result["container_settings"] = []interface{}{ + result["container"] = []interface{}{ containerSettings, } } @@ -700,9 +700,9 @@ func ExpandBatchPoolStartTask(list []interface{}) (*batch.StartTask, error) { startTask.EnvironmentSettings = expandCommonEnvironmentProperties(v) } - if startTaskValue["container_settings"] != nil && len(startTaskValue["container_settings"].([]interface{})) > 0 { + if startTaskValue["container"] != nil && len(startTaskValue["container"].([]interface{})) > 0 { var containerSettings batch.TaskContainerSettings - containerSettingsList := startTaskValue["container_settings"].([]interface{}) + containerSettingsList := startTaskValue["container"].([]interface{}) if len(containerSettingsList) > 0 && containerSettingsList[0] != nil { settingMap := containerSettingsList[0].(map[string]interface{}) diff --git a/internal/services/batch/batch_pool_data_source.go b/internal/services/batch/batch_pool_data_source.go index 39aeb441e8b5..55c60dc8b568 100644 --- a/internal/services/batch/batch_pool_data_source.go +++ b/internal/services/batch/batch_pool_data_source.go @@ -575,7 +575,7 @@ func 
startTaskDSSchema() map[string]*pluginsdk.Schema { Computed: true, }, - "container_settings": { + "container": { Type: pluginsdk.TypeList, Computed: true, Elem: &pluginsdk.Resource{ diff --git a/internal/services/batch/batch_pool_resource.go b/internal/services/batch/batch_pool_resource.go index dd1e855db2df..728c8a5a8064 100644 --- a/internal/services/batch/batch_pool_resource.go +++ b/internal/services/batch/batch_pool_resource.go @@ -1426,7 +1426,7 @@ func startTaskSchema() map[string]*pluginsdk.Schema { ValidateFunc: validation.StringIsNotEmpty, }, - "container_settings": { + "container": { Type: pluginsdk.TypeList, Optional: true, Elem: &pluginsdk.Resource{ diff --git a/internal/services/batch/batch_pool_resource_test.go b/internal/services/batch/batch_pool_resource_test.go index 7657b0c58a8d..fea9206faf09 100644 --- a/internal/services/batch/batch_pool_resource_test.go +++ b/internal/services/batch/batch_pool_resource_test.go @@ -272,17 +272,17 @@ func TestAccBatchPool_startTask_complete(t *testing.T) { Config: r.startTask_complete(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("start_task.0.container_settings.0.registry.0.user_name").HasValue("myUserName"), - check.That(data.ResourceName).Key("start_task.0.container_settings.0.registry.0.registry_server").HasValue("myContainerRegistry.azurecr.io"), - check.That(data.ResourceName).Key("start_task.0.container_settings.0.registry.0.user_name").HasValue("myUserName"), - check.That(data.ResourceName).Key("start_task.0.container_settings.0.container_run_options").HasValue("cat /proc/cpuinfo"), - check.That(data.ResourceName).Key("start_task.0.container_settings.0.image_name").HasValue("centos7"), - check.That(data.ResourceName).Key("start_task.0.container_settings.0.working_directory").HasValue("ContainerImageDefault"), + check.That(data.ResourceName).Key("start_task.0.container.0.registry.0.user_name").HasValue("myUserName"), + 
check.That(data.ResourceName).Key("start_task.0.container.0.registry.0.registry_server").HasValue("myContainerRegistry.azurecr.io"), + check.That(data.ResourceName).Key("start_task.0.container.0.registry.0.user_name").HasValue("myUserName"), + check.That(data.ResourceName).Key("start_task.0.container.0.container_run_options").HasValue("cat /proc/cpuinfo"), + check.That(data.ResourceName).Key("start_task.0.container.0.image_name").HasValue("centos7"), + check.That(data.ResourceName).Key("start_task.0.container.0.working_directory").HasValue("ContainerImageDefault"), ), }, data.ImportStep("stop_pending_resize_operation", "container_configuration.0.container_registries.0.password", - "start_task.0.container_settings.0.registry.0.password"), + "start_task.0.container.0.registry.0.password"), }) } @@ -1130,7 +1130,7 @@ resource "azurerm_batch_pool" "test" { bu = "Research&Dev" } - container_settings { + container { container_run_options = "cat /proc/cpuinfo" image_name = "centos7" registry { diff --git a/website/docs/d/batch_pool.html.markdown b/website/docs/d/batch_pool.html.markdown index 163d26091a10..8e0b97a64a8a 100644 --- a/website/docs/d/batch_pool.html.markdown +++ b/website/docs/d/batch_pool.html.markdown @@ -140,7 +140,7 @@ A `start_task` block exports the following: * `command_line` - The command line executed by the start task. -* `container_settings` - The settings for the container under which the start task runs. +* `container` - The settings for the container under which the start task runs. * `task_retry_maximum` - The number of retry count @@ -154,7 +154,7 @@ A `start_task` block exports the following: --- -A `container_settings` block exports the following: +A `container` block exports the following: * `image_name` - The image to use to create the container in which the task will run. 
diff --git a/website/docs/r/batch_pool.html.markdown b/website/docs/r/batch_pool.html.markdown index 55f9bd7068c6..653ef9410f6a 100644 --- a/website/docs/r/batch_pool.html.markdown +++ b/website/docs/r/batch_pool.html.markdown @@ -274,7 +274,7 @@ A `start_task` block supports the following: * `command_line` - (Required) The command line executed by the start task. -* `container_settings` - (Optional) A `container_settings` block is the settings for the container under which the start task runs. When this is specified, all directories recursively below the `AZ_BATCH_NODE_ROOT_DIR` (the root of Azure Batch directories on the node) are mapped into the container, all task environment variables are mapped into the container, and the task command line is executed in the container. +* `container` - (Optional) A `container` block is the settings for the container under which the start task runs. When this is specified, all directories recursively below the `AZ_BATCH_NODE_ROOT_DIR` (the root of Azure Batch directories on the node) are mapped into the container, all task environment variables are mapped into the container, and the task command line is executed in the container. * `task_retry_maximum` - (Optional) The number of retry count. Defaults to `1`. @@ -288,7 +288,7 @@ A `start_task` block supports the following: --- -A `container_settings` block supports the following: +A `container` block supports the following: * `image_name` - (Required) The image to use to create the container in which the task will run. This is the full image reference, as would be specified to "docker pull". If no tag is provided as part of the image name, the tag ":latest" is used as a default. 
From 43e384fb6f4240c347a53a9fc7819f04496878a0 Mon Sep 17 00:00:00 2001 From: Yun Liu Date: Fri, 30 Sep 2022 09:43:56 +0800 Subject: [PATCH 11/12] change 'container_run_options' to 'run_options' --- internal/services/batch/batch_pool.go | 4 ++-- internal/services/batch/batch_pool_data_source.go | 2 +- internal/services/batch/batch_pool_resource.go | 2 +- internal/services/batch/batch_pool_resource_test.go | 6 +++--- website/docs/d/batch_pool.html.markdown | 2 +- website/docs/r/batch_pool.html.markdown | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/internal/services/batch/batch_pool.go b/internal/services/batch/batch_pool.go index f3d9d3951d05..2c40fd951ba9 100644 --- a/internal/services/batch/batch_pool.go +++ b/internal/services/batch/batch_pool.go @@ -114,7 +114,7 @@ func flattenBatchPoolStartTask(oldConfig *pluginsdk.ResourceData, startTask *bat containerSettings["image_name"] = *startTask.ContainerSettings.ImageName containerSettings["working_directory"] = string(startTask.ContainerSettings.WorkingDirectory) if startTask.ContainerSettings.ContainerRunOptions != nil { - containerSettings["container_run_options"] = *startTask.ContainerSettings.ContainerRunOptions + containerSettings["run_options"] = *startTask.ContainerSettings.ContainerRunOptions } if startTask.ContainerSettings.Registry != nil { tmpReg := flattenBatchPoolContainerRegistry(oldConfig, startTask.ContainerSettings.Registry) @@ -707,7 +707,7 @@ func ExpandBatchPoolStartTask(list []interface{}) (*batch.StartTask, error) { if len(containerSettingsList) > 0 && containerSettingsList[0] != nil { settingMap := containerSettingsList[0].(map[string]interface{}) containerSettings.ImageName = utils.String(settingMap["image_name"].(string)) - if containerRunOptions, ok := settingMap["container_run_options"]; ok { + if containerRunOptions, ok := settingMap["run_options"]; ok { containerSettings.ContainerRunOptions = utils.String(containerRunOptions.(string)) } if 
settingMap["registry"].([]interface{})[0] != nil { diff --git a/internal/services/batch/batch_pool_data_source.go b/internal/services/batch/batch_pool_data_source.go index 55c60dc8b568..0568b4c02445 100644 --- a/internal/services/batch/batch_pool_data_source.go +++ b/internal/services/batch/batch_pool_data_source.go @@ -580,7 +580,7 @@ func startTaskDSSchema() map[string]*pluginsdk.Schema { Computed: true, Elem: &pluginsdk.Resource{ Schema: map[string]*schema.Schema{ - "container_run_options": { + "run_options": { Type: pluginsdk.TypeString, Computed: true, }, diff --git a/internal/services/batch/batch_pool_resource.go b/internal/services/batch/batch_pool_resource.go index 728c8a5a8064..f35e7c9e2ba8 100644 --- a/internal/services/batch/batch_pool_resource.go +++ b/internal/services/batch/batch_pool_resource.go @@ -1431,7 +1431,7 @@ func startTaskSchema() map[string]*pluginsdk.Schema { Optional: true, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ - "container_run_options": { + "run_options": { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringIsNotEmpty, diff --git a/internal/services/batch/batch_pool_resource_test.go b/internal/services/batch/batch_pool_resource_test.go index fea9206faf09..d0458bcb557a 100644 --- a/internal/services/batch/batch_pool_resource_test.go +++ b/internal/services/batch/batch_pool_resource_test.go @@ -275,7 +275,7 @@ func TestAccBatchPool_startTask_complete(t *testing.T) { check.That(data.ResourceName).Key("start_task.0.container.0.registry.0.user_name").HasValue("myUserName"), check.That(data.ResourceName).Key("start_task.0.container.0.registry.0.registry_server").HasValue("myContainerRegistry.azurecr.io"), check.That(data.ResourceName).Key("start_task.0.container.0.registry.0.user_name").HasValue("myUserName"), - check.That(data.ResourceName).Key("start_task.0.container.0.container_run_options").HasValue("cat /proc/cpuinfo"), + 
check.That(data.ResourceName).Key("start_task.0.container.0.run_options").HasValue("cat /proc/cpuinfo"), check.That(data.ResourceName).Key("start_task.0.container.0.image_name").HasValue("centos7"), check.That(data.ResourceName).Key("start_task.0.container.0.working_directory").HasValue("ContainerImageDefault"), ), @@ -1131,8 +1131,8 @@ resource "azurerm_batch_pool" "test" { } container { - container_run_options = "cat /proc/cpuinfo" - image_name = "centos7" + run_options = "cat /proc/cpuinfo" + image_name = "centos7" registry { registry_server = "myContainerRegistry.azurecr.io" user_name = "myUserName" diff --git a/website/docs/d/batch_pool.html.markdown b/website/docs/d/batch_pool.html.markdown index 8e0b97a64a8a..f93baaab8df9 100644 --- a/website/docs/d/batch_pool.html.markdown +++ b/website/docs/d/batch_pool.html.markdown @@ -158,7 +158,7 @@ A `container` block exports the following: * `image_name` - The image to use to create the container in which the task will run. -* `container_run_options` - Additional options to the container create command. +* `run_options` - Additional options to the container create command. * `registry` - The same reference as `container_registries` block defined as follows. diff --git a/website/docs/r/batch_pool.html.markdown b/website/docs/r/batch_pool.html.markdown index 653ef9410f6a..a6647393434f 100644 --- a/website/docs/r/batch_pool.html.markdown +++ b/website/docs/r/batch_pool.html.markdown @@ -292,7 +292,7 @@ A `container` block supports the following: * `image_name` - (Required) The image to use to create the container in which the task will run. This is the full image reference, as would be specified to "docker pull". If no tag is provided as part of the image name, the tag ":latest" is used as a default. -* `container_run_options` - (Optional) Additional options to the container create command. 
These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service. +* `run_options` - (Optional) Additional options to the container create command. These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service. * `registry` - (Optional) The same reference as `container_registries` block defined as follows. From 2c9a35510095995ebc66d41cad77bbc4a02bb549 Mon Sep 17 00:00:00 2001 From: Yun Liu Date: Fri, 30 Sep 2022 10:44:30 +0800 Subject: [PATCH 12/12] change `node_deallocation_option` to `node_deallocation_method` --- internal/services/batch/batch_pool.go | 4 ++-- .../services/batch/batch_pool_resource.go | 10 +++++----- .../batch/batch_pool_resource_test.go | 20 +++++++++---------- website/docs/r/batch_pool.html.markdown | 2 +- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/internal/services/batch/batch_pool.go b/internal/services/batch/batch_pool.go index 2c40fd951ba9..96b16e1e8623 100644 --- a/internal/services/batch/batch_pool.go +++ b/internal/services/batch/batch_pool.go @@ -46,8 +46,8 @@ func flattenBatchPoolFixedScaleSettings(d *pluginsdk.ResourceData, settings *bat result := make(map[string]interface{}) // for now, this is a writeOnly property, so we treat this as secret. 
- if v, ok := d.GetOk("fixed_scale.0.node_deallocation_option"); ok { - result["node_deallocation_option"] = v.(string) + if v, ok := d.GetOk("fixed_scale.0.node_deallocation_method"); ok { + result["node_deallocation_method"] = v.(string) } if settings.TargetDedicatedNodes != nil { diff --git a/internal/services/batch/batch_pool_resource.go b/internal/services/batch/batch_pool_resource.go index f35e7c9e2ba8..6370213fd2eb 100644 --- a/internal/services/batch/batch_pool_resource.go +++ b/internal/services/batch/batch_pool_resource.go @@ -84,10 +84,11 @@ func resourceBatchPool() *pluginsdk.Resource { MaxItems: 1, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ - // Property `node_deallocation_option` is set to be a writeOnly property by service team + // Property `node_deallocation_method` is set to be a writeOnly property by service team // It can only perform on PUT operation and is not able to perform GET operation - // Here we treat `node_deallocation_option` the same as a secret value. - "node_deallocation_option": { + // Here we treat `node_deallocation_method` the same as a secret value. 
+ // Issue link: https://github.com/Azure/azure-rest-api-specs/issues/20948 + "node_deallocation_method": { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ @@ -1285,8 +1286,7 @@ func expandBatchPoolScaleSettings(d *pluginsdk.ResourceData) (*batch.ScaleSettin } fixedScaleSettings := fixedScale[0].(map[string]interface{}) - - nodeDeallocationOption := batch.ComputeNodeDeallocationOption(fixedScaleSettings["node_deallocation_option"].(string)) + nodeDeallocationOption := batch.ComputeNodeDeallocationOption(fixedScaleSettings["node_deallocation_method"].(string)) targetDedicatedNodes := int32(fixedScaleSettings["target_dedicated_nodes"].(int)) targetLowPriorityNodes := int32(fixedScaleSettings["target_low_priority_nodes"].(int)) resizeTimeout := fixedScaleSettings["resize_timeout"].(string) diff --git a/internal/services/batch/batch_pool_resource_test.go b/internal/services/batch/batch_pool_resource_test.go index d0458bcb557a..3decb97f526b 100644 --- a/internal/services/batch/batch_pool_resource_test.go +++ b/internal/services/batch/batch_pool_resource_test.go @@ -148,14 +148,14 @@ func TestAccBatchPool_fixedScale_complete(t *testing.T) { check.That(data.ResourceName).Key("storage_image_reference.0.offer").HasValue("UbuntuServer"), check.That(data.ResourceName).Key("auto_scale.#").HasValue("0"), check.That(data.ResourceName).Key("fixed_scale.#").HasValue("1"), - check.That(data.ResourceName).Key("fixed_scale.0.node_deallocation_option").HasValue("Terminate"), + check.That(data.ResourceName).Key("fixed_scale.0.node_deallocation_method").HasValue("Terminate"), check.That(data.ResourceName).Key("fixed_scale.0.target_dedicated_nodes").HasValue("2"), check.That(data.ResourceName).Key("fixed_scale.0.resize_timeout").HasValue("PT15M"), check.That(data.ResourceName).Key("fixed_scale.0.target_low_priority_nodes").HasValue("0"), check.That(data.ResourceName).Key("start_task.#").HasValue("0"), ), }, - 
data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_option"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_method"), }) } @@ -180,7 +180,7 @@ func TestAccBatchPool_autoScale_complete(t *testing.T) { check.That(data.ResourceName).Key("start_task.#").HasValue("0"), ), }, - data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_option"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_method"), }) } @@ -207,7 +207,7 @@ func TestAccBatchPool_completeUpdated(t *testing.T) { check.That(data.ResourceName).Key("start_task.#").HasValue("0"), ), }, - data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_option"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_method"), { Config: r.autoScale_complete(data), Check: acceptance.ComposeTestCheckFunc( @@ -502,17 +502,17 @@ func TestAccBatchPool_fixedScaleUpdate(t *testing.T) { Config: r.fixedScale_complete(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), - check.That(data.ResourceName).Key("fixed_scale.0.node_deallocation_option").HasValue("Terminate"), + check.That(data.ResourceName).Key("fixed_scale.0.node_deallocation_method").HasValue("Terminate"), ), }, - data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_option"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_method"), { Config: r.fixedScale_completeUpdate(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, - data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_option"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_method"), }) } @@ -768,7 +768,7 @@ resource "azurerm_batch_pool" "test" { node_agent_sku_id = "batch.node.ubuntu 18.04" fixed_scale { - 
node_deallocation_option = "Terminate" + node_deallocation_method = "Terminate" target_dedicated_nodes = 2 resize_timeout = "PT15M" target_low_priority_nodes = 0 @@ -1131,8 +1131,8 @@ resource "azurerm_batch_pool" "test" { } container { - run_options = "cat /proc/cpuinfo" - image_name = "centos7" + run_options = "cat /proc/cpuinfo" + image_name = "centos7" registry { registry_server = "myContainerRegistry.azurecr.io" user_name = "myUserName" diff --git a/website/docs/r/batch_pool.html.markdown b/website/docs/r/batch_pool.html.markdown index a6647393434f..3fbaf2455b46 100644 --- a/website/docs/r/batch_pool.html.markdown +++ b/website/docs/r/batch_pool.html.markdown @@ -252,7 +252,7 @@ To provision a Custom Image, the following fields are applicable: A `fixed_scale` block supports the following: -* `node_deallocation_option` - (Optional) Write-only property. It determines what to do with a node and its running task(s) if the pool size is decreasing. Values are `Requeue`, `RetainedData`, `TaskCompletion` and `Terminate`. +* `node_deallocation_method` - (Optional) It determines what to do with a node and its running task(s) if the pool size is decreasing. Values are `Requeue`, `RetainedData`, `TaskCompletion` and `Terminate`. * `target_dedicated_nodes` - (Optional) The number of nodes in the Batch pool. Defaults to `1`.