diff --git a/internal/services/batch/batch_pool.go b/internal/services/batch/batch_pool.go index 6725f9cbd530..96b16e1e8623 100644 --- a/internal/services/batch/batch_pool.go +++ b/internal/services/batch/batch_pool.go @@ -35,7 +35,7 @@ func flattenBatchPoolAutoScaleSettings(settings *batch.AutoScaleSettings) []inte } // flattenBatchPoolFixedScaleSettings flattens the fixed scale settings for a Batch pool -func flattenBatchPoolFixedScaleSettings(settings *batch.FixedScaleSettings) []interface{} { +func flattenBatchPoolFixedScaleSettings(d *pluginsdk.ResourceData, settings *batch.FixedScaleSettings) []interface{} { results := make([]interface{}, 0) if settings == nil { @@ -45,6 +45,11 @@ func flattenBatchPoolFixedScaleSettings(settings *batch.FixedScaleSettings) []in result := make(map[string]interface{}) + // for now, this is a writeOnly property, so we treat this as secret. + if v, ok := d.GetOk("fixed_scale.0.node_deallocation_method"); ok { + result["node_deallocation_method"] = v.(string) + } + if settings.TargetDedicatedNodes != nil { result["target_dedicated_nodes"] = *settings.TargetDedicatedNodes } @@ -89,7 +94,7 @@ func flattenBatchPoolImageReference(image *batch.ImageReference) []interface{} { } // flattenBatchPoolStartTask flattens a Batch pool start task -func flattenBatchPoolStartTask(startTask *batch.StartTask) []interface{} { +func flattenBatchPoolStartTask(oldConfig *pluginsdk.ResourceData, startTask *batch.StartTask) []interface{} { results := make([]interface{}, 0) if startTask == nil { @@ -104,6 +109,25 @@ func flattenBatchPoolStartTask(startTask *batch.StartTask) []interface{} { } result["command_line"] = commandLine + if startTask.ContainerSettings != nil { + containerSettings := make(map[string]interface{}) + containerSettings["image_name"] = *startTask.ContainerSettings.ImageName + containerSettings["working_directory"] = string(startTask.ContainerSettings.WorkingDirectory) + if startTask.ContainerSettings.ContainerRunOptions != nil { + 
containerSettings["run_options"] = *startTask.ContainerSettings.ContainerRunOptions + } + if startTask.ContainerSettings.Registry != nil { + tmpReg := flattenBatchPoolContainerRegistry(oldConfig, startTask.ContainerSettings.Registry) + containerSettings["registry"] = []interface{}{ + tmpReg, + } + } + + result["container"] = []interface{}{ + containerSettings, + } + } + waitForSuccess := false if startTask.WaitForSuccess != nil { waitForSuccess = *startTask.WaitForSuccess @@ -676,6 +700,29 @@ func ExpandBatchPoolStartTask(list []interface{}) (*batch.StartTask, error) { startTask.EnvironmentSettings = expandCommonEnvironmentProperties(v) } + if startTaskValue["container"] != nil && len(startTaskValue["container"].([]interface{})) > 0 { + var containerSettings batch.TaskContainerSettings + containerSettingsList := startTaskValue["container"].([]interface{}) + + if len(containerSettingsList) > 0 && containerSettingsList[0] != nil { + settingMap := containerSettingsList[0].(map[string]interface{}) + containerSettings.ImageName = utils.String(settingMap["image_name"].(string)) + if containerRunOptions, ok := settingMap["run_options"]; ok { + containerSettings.ContainerRunOptions = utils.String(containerRunOptions.(string)) + } + if settingMap["registry"].([]interface{})[0] != nil { + containerRegMap := settingMap["registry"].([]interface{})[0].(map[string]interface{}) + if containerRegistryRef, err := expandBatchPoolContainerRegistry(containerRegMap); err == nil { + containerSettings.Registry = containerRegistryRef + } + } + if workingDir, ok := settingMap["working_directory"]; ok { + containerSettings.WorkingDirectory = batch.ContainerWorkingDirectory(workingDir.(string)) + } + } + startTask.ContainerSettings = &containerSettings + } + return startTask, nil } @@ -1060,6 +1107,10 @@ func ExpandBatchPoolNetworkConfiguration(list []interface{}) (*batch.NetworkConf networkConfigValue := list[0].(map[string]interface{}) networkConfiguration := &batch.NetworkConfiguration{} 
+ if v, ok := networkConfigValue["dynamic_vnet_assignment_scope"]; ok { + networkConfiguration.DynamicVNetAssignmentScope = batch.DynamicVNetAssignmentScope(v.(string)) + } + if v, ok := networkConfigValue["subnet_id"]; ok { if value := v.(string); value != "" { networkConfiguration.SubnetID = &value @@ -1164,6 +1215,21 @@ func expandPoolNetworkSecurityGroupRule(list []interface{}) []batch.NetworkSecur sourceAddressPrefix := groupRuleMap["source_address_prefix"].(string) access := batch.NetworkSecurityGroupRuleAccess(groupRuleMap["access"].(string)) - networkSecurityGroupRule = append(networkSecurityGroupRule, batch.NetworkSecurityGroupRule{ - Priority: &priority, - SourceAddressPrefix: &sourceAddressPrefix, - Access: access, + + // Build the optional source port ranges first so the rule + // literal below can reference them; a nil slice leaves the + // service default ("*") in place. + var sourcePortRanges *[]string + portRanges := groupRuleMap["source_port_ranges"].([]interface{}) + if len(portRanges) > 0 { + portRangesResult := make([]string, 0) + for _, v := range portRanges { + portRangesResult = append(portRangesResult, v.(string)) + } + sourcePortRanges = &portRangesResult + } + + networkSecurityGroupRule = append(networkSecurityGroupRule, batch.NetworkSecurityGroupRule{ + Priority: &priority, + SourceAddressPrefix: &sourceAddressPrefix, + SourcePortRanges: sourcePortRanges, + Access: access, @@ -1220,10 +1286,17 @@ func flattenBatchPoolNetworkConfiguration(input *batch.NetworkConfiguration) []i if networkSecurity.SourceAddressPrefix != nil { sourceAddressPrefix = *networkSecurity.SourceAddressPrefix } + sourcePortRanges := make([]interface{}, 0) + if networkSecurity.SourcePortRanges != nil { + for _, sourcePortRange := range *networkSecurity.SourcePortRanges { + sourcePortRanges = append(sourcePortRanges, sourcePortRange) + } + } networkSecurities = append(networkSecurities, map[string]interface{}{ "access": string(networkSecurity.Access), "priority": priority, "source_address_prefix": sourceAddressPrefix, + "source_port_ranges": sourcePortRanges, }) } } @@ -1240,6 +1313,7 @@ func flattenBatchPoolNetworkConfiguration(input *batch.NetworkConfiguration) []i return []interface{}{
map[string]interface{}{ + "dynamic_vnet_assignment_scope": string(input.DynamicVNetAssignmentScope), "endpoint_configuration": endpointConfigs, "public_address_provisioning_type": publicAddressProvisioningType, "public_ips": pluginsdk.NewSet(pluginsdk.HashString, publicIPAddressIds), diff --git a/internal/services/batch/batch_pool_data_source.go b/internal/services/batch/batch_pool_data_source.go index ed78db7b0cc8..0568b4c02445 100644 --- a/internal/services/batch/batch_pool_data_source.go +++ b/internal/services/batch/batch_pool_data_source.go @@ -5,6 +5,7 @@ import ( "time" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/services/batch/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/services/batch/validate" @@ -139,25 +140,7 @@ func dataSourceBatchPool() *pluginsdk.Resource { Type: pluginsdk.TypeList, Computed: true, Elem: &pluginsdk.Resource{ - Schema: map[string]*pluginsdk.Schema{ - "registry_server": { - Type: pluginsdk.TypeString, - Computed: true, - }, - "user_assigned_identity_id": { - Type: pluginsdk.TypeString, - Computed: true, - }, - "user_name": { - Type: pluginsdk.TypeString, - Computed: true, - }, - "password": { - Type: pluginsdk.TypeString, - Computed: true, - Sensitive: true, - }, - }, + Schema: batchPoolDataContainerRegistry(), }, }, }, @@ -484,6 +467,10 @@ func dataSourceBatchPool() *pluginsdk.Resource { Computed: true, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ + "dynamic_vnet_assignment_scope": { + Type: pluginsdk.TypeString, + Computed: true, + }, "subnet_id": { Type: pluginsdk.TypeString, Computed: true, @@ -537,6 +524,13 @@ func dataSourceBatchPool() *pluginsdk.Resource { Type: pluginsdk.TypeString, Computed: true, }, + "source_port_ranges": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: 
&pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, }, }, }, @@ -581,6 +575,34 @@ func startTaskDSSchema() map[string]*pluginsdk.Schema { Computed: true, }, + "container": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*schema.Schema{ + "run_options": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "image_name": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "registry": { + Type: pluginsdk.TypeList, + Computed: true, + Elem: &pluginsdk.Resource{ + Schema: batchPoolDataContainerRegistry(), + }, + }, + "working_directory": { + Type: pluginsdk.TypeString, + Computed: true, + }, + }, + }, + }, + "task_retry_maximum": { Type: pluginsdk.TypeInt, Computed: true, @@ -669,6 +691,28 @@ func startTaskDSSchema() map[string]*pluginsdk.Schema { return s } +func batchPoolDataContainerRegistry() map[string]*schema.Schema { + return map[string]*pluginsdk.Schema{ + "registry_server": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "user_assigned_identity_id": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "user_name": { + Type: pluginsdk.TypeString, + Computed: true, + }, + "password": { + Type: pluginsdk.TypeString, + Computed: true, + Sensitive: true, + }, + } +} + func dataSourceBatchPoolRead(d *pluginsdk.ResourceData, meta interface{}) error { client := meta.(*clients.Client).Batch.PoolClient subscriptionId := meta.(*clients.Client).Account.SubscriptionId @@ -700,7 +744,7 @@ func dataSourceBatchPoolRead(d *pluginsdk.ResourceData, meta interface{}) error if err := d.Set("auto_scale", flattenBatchPoolAutoScaleSettings(scaleSettings.AutoScale)); err != nil { return fmt.Errorf("flattening `auto_scale`: %+v", err) } - if err := d.Set("fixed_scale", flattenBatchPoolFixedScaleSettings(scaleSettings.FixedScale)); err != nil { + if err := d.Set("fixed_scale", flattenBatchPoolFixedScaleSettings(d, scaleSettings.FixedScale)); err != nil { return fmt.Errorf("flattening `fixed_scale `: %+v", err) 
} } @@ -816,7 +860,7 @@ func dataSourceBatchPoolRead(d *pluginsdk.ResourceData, meta interface{}) error return fmt.Errorf("setting `certificate`: %v", err) } - d.Set("start_task", flattenBatchPoolStartTask(props.StartTask)) + d.Set("start_task", flattenBatchPoolStartTask(d, props.StartTask)) d.Set("metadata", FlattenBatchMetaData(props.Metadata)) if err := d.Set("network_configuration", flattenBatchPoolNetworkConfiguration(props.NetworkConfiguration)); err != nil { diff --git a/internal/services/batch/batch_pool_resource.go b/internal/services/batch/batch_pool_resource.go index e2d94d49d34a..6370213fd2eb 100644 --- a/internal/services/batch/batch_pool_resource.go +++ b/internal/services/batch/batch_pool_resource.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" @@ -83,6 +84,20 @@ func resourceBatchPool() *pluginsdk.Resource { MaxItems: 1, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ + // Property `node_deallocation_method` is set to be a writeOnly property by service team + // It can only perform on PUT operation and is not able to perform GET operation + // Here we treat `node_deallocation_method` the same as a secret value. 
+ // Issue link: https://github.com/Azure/azure-rest-api-specs/issues/20948 + "node_deallocation_method": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + string(batch.ComputeNodeDeallocationOptionRequeue), + string(batch.ComputeNodeDeallocationOptionRetainedData), + string(batch.ComputeNodeDeallocationOptionTaskCompletion), + string(batch.ComputeNodeDeallocationOptionTerminate), + }, false), + }, "target_dedicated_nodes": { Type: pluginsdk.TypeInt, Optional: true, @@ -153,34 +168,7 @@ func resourceBatchPool() *pluginsdk.Resource { ForceNew: true, ConfigMode: pluginsdk.SchemaConfigModeAttr, Elem: &pluginsdk.Resource{ - Schema: map[string]*pluginsdk.Schema{ - "registry_server": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringIsNotEmpty, - }, - "user_assigned_identity_id": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: commonids.ValidateUserAssignedIdentityID, - Description: "The User Assigned Identity to use for Container Registry access.", - }, - "user_name": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringIsNotEmpty, - }, - "password": { - Type: pluginsdk.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - ValidateFunc: validation.StringIsNotEmpty, - }, - }, + Schema: containerRegistry(), }, AtLeastOneOf: []string{"container_configuration.0.type", "container_configuration.0.container_image_names", "container_configuration.0.container_registries"}, }, @@ -461,6 +449,17 @@ func resourceBatchPool() *pluginsdk.Resource { MaxItems: 1, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ + "dynamic_vnet_assignment_scope": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + Default: string(batch.DynamicVNetAssignmentScopeNone), + DiffSuppressFunc: suppress.CaseDifference, + ValidateFunc: validation.StringInSlice([]string{ + 
string(batch.DynamicVNetAssignmentScopeNone), + string(batch.DynamicVNetAssignmentScopeJob), + }, false), + }, "subnet_id": { Type: pluginsdk.TypeString, Required: true, @@ -548,6 +547,17 @@ func resourceBatchPool() *pluginsdk.Resource { ForceNew: true, ValidateFunc: validation.StringIsNotEmpty, }, + "source_port_ranges": { + Type: pluginsdk.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + Default: "*", + ValidateFunc: validation.StringIsNotEmpty, + }, + }, }, }, }, @@ -1080,7 +1090,7 @@ func resourceBatchPoolRead(d *pluginsdk.ResourceData, meta interface{}) error { if err := d.Set("auto_scale", flattenBatchPoolAutoScaleSettings(scaleSettings.AutoScale)); err != nil { return fmt.Errorf("flattening `auto_scale`: %+v", err) } - if err := d.Set("fixed_scale", flattenBatchPoolFixedScaleSettings(scaleSettings.FixedScale)); err != nil { + if err := d.Set("fixed_scale", flattenBatchPoolFixedScaleSettings(d, scaleSettings.FixedScale)); err != nil { return fmt.Errorf("flattening `fixed_scale `: %+v", err) } } @@ -1198,7 +1208,7 @@ func resourceBatchPoolRead(d *pluginsdk.ResourceData, meta interface{}) error { return fmt.Errorf("flattening `certificate`: %+v", err) } - d.Set("start_task", flattenBatchPoolStartTask(props.StartTask)) + d.Set("start_task", flattenBatchPoolStartTask(d, props.StartTask)) d.Set("metadata", FlattenBatchMetaData(props.Metadata)) if props.MountConfiguration != nil { @@ -1276,12 +1286,13 @@ func expandBatchPoolScaleSettings(d *pluginsdk.ResourceData) (*batch.ScaleSettin } fixedScaleSettings := fixedScale[0].(map[string]interface{}) - + nodeDeallocationOption := batch.ComputeNodeDeallocationOption(fixedScaleSettings["node_deallocation_method"].(string)) targetDedicatedNodes := int32(fixedScaleSettings["target_dedicated_nodes"].(int)) targetLowPriorityNodes := int32(fixedScaleSettings["target_low_priority_nodes"].(int)) resizeTimeout := 
fixedScaleSettings["resize_timeout"].(string) scaleSettings.FixedScale = &batch.FixedScaleSettings{ + NodeDeallocationOption: nodeDeallocationOption, ResizeTimeout: &resizeTimeout, TargetDedicatedNodes: &targetDedicatedNodes, TargetLowPriorityNodes: &targetLowPriorityNodes, @@ -1415,6 +1426,40 @@ func startTaskSchema() map[string]*pluginsdk.Schema { ValidateFunc: validation.StringIsNotEmpty, }, + "container": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "run_options": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "image_name": { + Type: pluginsdk.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "registry": { + Type: pluginsdk.TypeList, + Optional: true, + Elem: &pluginsdk.Resource{ + Schema: containerRegistry(), + }, + }, + "working_directory": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + string(batch.ContainerWorkingDirectoryTaskWorkingDirectory), + string(batch.ContainerWorkingDirectoryContainerImageDefault), + }, false), + }, + }, + }, + }, + "task_retry_maximum": { Type: pluginsdk.TypeInt, Optional: true, @@ -1515,3 +1560,34 @@ func startTaskSchema() map[string]*pluginsdk.Schema { }, } } + +func containerRegistry() map[string]*schema.Schema { + return map[string]*pluginsdk.Schema{ + "registry_server": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "user_assigned_identity_id": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: commonids.ValidateUserAssignedIdentityID, + Description: "The User Assigned Identity to use for Container Registry access.", + }, + "user_name": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "password": { + Type: pluginsdk.TypeString, + Optional: 
true, + ForceNew: true, + Sensitive: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + } +} diff --git a/internal/services/batch/batch_pool_resource_test.go b/internal/services/batch/batch_pool_resource_test.go index 3eea329b4236..3decb97f526b 100644 --- a/internal/services/batch/batch_pool_resource_test.go +++ b/internal/services/batch/batch_pool_resource_test.go @@ -148,13 +148,14 @@ func TestAccBatchPool_fixedScale_complete(t *testing.T) { check.That(data.ResourceName).Key("storage_image_reference.0.offer").HasValue("UbuntuServer"), check.That(data.ResourceName).Key("auto_scale.#").HasValue("0"), check.That(data.ResourceName).Key("fixed_scale.#").HasValue("1"), + check.That(data.ResourceName).Key("fixed_scale.0.node_deallocation_method").HasValue("Terminate"), check.That(data.ResourceName).Key("fixed_scale.0.target_dedicated_nodes").HasValue("2"), check.That(data.ResourceName).Key("fixed_scale.0.resize_timeout").HasValue("PT15M"), check.That(data.ResourceName).Key("fixed_scale.0.target_low_priority_nodes").HasValue("0"), check.That(data.ResourceName).Key("start_task.#").HasValue("0"), ), }, - data.ImportStep("stop_pending_resize_operation"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_method"), }) } @@ -179,7 +180,7 @@ func TestAccBatchPool_autoScale_complete(t *testing.T) { check.That(data.ResourceName).Key("start_task.#").HasValue("0"), ), }, - data.ImportStep("stop_pending_resize_operation"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_method"), }) } @@ -206,7 +207,7 @@ func TestAccBatchPool_completeUpdated(t *testing.T) { check.That(data.ResourceName).Key("start_task.#").HasValue("0"), ), }, - data.ImportStep("stop_pending_resize_operation"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_method"), { Config: r.autoScale_complete(data), Check: acceptance.ComposeTestCheckFunc( @@ -271,9 +272,17 @@ func 
TestAccBatchPool_startTask_complete(t *testing.T) { Config: r.startTask_complete(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("start_task.0.container.0.registry.0.user_name").HasValue("myUserName"), + check.That(data.ResourceName).Key("start_task.0.container.0.registry.0.registry_server").HasValue("myContainerRegistry.azurecr.io"), + check.That(data.ResourceName).Key("start_task.0.container.#").HasValue("1"), + check.That(data.ResourceName).Key("start_task.0.container.0.run_options").HasValue("cat /proc/cpuinfo"), + check.That(data.ResourceName).Key("start_task.0.container.0.image_name").HasValue("centos7"), + check.That(data.ResourceName).Key("start_task.0.container.0.working_directory").HasValue("ContainerImageDefault"), ), }, - data.ImportStep("stop_pending_resize_operation"), + data.ImportStep("stop_pending_resize_operation", + "container_configuration.0.container_registries.0.password", + "start_task.0.container.0.registry.0.password"), }) } @@ -474,8 +483,10 @@ func TestAccBatchPool_frontEndPortRanges(t *testing.T) { check.That(data.ResourceName).Key("fixed_scale.0.target_dedicated_nodes").HasValue("1"), check.That(data.ResourceName).Key("start_task.#").HasValue("0"), check.That(data.ResourceName).Key("network_configuration.#").HasValue("1"), + check.That(data.ResourceName).Key("network_configuration.0.dynamic_vnet_assignment_scope").HasValue("None"), check.That(data.ResourceName).Key("network_configuration.0.subnet_id").Exists(), check.That(data.ResourceName).Key("network_configuration.0.public_ips.#").HasValue("1"), + check.That(data.ResourceName).Key("network_configuration.0.endpoint_configuration.0.network_security_group_rules.0.source_port_ranges.0").HasValue("*"), ), }, data.ImportStep("stop_pending_resize_operation"), @@ -491,16 +502,17 @@ func TestAccBatchPool_fixedScaleUpdate(t *testing.T) { Config: r.fixedScale_complete(data), Check:
acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("fixed_scale.0.node_deallocation_method").HasValue("Terminate"), ), }, - data.ImportStep("stop_pending_resize_operation"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_method"), { Config: r.fixedScale_completeUpdate(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), }, - data.ImportStep("stop_pending_resize_operation"), + data.ImportStep("stop_pending_resize_operation", "fixed_scale.0.node_deallocation_method"), }) } @@ -756,6 +768,7 @@ resource "azurerm_batch_pool" "test" { node_agent_sku_id = "batch.node.ubuntu 18.04" fixed_scale { + node_deallocation_method = "Terminate" target_dedicated_nodes = 2 resize_timeout = "PT15M" target_low_priority_nodes = 0 @@ -1070,15 +1083,9 @@ resource "azurerm_batch_pool" "test" { } func (BatchPoolResource) startTask_complete(data acceptance.TestData) string { + template := BatchPoolResource{}.template(data) return fmt.Sprintf(` -provider "azurerm" { - features {} -} - -resource "azurerm_resource_group" "test" { - name = "testaccRG-batch-%d" - location = "%s" -} +%s resource "azurerm_batch_account" "test" { name = "testaccbatch%s" @@ -1090,7 +1097,7 @@ resource "azurerm_batch_pool" "test" { name = "testaccpool%s" resource_group_name = azurerm_resource_group.test.name account_name = azurerm_batch_account.test.name - node_agent_sku_id = "batch.node.ubuntu 18.04" + node_agent_sku_id = "batch.node.ubuntu 20.04" vm_size = "Standard_A1" fixed_scale { @@ -1098,12 +1105,22 @@ resource "azurerm_batch_pool" "test" { } storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "18.04-lts" + publisher = "microsoft-azure-batch" + offer = "ubuntu-server-container" + sku = "20-04-lts" version = "latest" } + container_configuration { + type = "DockerCompatible" + container_image_names = ["centos7"] + container_registries 
{ + registry_server = "myContainerRegistry.azurecr.io" + user_name = "myUserName" + password = "myPassword" + } + } + start_task { command_line = "echo 'Hello World from $env'" wait_for_success = true @@ -1113,6 +1130,17 @@ resource "azurerm_batch_pool" "test" { bu = "Research&Dev" } + container { + run_options = "cat /proc/cpuinfo" + image_name = "centos7" + registry { + registry_server = "myContainerRegistry.azurecr.io" + user_name = "myUserName" + password = "myPassword" + } + working_directory = "ContainerImageDefault" + } + user_identity { auto_user { elevation_level = "NonAdmin" @@ -1126,7 +1154,7 @@ resource "azurerm_batch_pool" "test" { } } } -`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomString) +`, template, data.RandomString, data.RandomString) } func (BatchPoolResource) startTask_userIdentity(data acceptance.TestData) string { @@ -1920,6 +1948,7 @@ resource "azurerm_batch_pool" "test" { } network_configuration { + dynamic_vnet_assignment_scope = "none" public_address_provisioning_type = "UserManaged" public_ips = [azurerm_public_ip.test.id] subnet_id = azurerm_subnet.test.id @@ -1934,6 +1963,7 @@ resource "azurerm_batch_pool" "test" { access = "Deny" priority = 1001 source_address_prefix = "*" + source_port_ranges = ["*"] } } } diff --git a/website/docs/d/batch_pool.html.markdown b/website/docs/d/batch_pool.html.markdown index 9719c89a17ba..f93baaab8df9 100644 --- a/website/docs/d/batch_pool.html.markdown +++ b/website/docs/d/batch_pool.html.markdown @@ -140,6 +140,8 @@ A `start_task` block exports the following: * `command_line` - The command line executed by the start task. +* `container` - The settings for the container under which the start task runs. + * `task_retry_maximum` - The number of retry count * `wait_for_success` - A flag that indicates if the Batch pool should wait for the start task to be completed. 
@@ -152,6 +154,18 @@ A `start_task` block exports the following: --- +A `container` block exports the following: + +* `image_name` - The image to use to create the container in which the task will run. + +* `run_options` - Additional options to the container create command. + +* `registry` - The same reference as `container_registries` block defined as follows. + +* `working_directory` - A flag to indicate where the container task working directory is. + +--- + A `user_identity` block exports the following: * `user_name` - The username to be used by the Batch pool start task. @@ -298,8 +312,14 @@ A `network_configuration` block exports the following: * `subnet_id` - The ARM resource identifier of the virtual network subnet which the compute nodes of the pool are joined too. +* `dynamic_vnet_assignment_scope` - The scope of dynamic vnet assignment. + * `endpoint_configuration` - The inbound NAT pools that are used to address specific ports on the individual compute node externally. +* `public_ips` - A list of public IP ids that will be allocated to nodes. + +* `public_address_provisioning_type` - Type of public IP address provisioning. + --- A `endpoint_configuration` block exports the following: @@ -324,6 +344,8 @@ A `network_security_group_rules` block exports the following: * `source_address_prefix` - The source address prefix or tag to match for the rule. +* `source_port_ranges` - The source port ranges to match for the rule. 
+ --- A `task_scheduling_policy` block exports the following: diff --git a/website/docs/r/batch_pool.html.markdown b/website/docs/r/batch_pool.html.markdown index 12c97e664b8c..3fbaf2455b46 100644 --- a/website/docs/r/batch_pool.html.markdown +++ b/website/docs/r/batch_pool.html.markdown @@ -252,6 +252,8 @@ To provision a Custom Image, the following fields are applicable: A `fixed_scale` block supports the following: +* `node_deallocation_method` - (Optional) It determines what to do with a node and its running task(s) if the pool size is decreasing. Values are `Requeue`, `RetainedData`, `TaskCompletion` and `Terminate`. + * `target_dedicated_nodes` - (Optional) The number of nodes in the Batch pool. Defaults to `1`. * `target_low_priority_nodes` - (Optional) The number of low priority nodes in the Batch pool. Defaults to `0`. @@ -272,6 +274,8 @@ A `start_task` block supports the following: * `command_line` - (Required) The command line executed by the start task. +* `container` - (Optional) A `container` block is the settings for the container under which the start task runs. When this is specified, all directories recursively below the `AZ_BATCH_NODE_ROOT_DIR` (the root of Azure Batch directories on the node) are mapped into the container, all task environment variables are mapped into the container, and the task command line is executed in the container. + * `task_retry_maximum` - (Optional) The number of retry count. Defaults to `1`. * `wait_for_success` - (Optional) A flag that indicates if the Batch pool should wait for the start task to be completed. Default to `false`. @@ -284,6 +288,18 @@ A `start_task` block supports the following: --- +A `container` block supports the following: + +* `image_name` - (Required) The image to use to create the container in which the task will run. This is the full image reference, as would be specified to "docker pull". If no tag is provided as part of the image name, the tag ":latest" is used as a default. 
+ +* `run_options` - (Optional) Additional options to the container create command. These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service. + +* `registry` - (Optional) The same reference as `container_registries` block defined as follows. + +* `working_directory` - (Optional) A flag to indicate where the container task working directory is. The default is `TaskWorkingDirectory`, an alternative value is `ContainerImageDefault`. + +--- + A `user_identity` block supports the following: * `user_name` - (Optional) The username to be used by the Batch pool start task. @@ -432,6 +448,8 @@ A `network_configuration` block supports the following: * `subnet_id` - (Required) The ARM resource identifier of the virtual network subnet which the compute nodes of the pool will join. Changing this forces a new resource to be created. +* `dynamic_vnet_assignment_scope` - (Optional) The scope of dynamic vnet assignment. Allowed values: `none`, `job`. + * `public_ips` - (Optional) A list of public IP ids that will be allocated to nodes. Changing this forces a new resource to be created. * `endpoint_configuration` - (Optional) A list of inbound NAT pools that can be used to address specific ports on an individual compute node externally. Set as documented in the inbound_nat_pools block below. Changing this forces a new resource to be created. @@ -456,11 +474,13 @@ A `endpoint_configuration` block supports the following: A `network_security_group_rules` block supports the following: -* `access` - The action that should be taken for a specified IP address, subnet range or tag. Acceptable values are `Allow` and `Deny`. Changing this forces a new resource to be created. +* `access` - (Required) The action that should be taken for a specified IP address, subnet range or tag. Acceptable values are `Allow` and `Deny`. Changing this forces a new resource to be created. 
+* `priority` - (Required) The priority for this rule. The value must be at least `150`. Changing this forces a new resource to be created. -* `priority` - The priority for this rule. The value must be at least `150`. Changing this forces a new resource to be created. +* `source_address_prefix` - (Required) The source address prefix or tag to match for the rule. Changing this forces a new resource to be created. -* `source_address_prefix` - The source address prefix or tag to match for the rule. Changing this forces a new resource to be created. +* `source_port_ranges` - (Optional) The source port ranges to match for the rule. Valid values are `*` (for all ports 0 - 65535) or arrays of ports or port ranges (e.g. `100-200`). The ports should be in the range of 0 to 65535 and the port ranges or ports can't overlap. If any other values are provided the request fails with HTTP status code 400. Default value will be `*`. ---