diff --git a/src/azure-cli/azure/cli/command_modules/acs/_params.py b/src/azure-cli/azure/cli/command_modules/acs/_params.py index fb471a7dcd8..16b03bae332 100644 --- a/src/azure-cli/azure/cli/command_modules/acs/_params.py +++ b/src/azure-cli/azure/cli/command_modules/acs/_params.py @@ -448,16 +448,16 @@ def load_arguments(self, _): c.argument('enable_cluster_autoscaler', options_list=["--enable-cluster-autoscaler", "-e"], action='store_true') c.argument('min_count', type=int, validator=validate_nodes_count) c.argument('max_count', type=int, validator=validate_nodes_count) - c.argument('scale_down_mode', arg_type=get_enum_type([CONST_SCALE_DOWN_MODE_DELETE, CONST_SCALE_DOWN_MODE_DEALLOCATE])) c.argument('priority', arg_type=get_enum_type(node_priorities), validator=validate_priority) c.argument('eviction_policy', arg_type=get_enum_type(node_eviction_policies), validator=validate_eviction_policy) c.argument('spot_max_price', type=float, validator=validate_spot_max_price) c.argument('labels', nargs='*', validator=validate_nodepool_labels) c.argument('tags', tags_type) c.argument('node_taints', validator=validate_taints) - c.argument('mode', get_enum_type(node_mode_types)) c.argument('node_osdisk_type', arg_type=get_enum_type(node_os_disk_types)) c.argument('node_osdisk_size', type=int) + c.argument('mode', get_enum_type(node_mode_types)) + c.argument('scale_down_mode', arg_type=get_enum_type([CONST_SCALE_DOWN_MODE_DELETE, CONST_SCALE_DOWN_MODE_DEALLOCATE])) c.argument('max_surge', validator=validate_max_surge) c.argument('max_pods', type=int, options_list=['--max-pods', '-m']) c.argument('zones', zones_type, options_list=['--zones', '-z'], help='Space-separated list of availability zones where agent nodes will be placed.') @@ -478,11 +478,11 @@ def load_arguments(self, _): "--update-cluster-autoscaler", "-u"], action='store_true') c.argument('min_count', type=int, validator=validate_nodes_count) c.argument('max_count', type=int, validator=validate_nodes_count) - c.argument('scale_down_mode', arg_type=get_enum_type([CONST_SCALE_DOWN_MODE_DELETE, CONST_SCALE_DOWN_MODE_DEALLOCATE])) c.argument('labels', nargs='*', validator=validate_nodepool_labels) c.argument('tags', tags_type) c.argument('node_taints', validator=validate_taints) c.argument('mode', get_enum_type(node_mode_types)) + c.argument('scale_down_mode', arg_type=get_enum_type([CONST_SCALE_DOWN_MODE_DELETE, CONST_SCALE_DOWN_MODE_DEALLOCATE])) c.argument('max_surge', validator=validate_max_surge) with self.argument_context('aks nodepool upgrade') as c: diff --git a/src/azure-cli/azure/cli/command_modules/acs/agentpool_decorator.py b/src/azure-cli/azure/cli/command_modules/acs/agentpool_decorator.py index eed33bbda43..195057a8622 100644 --- a/src/azure-cli/azure/cli/command_modules/acs/agentpool_decorator.py +++ b/src/azure-cli/azure/cli/command_modules/acs/agentpool_decorator.py @@ -20,13 +20,21 @@ CONST_SPOT_EVICTION_POLICY_DELETE, CONST_VIRTUAL_MACHINE_SCALE_SETS, AgentPoolDecoratorMode, + DecoratorEarlyExitException, DecoratorMode, ) from azure.cli.command_modules.acs._helpers import get_snapshot_by_snapshot_id from azure.cli.command_modules.acs._validators import extract_comma_separated_string from azure.cli.command_modules.acs.base_decorator import BaseAKSContext, BaseAKSModels, BaseAKSParamDict +from azure.cli.command_modules.acs.decorator import safe_list_get from azure.cli.core import AzCommandsLoader -from azure.cli.core.azclierror import CLIInternalError, InvalidArgumentValueError, RequiredArgumentMissingError +from 
azure.cli.core.azclierror import ( + ArgumentUsageError, + CLIInternalError, + InvalidArgumentValueError, + MutuallyExclusiveArgumentError, + RequiredArgumentMissingError, +) from azure.cli.core.commands import AzCliCommand from azure.cli.core.profiles import ResourceType from azure.cli.core.util import get_file_json, sdk_no_wait @@ -93,6 +101,7 @@ def __init__( super().__init__(cmd, raw_parameters, models, decorator_mode) self.agentpool_decorator_mode = agentpool_decorator_mode self.agentpool = None + self._agentpools = [] # pylint: disable=no-self-use def __validate_counts_in_autoscaler( @@ -218,7 +227,10 @@ def _get_nodepool_name(self, enable_validation: bool = False) -> str: # this parameter does not need dynamic completion # validation if enable_validation: - if self.agentpool_decorator_mode == AgentPoolDecoratorMode.STANDALONE: + if ( + self.agentpool_decorator_mode == AgentPoolDecoratorMode.STANDALONE and + self.decorator_mode == DecoratorMode.CREATE + ): agentpool_client = cf_agent_pools(self.cmd.cli_ctx) instances = agentpool_client.list(self.get_resource_group_name(), self.get_cluster_name()) for agentpool_profile in instances: @@ -265,7 +277,6 @@ def get_max_surge(self): # this parameter does not need validation return max_surge - # pylint: disable=too-many-branches def get_node_count_and_enable_cluster_autoscaler_min_max_count( self, ) -> Tuple[int, bool, Union[int, None], Union[int, None]]: @@ -316,6 +327,93 @@ def get_node_count_and_enable_cluster_autoscaler_min_max_count( ) return node_count, enable_cluster_autoscaler, min_count, max_count + def get_update_enable_disable_cluster_autoscaler_and_min_max_count( + self, + ) -> Tuple[bool, bool, bool, Union[int, None], Union[int, None]]: + """Obtain the value of update_cluster_autoscaler, enable_cluster_autoscaler, disable_cluster_autoscaler, + min_count and max_count. + + This function will verify the parameters through function "__validate_counts_in_autoscaler" by default. Besides + if both enable_cluster_autoscaler and update_cluster_autoscaler are specified, a MutuallyExclusiveArgumentError + will be raised. If enable_cluster_autoscaler or update_cluster_autoscaler is specified and there are multiple + agent pool profiles, an ArgumentUsageError will be raised. If enable_cluster_autoscaler is specified and + autoscaler is already enabled in `mc`, it will output warning messages and exit with code 0. If + update_cluster_autoscaler is specified and autoscaler is not enabled in `mc`, it will raise an + InvalidArgumentValueError. If disable_cluster_autoscaler is specified and autoscaler is not enabled in `mc`, + it will output warning messages and exit with code 0. 
+ + :return: a tuple containing four elements: update_cluster_autoscaler of bool type, enable_cluster_autoscaler + of bool type, disable_cluster_autoscaler of bool type, min_count of int type or None and max_count of int type + or None + """ + # update_cluster_autoscaler + # read the original value passed by the command + update_cluster_autoscaler = self.raw_param.get("update_cluster_autoscaler", False) + + # enable_cluster_autoscaler + # read the original value passed by the command + enable_cluster_autoscaler = self.raw_param.get("enable_cluster_autoscaler", False) + + # disable_cluster_autoscaler + # read the original value passed by the command + disable_cluster_autoscaler = self.raw_param.get("disable_cluster_autoscaler", False) + + # min_count + # read the original value passed by the command + min_count = self.raw_param.get("min_count") + + # max_count + # read the original value passed by the command + max_count = self.raw_param.get("max_count") + + # these parameters do not need dynamic completion + + # validation + if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER: + # For multi-agent pool, use the az aks nodepool command + if (enable_cluster_autoscaler or update_cluster_autoscaler) and len(self._agentpools) > 1: + raise ArgumentUsageError( + 'There are more than one node pool in the cluster. Please use "az aks nodepool" command ' + "to update per node pool auto scaler settings" + ) + + if enable_cluster_autoscaler + update_cluster_autoscaler + disable_cluster_autoscaler > 1: + raise MutuallyExclusiveArgumentError( + "Can only specify one of --enable-cluster-autoscaler, --update-cluster-autoscaler and " + "--disable-cluster-autoscaler" + ) + + self.__validate_counts_in_autoscaler( + None, + enable_cluster_autoscaler or update_cluster_autoscaler, + min_count, + max_count, + decorator_mode=DecoratorMode.UPDATE, + ) + + if enable_cluster_autoscaler and self.agentpool.enable_auto_scaling: + logger.warning( + "Cluster autoscaler is already enabled for this node pool.\n" + 'Please run "az aks --update-cluster-autoscaler" ' + "if you want to update min-count or max-count." + ) + raise DecoratorEarlyExitException() + + if update_cluster_autoscaler and not self.agentpool.enable_auto_scaling: + raise InvalidArgumentValueError( + "Cluster autoscaler is not enabled for this node pool.\n" + 'Run "az aks nodepool update --enable-cluster-autoscaler" ' + "to enable cluster with min-count and max-count." + ) + + if disable_cluster_autoscaler and not self.agentpool.enable_auto_scaling: + logger.warning( + "Cluster autoscaler is already disabled for this node pool." + ) + raise DecoratorEarlyExitException() + + return update_cluster_autoscaler, enable_cluster_autoscaler, disable_cluster_autoscaler, min_count, max_count + def get_node_osdisk_size(self) -> Union[int, None]: """Obtain the value of node_osdisk_size. 
@@ -583,9 +681,10 @@ def get_nodepool_labels(self) -> Union[Dict[str, str], None]: else: nodepool_labels = self.raw_param.get("labels") - # try to read the property value corresponding to the parameter from the `agentpool` object - if self.agentpool and self.agentpool.node_labels is not None: - nodepool_labels = self.agentpool.node_labels + # In create mode, try to read the property value corresponding to the parameter from the `agentpool` object + if self.decorator_mode == DecoratorMode.CREATE: + if self.agentpool and self.agentpool.node_labels is not None: + nodepool_labels = self.agentpool.node_labels # this parameter does not need dynamic completion # this parameter does not need validation @@ -602,9 +701,10 @@ def get_nodepool_tags(self) -> Union[Dict[str, str], None]: else: nodepool_tags = self.raw_param.get("tags") - # try to read the property value corresponding to the parameter from the `agentpool` object - if self.agentpool and self.agentpool.tags is not None: - nodepool_tags = self.agentpool.tags + # In create mode, try to read the property value corresponding to the parameter from the `agentpool` object + if self.decorator_mode == DecoratorMode.CREATE: + if self.agentpool and self.agentpool.tags is not None: + nodepool_tags = self.agentpool.tags # this parameter does not need dynamic completion # this parameter does not need validation @@ -617,13 +717,17 @@ def get_node_taints(self) -> Union[List[str], None]: """ # read the original value passed by the command node_taints = self.raw_param.get("node_taints") - # normalize, keep None as None + # normalize, default is an empty list if node_taints is not None: node_taints = [x.strip() for x in (node_taints.split(",") if node_taints else [])] + # keep None as None for update mode + if node_taints is None and self.decorator_mode == DecoratorMode.CREATE: + node_taints = [] - # try to read the property value corresponding to the parameter from the `agentpool` object - if self.agentpool and self.agentpool.node_taints is not None: - node_taints = self.agentpool.node_taints + # In create mode, try to read the property value corresponding to the parameter from the `agentpool` object + if self.decorator_mode == DecoratorMode.CREATE: + if self.agentpool and self.agentpool.node_taints is not None: + node_taints = self.agentpool.node_taints # this parameter does not need validation return node_taints @@ -1228,8 +1332,8 @@ def set_up_custom_node_config(self, agentpool: AgentPool) -> AgentPool: agentpool.linux_os_config = self.context.get_linux_os_config() return agentpool - def construct_default_agentpool_profile(self) -> AgentPool: - """The overall controller used to construct the default AgentPool profile. + def construct_agentpool_profile_default(self) -> AgentPool: + """The overall controller used to construct the AgentPool profile by default. The completely constructed AgentPool object will later be passed as a parameter to the underlying SDK (mgmt-containerservice) to send the actual request. @@ -1266,8 +1370,8 @@ def construct_default_agentpool_profile(self) -> AgentPool: def add_agentpool(self, agentpool: AgentPool) -> AgentPool: """Send request to add a new agentpool. - The function "sdk_no_wait" will be called to use the ContainerServiceClient to send a reqeust to add a new agent - pool to the cluster. + The function "sdk_no_wait" will be called to use the Agentpool operations of ContainerServiceClient to send a + request to add a new agent pool to the cluster.
:return: the AgentPool object """ @@ -1292,6 +1396,7 @@ def __init__( client: AgentPoolsOperations, raw_parameters: Dict, resource_type: ResourceType, + agentpool_decorator_mode: AgentPoolDecoratorMode, ): """Internal controller of aks_agentpool_update. @@ -1303,8 +1408,135 @@ def __init__( """ self.cmd = cmd self.client = client - self.models = AKSAgentPoolModels(cmd, resource_type) + self.agentpool_decorator_mode = agentpool_decorator_mode + self.models = AKSAgentPoolModels(cmd, resource_type, agentpool_decorator_mode) # store the context in the process of assemble the AgentPool object self.context = AKSAgentPoolContext( - cmd, AKSAgentPoolParamDict(raw_parameters), self.models, decorator_mode=DecoratorMode.UPDATE + cmd, AKSAgentPoolParamDict(raw_parameters), self.models, DecoratorMode.UPDATE, agentpool_decorator_mode + ) + + def _ensure_agentpool(self, agentpool: AgentPool) -> None: + """Internal function to ensure that the incoming `agentpool` object is valid and the same as the attached + `agentpool` object in the context. + + If the incoming `agentpool` is not valid or is inconsistent with the `agentpool` in the context, raise a + CLIInternalError. + + :return: None + """ + if not isinstance(agentpool, self.models.UnifiedAgentPoolModel): + raise CLIInternalError( + "Unexpected agentpool object with type '{}'.".format(type(agentpool)) + ) + + if self.context.agentpool != agentpool: + raise CLIInternalError( + "Inconsistent state detected. The incoming `agentpool` " + "is not the same as the `agentpool` in the context." + ) + + # pylint: disable=protected-access + def fetch_agentpool(self, agentpools: List[AgentPool] = None) -> AgentPool: + """Get the AgentPool object currently in use and attach it to internal context. + + Internally send request using Agentpool operations of ContainerServiceClient. + + :return: the AgentPool object + """ + if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER: + self.context._agentpools = agentpools + agentpool = safe_list_get(agentpools, 0) + else: + agentpool = self.client.get( + self.context.get_resource_group_name(), + self.context.get_cluster_name(), + self.context.get_nodepool_name(), + ) + + # attach agentpool to AKSAgentPoolContext + self.context.attach_agentpool(agentpool) + return agentpool + + def update_auto_scaler_properties(self, agentpool: AgentPool) -> AgentPool: + """Update auto scaler related properties for the Agentpool object. + + :return: the Agentpool object + """ + self._ensure_agentpool(agentpool) + + ( + update_cluster_autoscaler, + enable_cluster_autoscaler, + disable_cluster_autoscaler, + min_count, + max_count, + ) = ( + self.context.get_update_enable_disable_cluster_autoscaler_and_min_max_count() + ) + + if update_cluster_autoscaler or enable_cluster_autoscaler: + agentpool.enable_auto_scaling = True + agentpool.min_count = min_count + agentpool.max_count = max_count + + if disable_cluster_autoscaler: + agentpool.enable_auto_scaling = False + agentpool.min_count = None + agentpool.max_count = None + return agentpool + + def update_label_tag_taint(self, agentpool: AgentPool) -> AgentPool: + """Set up label, tag, taint for the AgentPool object. 
+ + :return: the AgentPool object + """ + self._ensure_agentpool(agentpool) + + labels = self.context.get_nodepool_labels() + if labels is not None: + agentpool.node_labels = labels + + tags = self.context.get_nodepool_tags() + if tags is not None: + agentpool.tags = tags + + node_taints = self.context.get_node_taints() + if node_taints is not None: + agentpool.node_taints = node_taints + return agentpool + + def update_agentpool_profile_default(self, agentpools: List[AgentPool] = None) -> AgentPool: + """The overall controller used to update the AgentPool profile by default. + + The completely constructed AgentPool object will later be passed as a parameter to the underlying SDK + (mgmt-containerservice) to send the actual request. + + :return: the AgentPool object + """ + # fetch the Agentpool object + agentpool = self.fetch_agentpool(agentpools) + # update auto scaler properties + agentpool = self.update_auto_scaler_properties(agentpool) + # update label, tag, taint + agentpool = self.update_label_tag_taint(agentpool) + return agentpool + + def update_agentpool(self, agentpool: AgentPool) -> AgentPool: + """Send request to update an existing agentpool. + + The function "sdk_no_wait" will be called to use the Agentpool operations of ContainerServiceClient to send a + request to update an existing agent pool of the cluster. + + :return: the AgentPool object + """ + self._ensure_agentpool(agentpool) + + return sdk_no_wait( + self.context.get_no_wait(), + self.client.begin_create_or_update, + self.context.get_resource_group_name(), + self.context.get_cluster_name(), + self.context.get_nodepool_name(), + agentpool, + headers=self.context.get_aks_custom_headers(), ) diff --git a/src/azure-cli/azure/cli/command_modules/acs/custom.py b/src/azure-cli/azure/cli/command_modules/acs/custom.py index e3c50d0a6ce..33a61a3c48f 100644 --- a/src/azure-cli/azure/cli/command_modules/acs/custom.py +++ b/src/azure-cli/azure/cli/command_modules/acs/custom.py @@ -2887,37 +2887,37 @@ def aks_agentpool_list(cmd, client, resource_group_name, cluster_name): def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name, kubernetes_version=None, - zones=None, - enable_node_public_ip=False, - node_public_ip_prefix_id=None, node_vm_size=None, - node_osdisk_type=None, - node_osdisk_size=0, - node_count=3, - vnet_subnet_id=None, - pod_subnet_id=None, - ppg=None, - max_pods=0, os_type=None, os_sku=None, + vnet_subnet_id=None, + pod_subnet_id=None, + enable_node_public_ip=False, + node_public_ip_prefix_id=None, + enable_cluster_autoscaler=False, min_count=None, max_count=None, - enable_cluster_autoscaler=False, - scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE, - node_taints=None, + node_count=3, priority=CONST_SCALE_SET_PRIORITY_REGULAR, eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE, spot_max_price=float('nan'), - tags=None, labels=None, - kubelet_config=None, - linux_os_config=None, - max_surge=None, + tags=None, + node_taints=None, + node_osdisk_type=None, + node_osdisk_size=0, mode=CONST_NODEPOOL_MODE_USER, + scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE, + max_surge=None, + max_pods=0, + zones=None, + ppg=None, enable_encryption_at_host=False, enable_ultra_ssd=False, enable_fips_image=False, snapshot_id=None, + kubelet_config=None, + linux_os_config=None, no_wait=False, aks_custom_headers=None): AgentPool = cmd.get_models('AgentPool', @@ -3043,116 +3043,19 @@ def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_n ) -def aks_agentpool_scale(cmd, client, resource_group_name,
cluster_name, - nodepool_name, - node_count=3, - no_wait=False): - instance = client.get(resource_group_name, cluster_name, nodepool_name) - new_node_count = int(node_count) - if instance.enable_auto_scaling: - raise CLIError("Cannot scale cluster autoscaler enabled node pool.") - if new_node_count == instance.count: - raise CLIError( - "The new node count is the same as the current node count.") - instance.count = new_node_count # pylint: disable=no-member - return sdk_no_wait( - no_wait, - client.begin_create_or_update, - resource_group_name, - cluster_name, - nodepool_name, - instance, - ) - - -def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name, - nodepool_name, - kubernetes_version='', - node_image_only=False, - max_surge=None, - no_wait=False, - aks_custom_headers=None, - snapshot_id=None): - AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings', operation_group='agent_pools') - if kubernetes_version != '' and node_image_only: - raise CLIError( - 'Conflicting flags. Upgrading the Kubernetes version will also ' - 'upgrade node image version. If you only want to upgrade the ' - 'node version please use the "--node-image-only" option only.' - ) - - # Note: we exclude this option because node image upgrade can't accept nodepool put fields like max surge - if max_surge and node_image_only: - raise MutuallyExclusiveArgumentError( - 'Conflicting flags. Unable to specify max-surge with node-image-only.' - 'If you want to use max-surge with a node image upgrade, please first ' - 'update max-surge using "az aks nodepool update --max-surge".' - ) - - if node_image_only: - return _upgrade_single_nodepool_image_version(no_wait, - client, - resource_group_name, - cluster_name, - nodepool_name, - snapshot_id) - - # load model CreationData - from azure.cli.command_modules.acs.decorator import AKSModels - CreationData = AKSModels(cmd, ResourceType.MGMT_CONTAINERSERVICE).CreationData - - creationData = None - if snapshot_id: - snapshot = get_snapshot_by_snapshot_id(cmd.cli_ctx, snapshot_id) - if not kubernetes_version and not node_image_only: - kubernetes_version = snapshot.kubernetes_version - - creationData = CreationData( - source_resource_id=snapshot_id - ) - - instance = client.get(resource_group_name, cluster_name, nodepool_name) - instance.orchestrator_version = kubernetes_version - instance.creation_data = creationData - - if not instance.upgrade_settings: - instance.upgrade_settings = AgentPoolUpgradeSettings() - - if max_surge: - instance.upgrade_settings.max_surge = max_surge - - # custom headers - aks_custom_headers = extract_comma_separated_string( - aks_custom_headers, - enable_strip=True, - extract_kv=True, - default_value={}, - allow_appending_values_to_same_key=True, - ) - - return sdk_no_wait( - no_wait, - client.begin_create_or_update, - resource_group_name, - cluster_name, - nodepool_name, - instance, - headers=aks_custom_headers, - ) - - # pylint: disable=too-many-boolean-expressions def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name, enable_cluster_autoscaler=False, disable_cluster_autoscaler=False, update_cluster_autoscaler=False, - scale_down_mode=None, - min_count=None, max_count=None, - tags=None, - max_surge=None, - mode=None, + min_count=None, + max_count=None, labels=None, + tags=None, node_taints=None, + mode=None, + scale_down_mode=None, + max_surge=None, no_wait=False, aks_custom_headers=None): AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings', @@ -3255,6 +3158,104 @@ def 
aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepoo ) +def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name, + nodepool_name, + node_count=3, + no_wait=False): + instance = client.get(resource_group_name, cluster_name, nodepool_name) + new_node_count = int(node_count) + if instance.enable_auto_scaling: + raise CLIError("Cannot scale cluster autoscaler enabled node pool.") + if new_node_count == instance.count: + raise CLIError( + "The new node count is the same as the current node count.") + instance.count = new_node_count # pylint: disable=no-member + return sdk_no_wait( + no_wait, + client.begin_create_or_update, + resource_group_name, + cluster_name, + nodepool_name, + instance, + ) + + +def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name, + nodepool_name, + kubernetes_version='', + node_image_only=False, + max_surge=None, + no_wait=False, + aks_custom_headers=None, + snapshot_id=None): + AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings', operation_group='agent_pools') + if kubernetes_version != '' and node_image_only: + raise CLIError( + 'Conflicting flags. Upgrading the Kubernetes version will also ' + 'upgrade node image version. If you only want to upgrade the ' + 'node version please use the "--node-image-only" option only.' + ) + + # Note: we exclude this option because node image upgrade can't accept nodepool put fields like max surge + if max_surge and node_image_only: + raise MutuallyExclusiveArgumentError( + 'Conflicting flags. Unable to specify max-surge with node-image-only.' + 'If you want to use max-surge with a node image upgrade, please first ' + 'update max-surge using "az aks nodepool update --max-surge".' + ) + + if node_image_only: + return _upgrade_single_nodepool_image_version(no_wait, + client, + resource_group_name, + cluster_name, + nodepool_name, + snapshot_id) + + # load model CreationData + from azure.cli.command_modules.acs.decorator import AKSModels + CreationData = AKSModels(cmd, ResourceType.MGMT_CONTAINERSERVICE).CreationData + + creationData = None + if snapshot_id: + snapshot = get_snapshot_by_snapshot_id(cmd.cli_ctx, snapshot_id) + if not kubernetes_version and not node_image_only: + kubernetes_version = snapshot.kubernetes_version + + creationData = CreationData( + source_resource_id=snapshot_id + ) + + instance = client.get(resource_group_name, cluster_name, nodepool_name) + instance.orchestrator_version = kubernetes_version + instance.creation_data = creationData + + if not instance.upgrade_settings: + instance.upgrade_settings = AgentPoolUpgradeSettings() + + if max_surge: + instance.upgrade_settings.max_surge = max_surge + + # custom headers + aks_custom_headers = extract_comma_separated_string( + aks_custom_headers, + enable_strip=True, + extract_kv=True, + default_value={}, + allow_appending_values_to_same_key=True, + ) + + return sdk_no_wait( + no_wait, + client.begin_create_or_update, + resource_group_name, + cluster_name, + nodepool_name, + instance, + headers=aks_custom_headers, + ) + + def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name, nodepool_name, no_wait=False): diff --git a/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_agentpool_decorator.py b/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_agentpool_decorator.py index cdcbd861268..86d4059079c 100644 --- a/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_agentpool_decorator.py +++ 
b/src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_agentpool_decorator.py @@ -253,6 +253,149 @@ def common_get_node_count_and_enable_cluster_autoscaler_min_max_count( (5, True, 1, 10), ) + def common_get_update_enable_disable_cluster_autoscaler_and_min_max_count( + self, + ): + # default + ctx_1 = AKSAgentPoolContext( + self.cmd, + AKSAgentPoolParamDict( + { + "update_cluster_autoscaler": False, + "enable_cluster_autoscaler": False, + "disable_cluster_autoscaler": False, + "min_count": None, + "max_count": None, + } + ), + self.models, + DecoratorMode.CREATE, + self.agentpool_decorator_mode, + ) + agentpool_1 = self.create_initialized_agentpool_instance(count=3) + ctx_1.attach_agentpool(agentpool_1) + self.assertEqual( + ctx_1.get_update_enable_disable_cluster_autoscaler_and_min_max_count(), + (False, False, False, None, None), + ) + + # custom value + ctx_2 = AKSAgentPoolContext( + self.cmd, + AKSAgentPoolParamDict( + { + "update_cluster_autoscaler": True, + "enable_cluster_autoscaler": False, + "disable_cluster_autoscaler": False, + "min_count": None, + "max_count": None, + } + ), + self.models, + DecoratorMode.CREATE, + self.agentpool_decorator_mode, + ) + agentpool_2 = self.create_initialized_agentpool_instance(count=3) + ctx_2.attach_agentpool(agentpool_2) + ctx_2._agentpools = [agentpool_2, agentpool_2] + if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER: + # fail on multi-agent pool + with self.assertRaises(ArgumentUsageError): + ctx_2.get_update_enable_disable_cluster_autoscaler_and_min_max_count() + else: + # fail on min count and max count not specifed + with self.assertRaises(RequiredArgumentMissingError): + ctx_2.get_update_enable_disable_cluster_autoscaler_and_min_max_count() + + # custom value + ctx_3 = AKSAgentPoolContext( + self.cmd, + AKSAgentPoolParamDict( + { + "update_cluster_autoscaler": False, + "enable_cluster_autoscaler": True, + "disable_cluster_autoscaler": True, + "min_count": None, + "max_count": None, + } + ), + self.models, + DecoratorMode.CREATE, + self.agentpool_decorator_mode, + ) + agentpool_3 = self.create_initialized_agentpool_instance(count=3) + ctx_3.attach_agentpool(agentpool_3) + # fail on mutually exclusive update_cluster_autoscaler, enable_cluster_autoscaler and disable_cluster_autoscaler + with self.assertRaises(MutuallyExclusiveArgumentError): + ctx_3.get_update_enable_disable_cluster_autoscaler_and_min_max_count() + + # custom value + ctx_4 = AKSAgentPoolContext( + self.cmd, + AKSAgentPoolParamDict( + { + "update_cluster_autoscaler": False, + "enable_cluster_autoscaler": True, + "disable_cluster_autoscaler": False, + "min_count": 1, + "max_count": 5, + } + ), + self.models, + DecoratorMode.CREATE, + self.agentpool_decorator_mode, + ) + agentpool_4 = self.create_initialized_agentpool_instance(count=3, enable_auto_scaling=True) + ctx_4.attach_agentpool(agentpool_4) + # fail on cluster autoscaler already enabled + with self.assertRaises(DecoratorEarlyExitException): + ctx_4.get_update_enable_disable_cluster_autoscaler_and_min_max_count() + + # custom value + ctx_5 = AKSAgentPoolContext( + self.cmd, + AKSAgentPoolParamDict( + { + "update_cluster_autoscaler": True, + "enable_cluster_autoscaler": False, + "disable_cluster_autoscaler": False, + "min_count": 1, + "max_count": 5, + } + ), + self.models, + DecoratorMode.CREATE, + self.agentpool_decorator_mode, + ) + agentpool_5 = self.create_initialized_agentpool_instance(count=3, enable_auto_scaling=False) + ctx_5.attach_agentpool(agentpool_5) + # fail on cluster 
autoscaler not enabled + with self.assertRaises(InvalidArgumentValueError): + ctx_5.get_update_enable_disable_cluster_autoscaler_and_min_max_count() + + # custom value + ctx_6 = AKSAgentPoolContext( + self.cmd, + AKSAgentPoolParamDict( + { + "update_cluster_autoscaler": False, + "enable_cluster_autoscaler": False, + "disable_cluster_autoscaler": True, + "min_count": None, + "max_count": None, + } + ), + self.models, + DecoratorMode.CREATE, + self.agentpool_decorator_mode, + ) + + agentpool_6 = self.create_initialized_agentpool_instance(count=3, enable_auto_scaling=False) + ctx_6.attach_agentpool(agentpool_6) + # fail on cluster autoscaler already disabled + with self.assertRaises(DecoratorEarlyExitException): + ctx_6.get_update_enable_disable_cluster_autoscaler_and_min_max_count() + def common_get_node_osdisk_size(self): # default ctx_1 = AKSAgentPoolContext( @@ -574,6 +717,25 @@ def common_get_nodepool_labels(self): ctx_1.attach_agentpool(agentpool) self.assertEqual(ctx_1.get_nodepool_labels(), {"key1": "value1", "key2": "value2"}) + # custom + ctx_2 = AKSAgentPoolContext( + self.cmd, + AKSAgentPoolParamDict({"nodepool_labels": "test_nodepool_labels", "labels": "test_labels"}), + self.models, + DecoratorMode.UPDATE, + self.agentpool_decorator_mode, + ) + if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER: + self.assertEqual(ctx_2.get_nodepool_labels(), "test_nodepool_labels") + else: + self.assertEqual(ctx_2.get_nodepool_labels(), "test_labels") + agentpool_2 = self.create_initialized_agentpool_instance(node_labels={"key1": "value1", "key2": "value2"}) + ctx_2.attach_agentpool(agentpool_2) + if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER: + self.assertEqual(ctx_2.get_nodepool_labels(), "test_nodepool_labels") + else: + self.assertEqual(ctx_2.get_nodepool_labels(), "test_labels") + def common_get_nodepool_tags(self): # default ctx_1 = AKSAgentPoolContext( @@ -591,6 +753,25 @@ def common_get_nodepool_tags(self): ctx_1.attach_agentpool(agentpool) self.assertEqual(ctx_1.get_nodepool_tags(), {}) + # custom + ctx_2 = AKSAgentPoolContext( + self.cmd, + AKSAgentPoolParamDict({"nodepool_tags": "test_nodepool_tags", "tags": "test_tags"}), + self.models, + DecoratorMode.UPDATE, + self.agentpool_decorator_mode, + ) + if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER: + self.assertEqual(ctx_2.get_nodepool_tags(), "test_nodepool_tags") + else: + self.assertEqual(ctx_2.get_nodepool_tags(), "test_tags") + agentpool_2 = self.create_initialized_agentpool_instance(tags={}) + ctx_2.attach_agentpool(agentpool_2) + if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER: + self.assertEqual(ctx_2.get_nodepool_tags(), "test_nodepool_tags") + else: + self.assertEqual(ctx_2.get_nodepool_tags(), "test_tags") + def common_get_node_taints(self): # default ctx_1 = AKSAgentPoolContext( @@ -605,6 +786,19 @@ def common_get_node_taints(self): ctx_1.attach_agentpool(agentpool) self.assertEqual(ctx_1.get_node_taints(), []) + # custom + ctx_2 = AKSAgentPoolContext( + self.cmd, + AKSAgentPoolParamDict({"node_taints": ""}), + self.models, + DecoratorMode.UPDATE, + self.agentpool_decorator_mode, + ) + self.assertEqual(ctx_2.get_node_taints(), []) + agentpool_2 = self.create_initialized_agentpool_instance(node_taints=["abc=xyz:123", "123=456:abc"]) + ctx_2.attach_agentpool(agentpool_2) + self.assertEqual(ctx_2.get_node_taints(), []) + def common_get_priority(self): # default ctx_1 = AKSAgentPoolContext( @@ -1051,6 +1245,9 @@ def 
test_get_node_count_and_enable_cluster_autoscaler_min_max_count( ): self.common_get_node_count_and_enable_cluster_autoscaler_min_max_count() + def test_get_update_enable_disable_cluster_autoscaler_and_min_max_count(self): + self.common_get_update_enable_disable_cluster_autoscaler_and_min_max_count() + def test_get_node_osdisk_size(self): self.common_get_node_osdisk_size() @@ -1191,6 +1388,9 @@ def test_get_node_count_and_enable_cluster_autoscaler_min_max_count( ): self.common_get_node_count_and_enable_cluster_autoscaler_min_max_count() + def test_get_update_enable_disable_cluster_autoscaler_and_min_max_count(self): + self.common_get_update_enable_disable_cluster_autoscaler_and_min_max_count() + def test_get_node_osdisk_size(self): self.common_get_node_osdisk_size() @@ -1740,7 +1940,7 @@ def test_set_up_vm_properties(self): def test_set_up_custom_node_config(self): self.common_set_up_custom_node_config() - def test_construct_default_agentpool(self): + def test_construct_agentpool_profile_default(self): import inspect from azure.cli.command_modules.acs.custom import aks_agentpool_add @@ -1782,22 +1982,23 @@ def test_construct_default_agentpool(self): "azure.cli.command_modules.acs.agentpool_decorator.cf_agent_pools", return_value=Mock(list=Mock(return_value=[])), ): - dec_agentpool_1 = dec_1.construct_default_agentpool_profile() + dec_agentpool_1 = dec_1.construct_agentpool_profile_default() ground_truth_upgrade_settings_1 = self.models.AgentPoolUpgradeSettings() ground_truth_agentpool_1 = self.create_initialized_agentpool_instance( nodepool_name="test_nodepool_name", upgrade_settings=ground_truth_upgrade_settings_1, os_disk_size_gb=0, - count=3, enable_auto_scaling=False, + count=3, os_type=CONST_DEFAULT_NODE_OS_TYPE, vm_size=CONST_DEFAULT_NODE_VM_SIZE, + node_taints=[], + enable_node_public_ip=False, type_properties_type=CONST_VIRTUAL_MACHINE_SCALE_SETS, enable_encryption_at_host=False, enable_ultra_ssd=False, enable_fips=False, - enable_node_public_ip=False, mode=CONST_NODEPOOL_MODE_USER, scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE, ) @@ -1819,7 +2020,7 @@ def test_add_agentpool(self): ) # fail on passing the wrong agentpool object with self.assertRaises(CLIInternalError): - dec_1.set_up_label_tag_taint(None) + dec_1.add_agentpool(None) agentpool_1 = self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_name") dec_1.context.attach_agentpool(agentpool_1) with patch("azure.cli.command_modules.acs.agentpool_decorator.sdk_no_wait") as put_agentpool: @@ -1880,7 +2081,7 @@ def test_set_up_vm_properties(self): def test_set_up_custom_node_config(self): self.common_set_up_custom_node_config() - def test_construct_default_agentpool(self): + def test_construct_agentpool_profile_default(self): import inspect from azure.cli.command_modules.acs.custom import aks_create @@ -1922,19 +2123,20 @@ def test_construct_default_agentpool(self): "azure.cli.command_modules.acs.agentpool_decorator.cf_agent_pools", return_value=Mock(list=Mock(return_value=[])), ): - dec_agentpool_1 = dec_1.construct_default_agentpool_profile() + dec_agentpool_1 = dec_1.construct_agentpool_profile_default() upgrade_settings_1 = self.models.AgentPoolUpgradeSettings() ground_truth_agentpool_1 = self.create_initialized_agentpool_instance( nodepool_name="nodepool1", upgrade_settings=upgrade_settings_1, os_disk_size_gb=0, - count=3, enable_auto_scaling=False, - enable_node_public_ip=False, + count=3, orchestrator_version="", os_type=CONST_DEFAULT_NODE_OS_TYPE, vm_size=CONST_DEFAULT_NODE_VM_SIZE, + node_taints=[], + 
enable_node_public_ip=False, type=CONST_VIRTUAL_MACHINE_SCALE_SETS, enable_encryption_at_host=False, enable_ultra_ssd=False, @@ -1947,9 +2149,367 @@ def test_construct_default_agentpool(self): dec_1.context.raw_param.print_usage_statistics() -class AKSAgentPoolUpdateDecoratorTestCase(unittest.TestCase): - def test(self): - pass +class AKSAgentPoolUpdateDecoratorCommonTestCase(unittest.TestCase): + def _remove_defaults_in_agentpool(self, agentpool: AgentPool) -> AgentPool: + """Internal function to remove values from properties with default values of the `agentpool` object. + + Removing default values is to prevent getters from mistakenly overwriting user provided values with default + values in the object. + + :return: the AgentPool object + """ + self.defaults_in_agentpool = {} + for attr_name, attr_value in vars(agentpool).items(): + if not attr_name.startswith("_") and attr_name != "name" and attr_value is not None: + self.defaults_in_agentpool[attr_name] = attr_value + setattr(agentpool, attr_name, None) + return agentpool + + def _restore_defaults_in_agentpool(self, agentpool: AgentPool) -> AgentPool: + """Internal function to restore values of properties with default values of the `agentpool` object. + + Restoring default values is to keep the content of the request sent by cli consistent with that before the + refactoring. + + :return: the AgentPool object + """ + for key, value in self.defaults_in_agentpool.items(): + if getattr(agentpool, key, None) is None: + setattr(agentpool, key, value) + return agentpool + + def create_initialized_agentpool_instance( + self, nodepool_name="nodepool1", remove_defaults=True, restore_defaults=True, **kwargs + ) -> AgentPool: + """Helper function to create a properly initialized agentpool instance. + + :return: the AgentPool object + """ + if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER: + agentpool = self.models.UnifiedAgentPoolModel(name=nodepool_name) + else: + agentpool = self.models.UnifiedAgentPoolModel() + agentpool.name = nodepool_name + + # remove defaults + if remove_defaults: + self._remove_defaults_in_agentpool(agentpool) + + # set properties + for key, value in kwargs.items(): + setattr(agentpool, key, value) + + # resote defaults + if restore_defaults: + self._restore_defaults_in_agentpool(agentpool) + return agentpool + + def common_ensure_agentpool(self): + dec_1 = AKSAgentPoolUpdateDecorator( + self.cmd, + self.client, + {}, + self.resource_type, + self.agentpool_decorator_mode, + ) + # fail on passing the wrong agentpool object + with self.assertRaises(CLIInternalError): + dec_1._ensure_agentpool(None) + agentpool_1 = self.create_initialized_agentpool_instance() + # fail on inconsistent agentpool with internal context + with self.assertRaises(CLIInternalError): + dec_1._ensure_agentpool(agentpool_1) + + def common_update_auto_scaler_properties(self): + dec_1 = AKSAgentPoolUpdateDecorator( + self.cmd, + self.client, + { + "enable_cluster_autoscaler": False, + "disable_cluster_autoscaler": False, + "update_cluster_autoscaler": True, + "min_count": 1, + "max_count": 5, + }, + self.resource_type, + self.agentpool_decorator_mode, + ) + # fail on passing the wrong agentpool object + with self.assertRaises(CLIInternalError): + dec_1.update_auto_scaler_properties(None) + agentpool_1 = self.create_initialized_agentpool_instance( + enable_auto_scaling=True, node_count=3, min_count=2, max_count=4 + ) + dec_1.context.attach_agentpool(agentpool_1) + dec_agentpool_1 = dec_1.update_auto_scaler_properties(agentpool_1) 
+ grond_truth_agentpool_1 = self.create_initialized_agentpool_instance( + enable_auto_scaling=True, node_count=3, min_count=1, max_count=5 + ) + self.assertEqual(dec_agentpool_1, grond_truth_agentpool_1) + + dec_2 = AKSAgentPoolUpdateDecorator( + self.cmd, + self.client, + { + "enable_cluster_autoscaler": False, + "disable_cluster_autoscaler": True, + "update_cluster_autoscaler": False, + "min_count": None, + "max_count": None, + }, + self.resource_type, + self.agentpool_decorator_mode, + ) + # fail on passing the wrong agentpool object + with self.assertRaises(CLIInternalError): + dec_2.update_auto_scaler_properties(None) + agentpool_2 = self.create_initialized_agentpool_instance( + enable_auto_scaling=True, node_count=3, min_count=2, max_count=4 + ) + dec_2.context.attach_agentpool(agentpool_2) + dec_agentpool_2 = dec_2.update_auto_scaler_properties(agentpool_2) + grond_truth_agentpool_2 = self.create_initialized_agentpool_instance( + enable_auto_scaling=False, node_count=3, min_count=None, max_count=None + ) + self.assertEqual(dec_agentpool_2, grond_truth_agentpool_2) + + def common_update_label_tag_taint(self): + dec_1 = AKSAgentPoolUpdateDecorator( + self.cmd, + self.client, + { + "nodepool_labels": "test_nodepool_labels", + "nodepool_tags": "test_nodepool_tags", + "labels": "test_labels", + "tags": "test_tags", + "node_taints": "", + }, + self.resource_type, + self.agentpool_decorator_mode, + ) + # fail on passing the wrong agentpool object + with self.assertRaises(CLIInternalError): + dec_1.update_label_tag_taint(None) + agentpool_1 = self.create_initialized_agentpool_instance( + node_labels={"abc": "xyz"}, tags={"123": "456"}, node_taints=["test_node_taints"] + ) + dec_1.context.attach_agentpool(agentpool_1) + dec_agentpool_1 = dec_1.update_label_tag_taint(agentpool_1) + if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER: + grond_truth_agentpool_1 = self.create_initialized_agentpool_instance( + node_labels="test_nodepool_labels", tags="test_nodepool_tags", node_taints=[] + ) + else: + grond_truth_agentpool_1 = self.create_initialized_agentpool_instance( + node_labels="test_labels", tags="test_tags", node_taints=[] + ) + self.assertEqual(dec_agentpool_1, grond_truth_agentpool_1) + + +class AKSAgentPoolUpdateDecoratorStandaloneModeTestCase(AKSAgentPoolUpdateDecoratorCommonTestCase): + def setUp(self): + self.cli_ctx = MockCLI() + self.cmd = MockCmd(self.cli_ctx) + self.resource_type = ResourceType.MGMT_CONTAINERSERVICE + self.agentpool_decorator_mode = AgentPoolDecoratorMode.STANDALONE + self.models = AKSAgentPoolModels(self.cmd, self.resource_type, self.agentpool_decorator_mode) + self.client = MockClient() + + def test_ensure_agentpool(self): + self.common_ensure_agentpool() + + def test_fetch_agentpool(self): + dec_1 = AKSAgentPoolUpdateDecorator( + self.cmd, + self.client, + { + "resource_group_name": "test_resource_group_name", + "cluster_name": "test_cluster_name", + "nodepool_name": "test_nodepool_name", + }, + self.resource_type, + self.agentpool_decorator_mode, + ) + self.client.get = Mock(return_value=self.create_initialized_agentpool_instance()) + with patch( + "azure.cli.command_modules.acs.agentpool_decorator.cf_agent_pools", + return_value=Mock(list=Mock(return_value=[])), + ): + dec_agentpool_1 = dec_1.fetch_agentpool() + ground_truth_agentpool_1 = self.create_initialized_agentpool_instance() + self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1) + self.assertEqual(dec_agentpool_1, dec_1.context.agentpool) + 
self.client.get.assert_called_once_with("test_resource_group_name", "test_cluster_name", "test_nodepool_name") + + def test_update_auto_scaler_properties(self): + self.common_update_auto_scaler_properties() + + def test_update_label_tag_taint(self): + self.common_update_label_tag_taint() + + def test_update_agentpool_profile_default(self): + import inspect + + from azure.cli.command_modules.acs.custom import aks_agentpool_update + + optional_params = {} + positional_params = [] + for _, v in inspect.signature(aks_agentpool_update).parameters.items(): + if v.default != v.empty: + optional_params[v.name] = v.default + else: + positional_params.append(v.name) + ground_truth_positional_params = [ + "cmd", + "client", + "resource_group_name", + "cluster_name", + "nodepool_name", + ] + self.assertEqual(positional_params, ground_truth_positional_params) + + # prepare a dictionary of default parameters + raw_param_dict = { + "resource_group_name": "test_rg_name", + "cluster_name": "test_cluster_name", + "nodepool_name": "test_nodepool_name", + } + raw_param_dict.update(optional_params) + + # default value in `aks_agentpool_update` + dec_1 = AKSAgentPoolUpdateDecorator( + self.cmd, + self.client, + raw_param_dict, + self.resource_type, + self.agentpool_decorator_mode, + ) + self.client.get = Mock( + return_value=self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_name") + ) + dec_agentpool_1 = dec_1.update_agentpool_profile_default() + ground_truth_agentpool_1 = self.create_initialized_agentpool_instance( + nodepool_name="test_nodepool_name", + ) + self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1) + + dec_1.context.raw_param.print_usage_statistics() + + def test_update_agentpool(self): + dec_1 = AKSAgentPoolUpdateDecorator( + self.cmd, + self.client, + { + "resource_group_name": "test_resource_group_name", + "cluster_name": "test_cluster_name", + "nodepool_name": "test_nodepool_name", + }, + self.resource_type, + self.agentpool_decorator_mode, + ) + # fail on passing the wrong agentpool object + with self.assertRaises(CLIInternalError): + dec_1.update_agentpool(None) + agentpool_1 = self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_name") + dec_1.context.attach_agentpool(agentpool_1) + with patch("azure.cli.command_modules.acs.agentpool_decorator.sdk_no_wait") as put_agentpool: + dec_1.update_agentpool(agentpool_1) + put_agentpool.assert_called_once_with( + False, + self.client.begin_create_or_update, + "test_resource_group_name", + "test_cluster_name", + "test_nodepool_name", + agentpool_1, + headers={}, + ) + + +class AKSAgentPoolUpdateDecoratorManagedClusterModeTestCase(AKSAgentPoolUpdateDecoratorCommonTestCase): + def setUp(self): + self.cli_ctx = MockCLI() + self.cmd = MockCmd(self.cli_ctx) + self.resource_type = ResourceType.MGMT_CONTAINERSERVICE + self.agentpool_decorator_mode = AgentPoolDecoratorMode.MANAGED_CLUSTER + self.models = AKSAgentPoolModels(self.cmd, self.resource_type, self.agentpool_decorator_mode) + self.client = MockClient() + + def test_ensure_agentpool(self): + self.common_ensure_agentpool() + + def test_fetch_agentpool(self): + dec_1 = AKSAgentPoolUpdateDecorator( + self.cmd, + self.client, + { + "resource_group_name": "test_resource_group_name", + "name": "test_cluster_name", + "nodepool_name": "test_nodepool_name", + }, + self.resource_type, + self.agentpool_decorator_mode, + ) + agentpools = [ + self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_1"), +
self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_2"), + ] + dec_agentpool_1 = dec_1.fetch_agentpool(agentpools) + ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_1") + self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1) + self.assertEqual(dec_agentpool_1, dec_1.context.agentpool) + + def test_update_auto_scaler_properties(self): + self.common_update_auto_scaler_properties() + + def test_update_label_tag_taint(self): + self.common_update_label_tag_taint() + + def test_update_agentpool_profile_default(self): + import inspect + + from azure.cli.command_modules.acs.custom import aks_update + + optional_params = {} + positional_params = [] + for _, v in inspect.signature(aks_update).parameters.items(): + if v.default != v.empty: + optional_params[v.name] = v.default + else: + positional_params.append(v.name) + ground_truth_positional_params = [ + "cmd", + "client", + "resource_group_name", + "name", + ] + self.assertEqual(positional_params, ground_truth_positional_params) + + # prepare a dictionary of default parameters + raw_param_dict = { + "resource_group_name": "test_rg_name", + "name": "test_cluster_name", + } + raw_param_dict.update(optional_params) + + # default value in `aks_update` + dec_1 = AKSAgentPoolUpdateDecorator( + self.cmd, + self.client, + raw_param_dict, + self.resource_type, + self.agentpool_decorator_mode, + ) + agentpools = [ + self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_1"), + self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_2"), + ] + dec_agentpool_1 = dec_1.update_agentpool_profile_default(agentpools) + ground_truth_agentpool_1 = self.create_initialized_agentpool_instance( + nodepool_name="test_nodepool_1", + ) + self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1) + + dec_1.context.raw_param.print_usage_statistics() if __name__ == "__main__":