diff --git a/src/containerapp/HISTORY.rst b/src/containerapp/HISTORY.rst
index 2a1a1d2fc27..a29e589e1a4 100644
--- a/src/containerapp/HISTORY.rst
+++ b/src/containerapp/HISTORY.rst
@@ -3,6 +3,9 @@
 Release History
 ===============
 
+0.3.3
+++++++
+* Improved 'az containerapp up' handling of environment locations
 0.3.2
 ++++++
 
diff --git a/src/containerapp/azext_containerapp/_clients.py b/src/containerapp/azext_containerapp/_clients.py
index 4362e1d51aa..d217e12be5e 100644
--- a/src/containerapp/azext_containerapp/_clients.py
+++ b/src/containerapp/azext_containerapp/_clients.py
@@ -14,7 +14,6 @@
 
 logger = get_logger(__name__)
 
-API_VERSION = "2021-03-01"
 PREVIEW_API_VERSION = "2022-01-01-preview"
 STABLE_API_VERSION = "2022-03-01"
 POLLING_TIMEOUT = 60  # how many seconds before exiting
@@ -74,7 +73,7 @@ class ContainerAppClient():
     @classmethod
     def create_or_update(cls, cmd, resource_group_name, name, container_app_envelope, no_wait=False):
         management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager
-        api_version = PREVIEW_API_VERSION
+        api_version = STABLE_API_VERSION
         sub_id = get_subscription_id(cmd.cli_ctx)
         url_fmt = "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/containerApps/{}?api-version={}"
         request_url = url_fmt.format(
diff --git a/src/containerapp/azext_containerapp/_up_utils.py b/src/containerapp/azext_containerapp/_up_utils.py
index 013baabc681..aba2e580c83 100644
--- a/src/containerapp/azext_containerapp/_up_utils.py
+++ b/src/containerapp/azext_containerapp/_up_utils.py
@@ -38,10 +38,11 @@
     _get_default_containerapps_location,
     safe_get,
     is_int,
-    create_service_principal_for_rbac,
+    create_service_principal_for_github_action,
     repo_url_to_name,
     get_container_app_if_exists,
-    trigger_workflow
+    trigger_workflow,
+    _ensure_location_allowed
 )
 
 from ._constants import MAXIMUM_SECRET_LENGTH
@@ -62,6 +63,8 @@ def __init__(self, cmd, name: str, location: str, exists: bool = None):
         self.cmd = cmd
         self.name = name
         self.location = _get_default_containerapps_location(cmd, location)
+        if self.location.lower() == "northcentralusstage":
+            self.location = "eastus"
         self.exists = exists
 
         self.check_exists()
@@ -151,7 +154,7 @@ def __init__(
             rg = parse_resource_id(name)["resource_group"]
             if resource_group.name != rg:
                 self.resource_group = ResourceGroup(cmd, rg, location)
-        self.location = _get_default_containerapps_location(cmd, location)
+        self.location = location
         self.logs_key = logs_key
         self.logs_customer_id = logs_customer_id
 
@@ -164,7 +167,7 @@ def set_name(self, name_or_rid):
                 self.resource_group = ResourceGroup(
                     self.cmd,
                     rg,
-                    _get_default_containerapps_location(self.cmd, self.location),
+                    self.location,
                 )
         else:
             self.name = name_or_rid
@@ -188,6 +191,7 @@ def create_if_needed(self, app_name):
             )  # TODO use .info()
 
     def create(self):
+        self.location = validate_environment_location(self.cmd, self.location)
         env = create_managed_environment(
             self.cmd,
             self.name,
@@ -290,8 +294,11 @@ def create_acr(self):
         registry_rg = self.resource_group
         url = self.registry_server
        registry_name = url[: url.rindex(".azurecr.io")]
+        location = "eastus"
+        if self.env.location and self.env.location.lower() != "northcentralusstage":
+            location = self.env.location
         registry_def = create_new_acr(
-            self.cmd, registry_name, registry_rg.name, self.env.location
+            self.cmd, registry_name, registry_rg.name, location
         )
         self.registry_server = registry_def.login_server
 
@@ -339,9 +346,9 @@ def _create_service_principal(cmd, resource_group_name, env_resource_group_name)
         scopes.append(
             f"/subscriptions/{get_subscription_id(cmd.cli_ctx)}/resourceGroups/{env_resource_group_name}"
         )
-    sp = create_service_principal_for_rbac(cmd, scopes=scopes, role="contributor")
+    sp = create_service_principal_for_github_action(cmd, scopes=scopes, role="contributor")
 
-    logger.warning(f"Created service principal: {sp['displayName']} with ID {sp['appId']}")
+    logger.warning(f"Created service principal with ID {sp['appId']}")
 
     return sp["appId"], sp["password"], sp["tenant"]
 
@@ -435,7 +442,13 @@ def _get_ingress_and_target_port(ingress, target_port, dockerfile_content: "list
     return ingress, target_port
 
 
-def _validate_up_args(source, image, repo, registry_server):
+def _validate_up_args(cmd, source, image, repo, registry_server):
+    disallowed_params = ["--only-show-errors", "--output", "-o"]
+    command_args = cmd.cli_ctx.data.get("safe_params", [])
+    for a in disallowed_params:
+        if a in command_args:
+            raise ValidationError(f"Argument {a} is not allowed for 'az containerapp up'")
+
     if not source and not image and not repo:
         raise RequiredArgumentMissingError(
             "You must specify either --source, --repo, or --image"
@@ -782,3 +795,69 @@ def find_existing_acr(cmd, app: "ContainerApp"):
         app.should_create_acr = False
         return acr.name, parse_resource_id(acr.id)["resource_group"]
     return None, None
+
+
+def validate_environment_location(cmd, location):
+    MAX_ENV_PER_LOCATION = 2
+    env_list = list_managed_environments(cmd)
+
+    locations = [l["location"] for l in env_list]
+    locations = list(set(locations))  # remove duplicates
+
+    location_count = {}
+    for loc in locations:
+        location_count[loc] = len([e for e in env_list if e["location"] == loc])
+
+    disallowed_locations = []
+    for _, value in enumerate(location_count):
+        if location_count[value] > MAX_ENV_PER_LOCATION - 1:
+            disallowed_locations.append(value)
+
+    res_locations = list_environment_locations(cmd)
+    res_locations = [l for l in res_locations if l not in disallowed_locations]
+
+    allowed_locs = ", ".join(res_locations)
+
+    if location:
+        try:
+            _ensure_location_allowed(cmd, location, "Microsoft.App", "managedEnvironments")
+        except Exception:  # pylint: disable=broad-except
+            raise ValidationError("You cannot create a Containerapp environment in location {}. List of eligible locations: {}.".format(location, allowed_locs))
+
+    if len(res_locations) > 0:
+        if not location:
+            logger.warning("Creating environment on location {}.".format(res_locations[0]))
+            return res_locations[0]
+        if location in disallowed_locations:
+            raise ValidationError("You have more than {} environments in location {}. List of eligible locations: {}.".format(MAX_ENV_PER_LOCATION, location, allowed_locs))
+        return location
+    else:
+        raise ValidationError("You cannot create any more environments. Environments are limited to {} per location in a subscription. Please specify an existing environment using --environment.".format(MAX_ENV_PER_LOCATION))
+
+
+def list_environment_locations(cmd):
+    from ._utils import providers_client_factory
+    providers_client = providers_client_factory(cmd.cli_ctx, get_subscription_id(cmd.cli_ctx))
+    resource_types = getattr(providers_client.get("Microsoft.App"), 'resource_types', [])
+    res_locations = []
+    for res in resource_types:
+        if res and getattr(res, 'resource_type', "") == "managedEnvironments":
+            res_locations = getattr(res, 'locations', [])
+
+    res_locations = [res_loc.lower().replace(" ", "").replace("(", "").replace(")", "") for res_loc in res_locations if res_loc.strip()]
+
+    return res_locations
+
+
+def check_env_name_on_rg(cmd, managed_env, resource_group_name, location):
+    if location:
+        _ensure_location_allowed(cmd, location, "Microsoft.App", "managedEnvironments")
+    if managed_env and resource_group_name and location:
+        env_def = None
+        try:
+            env_def = ManagedEnvironmentClient.show(cmd, resource_group_name, parse_resource_id(managed_env)["name"])
+        except:
+            pass
+        if env_def:
+            if location != env_def["location"]:
+                raise ValidationError("Environment {} already exists in resource group {} on location {}, cannot change location of existing environment to {}.".format(parse_resource_id(managed_env)["name"], resource_group_name, env_def["location"], location))
diff --git a/src/containerapp/azext_containerapp/_utils.py b/src/containerapp/azext_containerapp/_utils.py
index 06bb2054276..fa34e6bb1c1 100644
--- a/src/containerapp/azext_containerapp/_utils.py
+++ b/src/containerapp/azext_containerapp/_utils.py
@@ -30,126 +30,36 @@ def validate_container_app_name(name):
                               f"Please shorten {name}")
 
 
-# original implementation at azure.cli.command_modules.role.custom.create_service_principal_for_rbac
-# reimplemented to remove incorrect warning statements
-def create_service_principal_for_rbac(  # pylint:disable=too-many-statements,too-many-locals, too-many-branches, unused-argument, inconsistent-return-statements
-        cmd, name=None, years=None, create_cert=False, cert=None, scopes=None, role=None,
-        show_auth_for_sdk=None, skip_assignment=False, keyvault=None):
-    from azure.cli.command_modules.role.custom import (_graph_client_factory, TZ_UTC, _process_service_principal_creds,
-                                                       _validate_app_dates, create_application,
-                                                       _create_service_principal, _create_role_assignment,
-                                                       _error_caused_by_role_assignment_exists)
-
-    if role and not scopes or not role and scopes:
-        raise ArgumentUsageError("Usage error: To create role assignments, specify both --role and --scopes.")
-
-    graph_client = _graph_client_factory(cmd.cli_ctx)
-
-    years = years or 1
-    _RETRY_TIMES = 36
-    existing_sps = None
-
-    if not name:
-        # No name is provided, create a new one
-        app_display_name = 'azure-cli-' + datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
-    else:
-        app_display_name = name
-        # patch existing app with the same displayName to make the command idempotent
-        query_exp = "displayName eq '{}'".format(name)
-        existing_sps = list(graph_client.service_principals.list(filter=query_exp))
-
-    app_start_date = datetime.now(TZ_UTC)
-    app_end_date = app_start_date + relativedelta(years=years or 1)
-
-    password, public_cert_string, cert_file, cert_start_date, cert_end_date = \
-        _process_service_principal_creds(cmd.cli_ctx, years, app_start_date, app_end_date, cert, create_cert,
-                                         None, keyvault)
-
-    app_start_date, app_end_date, cert_start_date, cert_end_date = \
-        _validate_app_dates(app_start_date, app_end_date, cert_start_date, cert_end_date)
-
-    aad_application = create_application(cmd,
-                                         display_name=app_display_name,
-                                         available_to_other_tenants=False,
-                                         password=password,
-                                         key_value=public_cert_string,
-                                         start_date=app_start_date,
-                                         end_date=app_end_date,
-                                         credential_description='rbac')
-    # pylint: disable=no-member
-    app_id = aad_application.app_id
-
-    # retry till server replication is done
-    aad_sp = existing_sps[0] if existing_sps else None
-    if not aad_sp:
-        for retry_time in range(0, _RETRY_TIMES):
-            try:
-                aad_sp = _create_service_principal(cmd.cli_ctx, app_id, resolve_app=False)
-                break
-            except Exception as ex:  # pylint: disable=broad-except
-                err_msg = str(ex)
-                if retry_time < _RETRY_TIMES and (
-                        ' does not reference ' in err_msg or
-                        ' does not exist ' in err_msg or
-                        'service principal being created must in the local tenant' in err_msg):
-                    logger.warning("Creating service principal failed with error '%s'. Retrying: %s/%s",
-                                   err_msg, retry_time + 1, _RETRY_TIMES)
-                    time.sleep(5)
-                else:
-                    logger.warning(
-                        "Creating service principal failed for '%s'. Trace followed:\n%s",
-                        app_id, ex.response.headers
-                        if hasattr(ex, 'response') else ex)  # pylint: disable=no-member
-                    raise
-    sp_oid = aad_sp.object_id
-
-    if role:
-        for scope in scopes:
-            # logger.warning("Creating '%s' role assignment under scope '%s'", role, scope)
-            # retry till server replication is done
-            for retry_time in range(0, _RETRY_TIMES):
-                try:
-                    _create_role_assignment(cmd.cli_ctx, role, sp_oid, None, scope, resolve_assignee=False,
-                                            assignee_principal_type='ServicePrincipal')
-                    break
-                except Exception as ex:
-                    if retry_time < _RETRY_TIMES and ' does not exist in the directory ' in str(ex):
-                        time.sleep(5)
-                        logger.warning('  Retrying role assignment creation: %s/%s', retry_time + 1,
-                                       _RETRY_TIMES)
-                        continue
-                    if _error_caused_by_role_assignment_exists(ex):
-                        logger.warning('  Role assignment already exists.\n')
-                        break
-
-                    # dump out history for diagnoses
-                    logger.warning('  Role assignment creation failed.\n')
-                    if getattr(ex, 'response', None) is not None:
-                        logger.warning('  role assignment response headers: %s\n',
-                                       ex.response.headers)  # pylint: disable=no-member
-                    raise
-
-    if show_auth_for_sdk:
-        from azure.cli.core._profile import Profile
-        profile = Profile(cli_ctx=cmd.cli_ctx)
-        result = profile.get_sp_auth_info(scopes[0].split('/')[2] if scopes else None,
-                                          app_id, password, cert_file)
-        # sdk-auth file should be in json format all the time, hence the print
-        print(json.dumps(result, indent=2))
-        return
-
-    result = {
-        'appId': app_id,
-        'password': password,
-        'displayName': app_display_name,
-        'tenant': graph_client.config.tenant_id
+def create_service_principal_for_github_action(cmd, scopes=None, role="contributor"):
+    from azure.cli.command_modules.role.custom import (create_application, create_service_principal,
+                                                       create_role_assignment, show_service_principal)
+    from azure.cli.command_modules.role._graph_client import GraphClient
+
+    client = GraphClient(cmd.cli_ctx)
+    now = datetime.utcnow()
+    app_display_name = 'azure-cli-' + now.strftime('%Y-%m-%d-%H-%M-%S')
+    app = create_application(cmd, client, display_name=app_display_name)
+    sp = create_service_principal(cmd, identifier=app["appId"])
+    for scope in scopes:
+        create_role_assignment(cmd, role=role, assignee=sp["id"], scope=scope)
+
+    service_principal = show_service_principal(client, sp["id"])
+
+    body = {
+        "passwordCredential": {
+            "displayName": None,
+            "startDateTime": now.strftime('%Y-%m-%dT%H:%M:%SZ'),
+            "endDateTime": (now + relativedelta(years=1)).strftime('%Y-%m-%dT%H:%M:%SZ'),
+        }
+    }
+
+    add_password_result = client.service_principal_password_add(service_principal["id"], body)
+
+    return {
+        'appId': service_principal['appId'],
+        'password': add_password_result['secretText'],
+        'tenant': client.tenant
     }
-    if cert_file:
-        logger.warning(
-            "Please copy %s to a safe place. When you run 'az login', provide the file path in the --password argument",
-            cert_file)
-        result['fileWithCertAndPrivateKey'] = cert_file
-    return result
 
 
 def is_int(s):
@@ -177,8 +87,9 @@ def get_workflow(github_repo, name):  # pylint: disable=inconsistent-return-stat
 
 
 def trigger_workflow(token, repo, name, branch):
-    logger.warning("Triggering Github Action")
-    get_workflow(get_github_repo(token, repo), name).create_dispatch(branch)
+    wf = get_workflow(get_github_repo(token, repo), name)
+    logger.warning(f"Triggering Github Action: {wf.path}")
+    wf.create_dispatch(branch)
 
 
 def await_github_action(cmd, token, repo, branch, name, resource_group_name, timeout_secs=1200):
diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py
index ce52bd8e5d8..676212133f5 100644
--- a/src/containerapp/azext_containerapp/custom.py
+++ b/src/containerapp/azext_containerapp/custom.py
@@ -2015,12 +2015,14 @@ def containerapp_up(cmd,
                     service_principal_tenant_id=None):
     from ._up_utils import (_validate_up_args, _reformat_image, _get_dockerfile_content, _get_ingress_and_target_port,
                             ResourceGroup, ContainerAppEnvironment, ContainerApp, _get_registry_from_app,
-                            _get_registry_details, _create_github_action, _set_up_defaults, up_output, AzureContainerRegistry)
+                            _get_registry_details, _create_github_action, _set_up_defaults, up_output, AzureContainerRegistry,
+                            check_env_name_on_rg)
     HELLOWORLD = "mcr.microsoft.com/azuredocs/containerapps-helloworld"
     dockerfile = "Dockerfile"  # for now the dockerfile name must be "Dockerfile" (until GH actions API is updated)
 
-    _validate_up_args(source, image, repo, registry_server)
+    _validate_up_args(cmd, source, image, repo, registry_server)
     validate_container_app_name(name)
+    check_env_name_on_rg(cmd, managed_env, resource_group_name, location)
 
     image = _reformat_image(source, repo, image)
     token = None if not repo else get_github_access_token(cmd, ["admin:repo_hook", "repo", "workflow"], token)
diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_scenario.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_scenario.py
index f8b81030eeb..c438c4c53ba 100644
--- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_scenario.py
+++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_scenario.py
@@ -7,6 +7,7 @@
 import platform
 from unittest import mock
 import time
+import unittest
 
 from azext_containerapp.custom import containerapp_ssh
 from azure.cli.testsdk.reverse_dependency import get_dummy_cli
@@ -18,7 +19,7 @@
 TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
 
 
-@live_only()
+@live_only()  # Containerapp tests can only be run live due to log analytics name being randomly generated every time
 class ContainerappScenarioTest(ScenarioTest):
     @AllowLargeResponse(8192)
     @ResourceGroupPreparer(location="eastus2")
@@ -218,6 +219,30 @@ def test_container_acr(self, resource_group):
             JMESPathCheck('length(properties.configuration.secrets)', 1),
         ])
 
+        # Update Container App with ACR
+        update_string = 'containerapp update -g {} -n {} --min-replicas 0 --max-replicas 1 --set-env-vars testenv=testing'.format(
+            resource_group, containerapp_name)
+        self.cmd(update_string, checks=[
+            JMESPathCheck('name', containerapp_name),
+            JMESPathCheck('properties.configuration.registries[0].server', registry_server),
+            JMESPathCheck('properties.configuration.registries[0].username', registry_username),
+            JMESPathCheck('length(properties.configuration.secrets)', 1),
+            JMESPathCheck('properties.template.scale.minReplicas', '0'),
+            JMESPathCheck('properties.template.scale.maxReplicas', '1'),
+            JMESPathCheck('length(properties.template.containers[0].env)', 1),
+        ])
+
+        # Add secrets to Container App with ACR
+        containerapp_secret = self.cmd('containerapp secret list -g {} -n {}'.format(resource_group, containerapp_name)).get_output_in_json()
+        secret_name = containerapp_secret[0]["name"]
+        secret_string = 'containerapp secret set -g {} -n {} --secrets newsecret=test'.format(resource_group, containerapp_name)
+        self.cmd(secret_string, checks=[
+            JMESPathCheck('length(@)', 2),
+        ])
+
+        with self.assertRaises(CLIError):
+            # Removing ACR password should fail since it is needed for ACR
+            self.cmd('containerapp secret remove -g {} -n {} --secret-names {}'.format(resource_group, containerapp_name, secret_name))
 
     @AllowLargeResponse(8192)
     @ResourceGroupPreparer(location="eastus")
@@ -286,6 +311,7 @@ def test_containerapp_update(self, resource_group):
             JMESPathCheck('properties.template.containers[1].resources.memory', '1.5Gi'),
         ])
 
+    @unittest.skip("API only on stage currently")
     @live_only()  # VCR.py can't seem to handle websockets (only --live works)
     # @ResourceGroupPreparer(location="centraluseuap")
     @mock.patch("azext_containerapp._ssh_utils._resize_terminal")
@@ -358,4 +384,4 @@ def test_containerapp_logstream(self, resource_group):
         self.cmd(f'containerapp env create -g {resource_group} -n {env_name}')
         self.cmd(f'containerapp create -g {resource_group} -n {containerapp_name} --environment {env_name} --min-replicas 1 --ingress external --target-port 80')
 
-        self.cmd(f'containerapp log tail -n {containerapp_name} -g {resource_group}')
+        self.cmd(f'containerapp logs show -n {containerapp_name} -g {resource_group}')
diff --git a/src/containerapp/setup.py b/src/containerapp/setup.py
index d0f615849f3..ca2dd65ff30 100644
--- a/src/containerapp/setup.py
+++ b/src/containerapp/setup.py
@@ -17,7 +17,7 @@
 
 # TODO: Confirm this is the right version number you want and it matches your
 # HISTORY.rst entry.
-VERSION = '0.3.2'
+VERSION = '0.3.3'
 
 # The full list of classifiers is available at