diff --git a/.teamcity/components/generated/services.kt b/.teamcity/components/generated/services.kt index 97d56fdc3d54..36fc28db37e0 100644 --- a/.teamcity/components/generated/services.kt +++ b/.teamcity/components/generated/services.kt @@ -8,6 +8,7 @@ var services = mapOf( "appconfiguration" to "App Configuration", "appservice" to "AppService", "applicationinsights" to "Application Insights", + "arckubernetes" to "ArcKubernetes", "attestation" to "Attestation", "authorization" to "Authorization", "automation" to "Automation", diff --git a/examples/arckubernetes/README.md b/examples/arckubernetes/README.md new file mode 100644 index 000000000000..17b079fcb2d9 --- /dev/null +++ b/examples/arckubernetes/README.md @@ -0,0 +1,24 @@ +## Example: Azure Arc Kubernetes + +This example demonstrates how to connect a Kubernetes cluster to Azure Arc. + +## Creates + +1. A [Resource Group](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/resource_group) +2. A [Virtual Network](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/virtual_network) +3. A [Subnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/subnet) +4. A [Linux Virtual Machine](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/linux_virtual_machine) +5. An [Arc Kubernetes Cluster](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/arc_kubernetes_cluster) +6. A [Kind Cluster](https://kind.sigs.k8s.io/) on the [Linux Virtual Machine](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/linux_virtual_machine), created via the [remote-exec Provisioner](https://developer.hashicorp.com/terraform/language/resources/provisioners/remote-exec) +7. [Azure Arc agents](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/conceptual-agent-overview) in the [Kind Cluster](https://kind.sigs.k8s.io/), installed via the [remote-exec Provisioner](https://developer.hashicorp.com/terraform/language/resources/provisioners/remote-exec) + +~> **NOTE:** To connect an existing Kubernetes cluster to Azure Arc, the following conditions must be met: + +* An [Arc Kubernetes Cluster](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/arc_kubernetes_cluster) must be created in Azure +* [Azure Arc agents](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/conceptual-agent-overview) must be installed in the Kubernetes cluster that is being connected to Azure + +## Usage + +- Provide values for all variables +- Create the resources with `terraform apply` +- Destroy the resources with `terraform destroy` diff --git a/examples/arckubernetes/main.tf b/examples/arckubernetes/main.tf new file mode 100644 index 000000000000..289fa1e7f36f --- /dev/null +++ b/examples/arckubernetes/main.tf @@ -0,0 +1,154 @@ +provider "azurerm" { + features { + resource_group { + prevent_deletion_if_contains_resources = false + } + } +} + +data "azurerm_subscription" "current" {} +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "example" { + name = "${var.prefix}-rg" + location = var.location +} + +resource "azurerm_virtual_network" "example" { + name = "${var.prefix}-vn" + address_space = ["10.0.0.0/16"] + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name +} + +resource "azurerm_subnet" "example" { + name = "${var.prefix}-subnet" + resource_group_name = azurerm_resource_group.example.name + virtual_network_name = azurerm_virtual_network.example.name + address_prefixes = 
["10.0.2.0/24"] +} + +resource "azurerm_public_ip" "example" { + name = "${var.prefix}-pi" + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name + allocation_method = "Static" +} + +resource "azurerm_network_interface" "example" { + name = "${var.prefix}-ni" + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name + ip_configuration { + name = "internal" + subnet_id = azurerm_subnet.example.id + private_ip_address_allocation = "Dynamic" + public_ip_address_id = azurerm_public_ip.example.id + } +} + +resource "azurerm_network_security_group" "example" { + name = "${var.prefix}NetworkSecurityGroup" + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name + security_rule { + name = "SSH" + priority = 1001 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "*" + destination_address_prefix = "*" + } +} + +resource "azurerm_network_interface_security_group_association" "example" { + network_interface_id = azurerm_network_interface.example.id + network_security_group_id = azurerm_network_security_group.example.id +} + +resource "azurerm_linux_virtual_machine" "example" { + name = "${var.prefix}-lvm" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + size = "Standard_F2" + admin_username = var.user_name + admin_password = var.password + provision_vm_agent = false + allow_extension_operations = false + disable_password_authentication = false + network_interface_ids = [ + azurerm_network_interface.example.id, + ] + os_disk { + caching = "ReadWrite" + storage_account_type = "Standard_LRS" + } + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "18.04-LTS" + version = "latest" + } +} + + +resource "azurerm_arc_kubernetes_cluster" "example" { + name = "${var.prefix}-akc" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + agent_public_key_certificate = var.public_key + identity { + type = "SystemAssigned" + } + + + connection { + type = "ssh" + host = azurerm_public_ip.example.ip_address + user = var.user_name + password = var.password + } + + provisioner "file" { + content = templatefile("testdata/install_agent.sh.tftpl", { + subscription_id = data.azurerm_subscription.current.subscription_id + resource_group_name = azurerm_resource_group.example.name + cluster_name = azurerm_arc_kubernetes_cluster.example.name + location = azurerm_resource_group.example.location + tenant_id = data.azurerm_client_config.current.tenant_id + working_dir = "/home/${var.user_name}" + }) + destination = "/home/${var.user_name}/install_agent.sh" + } + + provisioner "file" { + source = "testdata/install_agent.py" + destination = "/home/${var.user_name}/install_agent.py" + } + + provisioner "file" { + source = "testdata/kind.yaml" + destination = "/home/${var.user_name}/kind.yaml" + } + + provisioner "file" { + content = var.private_pem + destination = "/home/${var.user_name}/private.pem" + } + + provisioner "remote-exec" { + inline = [ + "sudo sed -i 's/\r$//' /home/${var.user_name}/install_agent.sh", + "sudo chmod +x /home/${var.user_name}/install_agent.sh", + "bash /home/${var.user_name}/install_agent.sh > /home/${var.user_name}/agent_log", + ] + } + + + depends_on = [ + 
azurerm_linux_virtual_machine.example + ] +} diff --git a/examples/arckubernetes/testdata/install_agent.py b/examples/arckubernetes/testdata/install_agent.py new file mode 100644 index 000000000000..c4623d8b2e25 --- /dev/null +++ b/examples/arckubernetes/testdata/install_agent.py @@ -0,0 +1,248 @@ +import argparse +import json +import logging as logger +import os +import platform +import shutil +import stat +import subprocess +import time +import urllib +from subprocess import PIPE, Popen +from urllib import request + +HELM_VERSION = 'v3.6.3' +HELM_STORAGE_URL = "https://k8connecthelm.azureedge.net" +Pre_Onboarding_Helm_Charts_Folder_Name = 'PreOnboardingChecksCharts' + + +def get_helm_registry(config_dp_endpoint): + # Setting uri + get_chart_location_url = "{}/{}/GetLatestHelmPackagePath?api-version=2019-11-01-preview".format( + config_dp_endpoint, 'azure-arc-k8sagents') + + try: + response = urllib.request.urlopen( + request.Request(get_chart_location_url, method="POST")) + except Exception as e: + raise Exception("Failed to get helm registry." + str(e)) + + try: + return json.load(response).get('repositoryPath') + except Exception as e: + raise Exception( + "Error while fetching helm chart registry path from JSON response: " + str(e)) + + +def pull_helm_chart(registry_path, kube_config, kube_context, helm_client_location, chart_name='azure-arc-k8sagents', retry_count=5, retry_delay=3): + cmd_helm_chart_pull = [helm_client_location, + "chart", "pull", registry_path] + if kube_config: + cmd_helm_chart_pull.extend(["--kubeconfig", kube_config]) + if kube_context: + cmd_helm_chart_pull.extend(["--kube-context", kube_context]) + for i in range(retry_count): + response_helm_chart_pull = subprocess.Popen( + cmd_helm_chart_pull, stdout=PIPE, stderr=PIPE) + _, error_helm_chart_pull = response_helm_chart_pull.communicate() + if response_helm_chart_pull.returncode != 0: + if i == retry_count - 1: + raise Exception("Unable to pull {} helm chart from the registry '{}': ".format( + chart_name, registry_path) + error_helm_chart_pull.decode("ascii")) + time.sleep(retry_delay) + else: + break + + +def export_helm_chart(registry_path, chart_export_path, kube_config, kube_context, helm_client_location, chart_name='azure-arc-k8sagents'): + cmd_helm_chart_export = [helm_client_location, "chart", + "export", registry_path, "--destination", chart_export_path] + if kube_config: + cmd_helm_chart_export.extend(["--kubeconfig", kube_config]) + if kube_context: + cmd_helm_chart_export.extend(["--kube-context", kube_context]) + response_helm_chart_export = subprocess.Popen( + cmd_helm_chart_export, stdout=PIPE, stderr=PIPE) + _, error_helm_chart_export = response_helm_chart_export.communicate() + if response_helm_chart_export.returncode != 0: + raise Exception("Unable to export {} helm chart from the registry '{}': ".format( + chart_name, registry_path) + error_helm_chart_export.decode("ascii")) + + +def get_chart_path(registry_path, kube_config, kube_context, helm_client_location, chart_folder_name='AzureArcCharts', chart_name='azure-arc-k8sagents'): + # Pulling helm chart from registry + os.environ['HELM_EXPERIMENTAL_OCI'] = '1' + pull_helm_chart(registry_path, kube_config, kube_context, + helm_client_location, chart_name) + + # Exporting helm chart after cleanup + chart_export_path = os.path.join( + os.path.expanduser('~'), '.azure', chart_folder_name) + try: + if os.path.isdir(chart_export_path): + shutil.rmtree(chart_export_path) + except: + logger.warning("Unable to cleanup the {} already present on the 
machine. In case of failure, please cleanup the directory '{}' and try again.".format( + chart_folder_name, chart_export_path)) + + export_helm_chart(registry_path, chart_export_path, kube_config, + kube_context, helm_client_location, chart_name) + + # Returning helm chart path + helm_chart_path = os.path.join(chart_export_path, chart_name) + if chart_folder_name == Pre_Onboarding_Helm_Charts_Folder_Name: + chart_path = helm_chart_path + else: + chart_path = os.getenv('HELMCHART') if os.getenv( + 'HELMCHART') else helm_chart_path + + return chart_path + + +def install_helm_client(): + # Fetch system related info + operating_system = platform.system().lower() + platform.machine() + + # Set helm binary download & install locations + if (operating_system == 'windows'): + download_location_string = f'.azure\\helm\\{HELM_VERSION}\\helm-{HELM_VERSION}-{operating_system}-amd64.zip' + install_location_string = f'.azure\\helm\\{HELM_VERSION}\\{operating_system}-amd64\\helm.exe' + requestUri = f'{HELM_STORAGE_URL}/helm/helm-{HELM_VERSION}-{operating_system}-amd64.zip' + elif (operating_system == 'linux' or operating_system == 'darwin'): + download_location_string = f'.azure/helm/{HELM_VERSION}/helm-{HELM_VERSION}-{operating_system}-amd64.tar.gz' + install_location_string = f'.azure/helm/{HELM_VERSION}/{operating_system}-amd64/helm' + requestUri = f'{HELM_STORAGE_URL}/helm/helm-{HELM_VERSION}-{operating_system}-amd64.tar.gz' + else: + raise Exception( + f'The {operating_system} platform is not currently supported for installing helm client.') + + download_location = os.path.expanduser( + os.path.join('~', download_location_string)) + download_dir = os.path.dirname(download_location) + install_location = os.path.expanduser( + os.path.join('~', install_location_string)) + + # Download compressed helm binary if not already present + if not os.path.isfile(download_location): + # Creating the helm folder if it doesn't exist + if not os.path.exists(download_dir): + try: + os.makedirs(download_dir) + except Exception as e: + raise Exception("Failed to create helm directory." + str(e)) + + # Downloading compressed helm client executable + logger.warning( + "Downloading helm client for the first time. This can take a few minutes...") + try: + response = urllib.request.urlopen(requestUri) + except Exception as e: + raise Exception("Failed to download helm client." + str(e)) + + responseContent = response.read() + response.close() + + # Creating the compressed helm binaries + try: + with open(download_location, 'wb') as f: + f.write(responseContent) + except Exception as e: + raise Exception("Failed to create helm executable." + str(e)) + + # Extract compressed helm binary + if not os.path.isfile(install_location): + try: + shutil.unpack_archive(download_location, download_dir) + os.chmod(install_location, os.stat( + install_location).st_mode | stat.S_IXUSR) + except Exception as e: + raise Exception("Failed to extract helm executable." 
+ str(e)) + + return install_location + + +def helm_install_release(chart_path, subscription_id, kubernetes_distro, kubernetes_infra, resource_group_name, cluster_name, + location, onboarding_tenant_id, private_key_pem, + no_wait, cloud_name, helm_client_location, onboarding_timeout="600"): + cmd_helm_install = [helm_client_location, "upgrade", "--install", "azure-arc", chart_path, + "--set", "global.subscriptionId={}".format( + subscription_id), + "--set", "global.kubernetesDistro={}".format( + kubernetes_distro), + "--set", "global.kubernetesInfra={}".format( + kubernetes_infra), + "--set", "global.resourceGroupName={}".format( + resource_group_name), + "--set", "global.resourceName={}".format(cluster_name), + "--set", "global.location={}".format(location), + "--set", "global.tenantId={}".format( + onboarding_tenant_id), + "--set", "global.onboardingPrivateKey={}".format( + private_key_pem), + "--set", "systemDefaultValues.spnOnboarding=false", + "--set", "global.azureEnvironment={}".format( + cloud_name), + "--set", "systemDefaultValues.clusterconnect-agent.enabled=true", + "--namespace", "{}".format("azure-arc-release"), + "--create-namespace", + "--output", "json"] + + if not no_wait: + # Change --timeout format for helm client to understand + onboarding_timeout = onboarding_timeout + "s" + cmd_helm_install.extend( + ["--wait", "--timeout", "{}".format(onboarding_timeout)]) + response_helm_install = Popen(cmd_helm_install, stdout=PIPE, stderr=PIPE) + _, error_helm_install = response_helm_install.communicate() + if response_helm_install.returncode != 0: + raise Exception("Unable to install helm release" + error_helm_install.decode("ascii")) + + +def install_agent(): + parser = argparse.ArgumentParser( + description='Install Connected Cluster Agent') + parser.add_argument('--subscriptionId', type=str, required=True) + parser.add_argument('--resourceGroupName', type=str, required=True) + parser.add_argument('--clusterName', type=str, required=True) + parser.add_argument('--location', type=str, required=True) + parser.add_argument('--tenantId', type=str, required=True) + parser.add_argument('--privatePem', type=str, required=True) + + try: + args = parser.parse_args() + except Exception as e: + raise Exception("Failed to parse arguments." + str(e)) + + try: + with open(args.privatePem, "r") as f: + privateKey = f.read() + except Exception as e: + raise Exception("Failed to get private key." 
+ str(e)) + + # Install helm client + helm_client_location = install_helm_client() + + # Retrieving Helm chart OCI Artifact location + registry_path = get_helm_registry("https://westeurope.dp.kubernetesconfiguration.azure.com") + + # Get helm chart path + chart_path = get_chart_path( + registry_path, None, None, helm_client_location) + + helm_install_release(chart_path, + args.subscriptionId, + "generic", + "generic", + args.resourceGroupName, + args.clusterName, + args.location, + args.tenantId, + privateKey, + False, + "AZUREPUBLICCLOUD", + helm_client_location) + + +if __name__ == "__main__": + install_agent() diff --git a/examples/arckubernetes/testdata/install_agent.sh.tftpl b/examples/arckubernetes/testdata/install_agent.sh.tftpl new file mode 100644 index 000000000000..9bb2cbcbd0bf --- /dev/null +++ b/examples/arckubernetes/testdata/install_agent.sh.tftpl @@ -0,0 +1,38 @@ +#!/bin/bash + +# install docker +sudo apt-get update +sudo apt-get -y install ca-certificates curl gnupg lsb-release +sudo mkdir -m 0755 -p /etc/apt/keyrings +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +sudo chmod a+r /etc/apt/keyrings/docker.gpg +sudo apt-get update +sudo apt-get -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + +#Manage Docker as a non-root user +sudo groupadd docker || true +sudo usermod -aG docker $USER +newgrp docker << END + +#install golang and kind +sudo snap install go --classic +sudo go install sigs.k8s.io/kind@latest + +#create a new cluster +export PATH="$HOME/go/bin:$PATH" +export KUBECONFIG="${working_dir}/kind-config" +kind create cluster --name arc-kind --config kind.yaml --kubeconfig kind-config + +#install agent +python3 "${working_dir}/install_agent.py" \ +--subscriptionId "${subscription_id}" \ +--resourceGroupName "${resource_group_name}" \ +--clusterName "${cluster_name}" \ +--location "${location}" \ +--tenantId "${tenant_id}" \ +--privatePem "${working_dir}/private.pem" + +END \ No newline at end of file diff --git a/examples/arckubernetes/testdata/kind.yaml b/examples/arckubernetes/testdata/kind.yaml new file mode 100644 index 000000000000..fe336a44025d --- /dev/null +++ b/examples/arckubernetes/testdata/kind.yaml @@ -0,0 +1,20 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + # port forward 80 on the host to 80 on this node + extraPortMappings: + - containerPort: 81 + hostPort: 81 + listenAddress: "127.0.0.1" + protocol: TCP + - containerPort: 443 + hostPort: 443 + listenAddress: "127.0.0.1" + protocol: TCP diff --git a/examples/arckubernetes/variables.tf b/examples/arckubernetes/variables.tf new file mode 100644 index 000000000000..908fb80ad802 --- /dev/null +++ b/examples/arckubernetes/variables.tf @@ -0,0 +1,26 @@ +variable "prefix" { + description = "The prefix used for all resources in this example" +} + +variable "location" { + description = "The Azure location where all resources in this example should be created" +} + +variable "user_name" { + description = "The user name of virtual machine" +} + +variable "password" { + description = "The password of virtual machine" + 
sensitive = true +} + +# Refer to https://github.com/Azure/azure-cli-extensions/blob/ed3f463e9ef7980eff196504a8bb29800c123eba/src/connectedk8s/azext_connectedk8s/custom.py#L365 to generate the private key +variable "private_pem" { + description = "The private certificate used by the agent" +} + +# Refer to https://github.com/Azure/azure-cli-extensions/blob/ed3f463e9ef7980eff196504a8bb29800c123eba/src/connectedk8s/azext_connectedk8s/custom.py#L359 to generate the public key +variable "public_key" { + description = "The base64-encoded public certificate used by the agent" +} \ No newline at end of file diff --git a/internal/clients/client.go b/internal/clients/client.go index d55325436a10..65b1aef06f31 100644 --- a/internal/clients/client.go +++ b/internal/clients/client.go @@ -24,6 +24,7 @@ import ( appConfiguration "github.com/hashicorp/terraform-provider-azurerm/internal/services/appconfiguration/client" applicationInsights "github.com/hashicorp/terraform-provider-azurerm/internal/services/applicationinsights/client" appService "github.com/hashicorp/terraform-provider-azurerm/internal/services/appservice/client" + arckubernetes "github.com/hashicorp/terraform-provider-azurerm/internal/services/arckubernetes/client" attestation "github.com/hashicorp/terraform-provider-azurerm/internal/services/attestation/client" authorization "github.com/hashicorp/terraform-provider-azurerm/internal/services/authorization/client" automation "github.com/hashicorp/terraform-provider-azurerm/internal/services/automation/client" @@ -146,6 +147,7 @@ type Client struct { AppInsights *applicationInsights.Client AppPlatform *appPlatform.Client AppService *appService.Client + ArcKubernetes *arckubernetes.Client Attestation *attestation.Client Authorization *authorization.Client Automation *automation.Client @@ -276,6 +278,7 @@ func (client *Client) Build(ctx context.Context, o *common.ClientOptions) error client.AppInsights = applicationInsights.NewClient(o) client.AppPlatform = appPlatform.NewClient(o) client.AppService = appService.NewClient(o) + client.ArcKubernetes = arckubernetes.NewClient(o) client.Attestation = attestation.NewClient(o) client.Authorization = authorization.NewClient(o) client.Automation = automation.NewClient(o) diff --git a/internal/provider/services.go b/internal/provider/services.go index 719df7d4d1f8..bf6b5239f733 100644 --- a/internal/provider/services.go +++ b/internal/provider/services.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/services/appconfiguration" "github.com/hashicorp/terraform-provider-azurerm/internal/services/applicationinsights" "github.com/hashicorp/terraform-provider-azurerm/internal/services/appservice" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/arckubernetes" "github.com/hashicorp/terraform-provider-azurerm/internal/services/attestation" "github.com/hashicorp/terraform-provider-azurerm/internal/services/authorization" "github.com/hashicorp/terraform-provider-azurerm/internal/services/automation" @@ -184,6 +185,7 @@ func SupportedUntypedServices() []sdk.UntypedServiceRegistration { appconfiguration.Registration{}, springcloud.Registration{}, applicationinsights.Registration{}, + arckubernetes.Registration{}, attestation.Registration{}, authorization.Registration{}, automation.Registration{}, diff --git a/internal/services/arckubernetes/arc_kubernetes_cluster_resource.go b/internal/services/arckubernetes/arc_kubernetes_cluster_resource.go new file mode 100644 index 000000000000..b1750c24d419 --- 
/dev/null +++ b/internal/services/arckubernetes/arc_kubernetes_cluster_resource.go @@ -0,0 +1,230 @@ +package arckubernetes + +import ( + "fmt" + "regexp" + "time" + + "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" + + "github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" + "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" + "github.com/hashicorp/go-azure-helpers/resourcemanager/location" + "github.com/hashicorp/go-azure-helpers/resourcemanager/tags" + arckubernetes "github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters" + azValidate "github.com/hashicorp/terraform-provider-azurerm/helpers/validate" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" + "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" +) + +func resourceArcKubernetesCluster() *pluginsdk.Resource { + return &pluginsdk.Resource{ + Create: resourceArcKubernetesClusterCreate, + Read: resourceArcKubernetesClusterRead, + Update: resourceArcKubernetesClusterUpdate, + Delete: resourceArcKubernetesClusterDelete, + + Timeouts: &pluginsdk.ResourceTimeout{ + Create: pluginsdk.DefaultTimeout(30 * time.Minute), + Read: pluginsdk.DefaultTimeout(5 * time.Minute), + Update: pluginsdk.DefaultTimeout(30 * time.Minute), + Delete: pluginsdk.DefaultTimeout(30 * time.Minute), + }, + + Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { + _, err := arckubernetes.ParseConnectedClusterID(id) + return err + }), + + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringMatch( + regexp.MustCompile("^[-_a-zA-Z0-9]{1,260}$"), + "The name of Arc Kubernetes Cluster can only include alphanumeric characters, underscores, hyphens, has a maximum length of 260 characters, and must be unique.", + ), + }, + + "resource_group_name": commonschema.ResourceGroupName(), + + "agent_public_key_certificate": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azValidate.Base64EncodedString, + }, + + "identity": commonschema.SystemAssignedIdentityRequiredForceNew(), + + "location": commonschema.Location(), + + "agent_version": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "distribution": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "infrastructure": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "kubernetes_version": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "offering": { + Type: pluginsdk.TypeString, + Computed: true, + }, + + "total_core_count": { + Type: pluginsdk.TypeInt, + Computed: true, + }, + + "total_node_count": { + Type: pluginsdk.TypeInt, + Computed: true, + }, + + "tags": commonschema.Tags(), + }, + } +} + +func resourceArcKubernetesClusterCreate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ArcKubernetes.ArcKubernetesClient + subscriptionId := meta.(*clients.Client).Account.SubscriptionId + ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) + defer cancel() + + id := arckubernetes.NewConnectedClusterID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) + + existing, err := client.ConnectedClusterGet(ctx, id) + if err != nil { + if 
!response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for existing %s: %+v", id, err) + } + } + + if !response.WasNotFound(existing.HttpResponse) { + return tf.ImportAsExistsError("azurerm_arc_kubernetes_cluster", id.ID()) + } + + identityValue, err := identity.ExpandSystemAssigned(d.Get("identity").([]interface{})) + if err != nil { + return fmt.Errorf("expanding `identity`: %+v", err) + } + + location := location.Normalize(d.Get("location").(string)) + props := arckubernetes.ConnectedCluster{ + Identity: *identityValue, + Location: location, + Properties: arckubernetes.ConnectedClusterProperties{ + AgentPublicKeyCertificate: d.Get("agent_public_key_certificate").(string), + }, + Tags: tags.Expand(d.Get("tags").(map[string]interface{})), + } + + if err := client.ConnectedClusterCreateThenPoll(ctx, id, props); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + + d.SetId(id.ID()) + return resourceArcKubernetesClusterRead(d, meta) +} + +func resourceArcKubernetesClusterRead(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ArcKubernetes.ArcKubernetesClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := arckubernetes.ParseConnectedClusterID(d.Id()) + if err != nil { + return err + } + + resp, err := client.ConnectedClusterGet(ctx, *id) + if err != nil { + if response.WasNotFound(resp.HttpResponse) { + d.SetId("") + return nil + } + + return fmt.Errorf("retrieving %s: %+v", *id, err) + } + + d.Set("name", id.ConnectedClusterName) + d.Set("resource_group_name", id.ResourceGroupName) + if model := resp.Model; model != nil { + if err := d.Set("identity", identity.FlattenSystemAssigned(&model.Identity)); err != nil { + return fmt.Errorf("setting `identity`: %+v", err) + } + + d.Set("location", location.Normalize(model.Location)) + props := model.Properties + d.Set("agent_public_key_certificate", props.AgentPublicKeyCertificate) + d.Set("agent_version", props.AgentVersion) + d.Set("distribution", props.Distribution) + d.Set("infrastructure", props.Infrastructure) + d.Set("kubernetes_version", props.KubernetesVersion) + d.Set("offering", props.Offering) + d.Set("total_core_count", props.TotalCoreCount) + d.Set("total_node_count", props.TotalNodeCount) + + if err := tags.FlattenAndSet(d, model.Tags); err != nil { + return err + } + } + + return nil +} + +func resourceArcKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ArcKubernetes.ArcKubernetesClient + ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := arckubernetes.ParseConnectedClusterID(d.Id()) + if err != nil { + return err + } + + props := arckubernetes.ConnectedClusterPatch{ + Tags: tags.Expand(d.Get("tags").(map[string]interface{})), + } + + if _, err := client.ConnectedClusterUpdate(ctx, *id, props); err != nil { + return fmt.Errorf("updating %s: %+v", *id, err) + } + + return resourceArcKubernetesClusterRead(d, meta) +} + +func resourceArcKubernetesClusterDelete(d *pluginsdk.ResourceData, meta interface{}) error { + client := meta.(*clients.Client).ArcKubernetes.ArcKubernetesClient + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := arckubernetes.ParseConnectedClusterID(d.Id()) + if err != nil { + return err + } + + if err := client.ConnectedClusterDeleteThenPoll(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", *id, err) + 
} + + return nil +} diff --git a/internal/services/arckubernetes/arc_kubernetes_cluster_resource_test.go b/internal/services/arckubernetes/arc_kubernetes_cluster_resource_test.go new file mode 100644 index 000000000000..eaa320afd179 --- /dev/null +++ b/internal/services/arckubernetes/arc_kubernetes_cluster_resource_test.go @@ -0,0 +1,389 @@ +package arckubernetes_test + +import ( + "context" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + + cryptoRand "crypto/rand" + "math/rand" + "os" + "testing" + + arckubernetes "github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters" + + "github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +type ArcKubernetesClusterResource struct{} + +func TestAccArcKubernetesCluster_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_arc_kubernetes_cluster", "test") + r := ArcKubernetesClusterResource{} + privateKey, publicKey, err := r.generateKey() + if err != nil { + t.Fatalf("failed to generate key: %+v", err) + } + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data, privateKey, publicKey), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.update(data, privateKey, publicKey), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("distribution").HasValue("kind"), + check.That(data.ResourceName).Key("infrastructure").HasValue("generic"), + ), + }, + data.ImportStep(), + }) +} + +func TestAccArcKubernetesCluster_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_arc_kubernetes_cluster", "test") + r := ArcKubernetesClusterResource{} + privateKey, publicKey, err := r.generateKey() + if err != nil { + t.Fatalf("failed to generate key: %+v", err) + } + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data, privateKey, publicKey), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + { + Config: r.requiresImport(data, privateKey, publicKey), + ExpectError: acceptance.RequiresImportError(data.ResourceType), + }, + }) +} + +func TestAccArcKubernetesCluster_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_arc_kubernetes_cluster", "test") + r := ArcKubernetesClusterResource{} + privateKey, publicKey, err := r.generateKey() + if err != nil { + t.Fatalf("failed to generate key: %+v", err) + } + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data, privateKey, publicKey), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.update(data, privateKey, publicKey), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("distribution").HasValue("kind"), + check.That(data.ResourceName).Key("infrastructure").HasValue("generic"), + ), + }, + data.ImportStep(), + }) +} + +func (r ArcKubernetesClusterResource) Exists(ctx context.Context, clients *clients.Client, 
state *pluginsdk.InstanceState) (*bool, error) { + id, err := arckubernetes.ParseConnectedClusterID(state.ID) + if err != nil { + return nil, err + } + + client := clients.ArcKubernetes.ArcKubernetesClient + resp, err := client.ConnectedClusterGet(ctx, *id) + if err != nil { + if response.WasNotFound(resp.HttpResponse) { + return utils.Bool(false), nil + } + return nil, fmt.Errorf("retrieving %s: %+v", id, err) + } + return utils.Bool(resp.Model != nil), nil +} + +func (r ArcKubernetesClusterResource) template(data acceptance.TestData, credential string) string { + return fmt.Sprintf(` +provider "azurerm" { + features { + resource_group { + prevent_deletion_if_contains_resources = false + } + } +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-%[1]d" + location = "%[2]s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctestnw-%[1]d" + address_space = ["10.0.0.0/16"] + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_subnet" "test" { + name = "internal" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefixes = ["10.0.2.0/24"] +} + +resource "azurerm_public_ip" "test" { + name = "acctestpip-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + allocation_method = "Static" +} + +resource "azurerm_network_interface" "test" { + name = "acctestnic-%[1]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + ip_configuration { + name = "internal" + subnet_id = azurerm_subnet.test.id + private_ip_address_allocation = "Dynamic" + public_ip_address_id = azurerm_public_ip.test.id + } +} + +resource "azurerm_network_security_group" "my_terraform_nsg" { + name = "myNetworkSecurityGroup" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + security_rule { + name = "SSH" + priority = 1001 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "*" + destination_address_prefix = "*" + } +} + +resource "azurerm_network_interface_security_group_association" "example" { + network_interface_id = azurerm_network_interface.test.id + network_security_group_id = azurerm_network_security_group.my_terraform_nsg.id +} + +resource "azurerm_linux_virtual_machine" "test" { + name = "acctestVM-%[1]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + size = "Standard_F2" + admin_username = "adminuser" + admin_password = "%[3]s" + provision_vm_agent = false + allow_extension_operations = false + disable_password_authentication = false + network_interface_ids = [ + azurerm_network_interface.test.id, + ] + os_disk { + caching = "ReadWrite" + storage_account_type = "Standard_LRS" + } + source_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "18.04-LTS" + version = "latest" + } +} +`, data.RandomInteger, data.Locations.Primary, credential) +} + +func (r ArcKubernetesClusterResource) provisionTemplate(data acceptance.TestData, credential string, privateKey string) string { + return fmt.Sprintf(` +connection { + type = "ssh" + host = azurerm_public_ip.test.ip_address + user = "adminuser" + password = "%[1]s" +} + +provisioner "file" { + content = templatefile("testdata/install_agent.sh.tftpl", { 
+ subscription_id = "%[4]s" + resource_group_name = azurerm_resource_group.test.name + cluster_name = "acctest-akcc-%[2]d" + location = azurerm_resource_group.test.location + tenant_id = "%[5]s" + working_dir = "%[3]s" + }) + destination = "%[3]s/install_agent.sh" +} + +provisioner "file" { + source = "testdata/install_agent.py" + destination = "%[3]s/install_agent.py" +} + +provisioner "file" { + source = "testdata/kind.yaml" + destination = "%[3]s/kind.yaml" +} + +provisioner "file" { + content = < %[3]s/agent_log", + ] +} +`, credential, data.RandomInteger, "/home/adminuser", os.Getenv("ARM_SUBSCRIPTION_ID"), os.Getenv("ARM_TENANT_ID"), privateKey) +} + +func (r ArcKubernetesClusterResource) basic(data acceptance.TestData, privateKey string, publicKey string) string { + credential := fmt.Sprintf("P@$$w0rd%d!", rand.Intn(10000)) + template := r.template(data, credential) + provisionTemplate := r.provisionTemplate(data, credential, privateKey) + return fmt.Sprintf(` + %[1]s + +resource "azurerm_arc_kubernetes_cluster" "test" { + name = "acctest-akcc-%[2]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + agent_public_key_certificate = "%[4]s" + identity { + type = "SystemAssigned" + } + + %[3]s + + depends_on = [ + azurerm_linux_virtual_machine.test + ] +} +`, template, data.RandomInteger, provisionTemplate, publicKey) +} + +func (r ArcKubernetesClusterResource) requiresImport(data acceptance.TestData, privateKey string, publicKey string) string { + config := r.basic(data, privateKey, publicKey) + return fmt.Sprintf(` + %s + +resource "azurerm_arc_kubernetes_cluster" "import" { + name = azurerm_arc_kubernetes_cluster.test.name + resource_group_name = azurerm_arc_kubernetes_cluster.test.resource_group_name + location = azurerm_arc_kubernetes_cluster.test.location + agent_public_key_certificate = azurerm_arc_kubernetes_cluster.test.agent_public_key_certificate + + identity { + type = "SystemAssigned" + } +} +`, config) +} + +func (r ArcKubernetesClusterResource) complete(data acceptance.TestData, privateKey string, publicKey string) string { + credential := fmt.Sprintf("P@$$w0rd%d!", rand.Intn(10000)) + template := r.template(data, credential) + provisionTemplate := r.provisionTemplate(data, credential, privateKey) + return fmt.Sprintf(` + %[1]s + +resource "azurerm_arc_kubernetes_cluster" "test" { + name = "acctest-akcc-%[2]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + agent_public_key_certificate = "%[4]s" + + identity { + type = "SystemAssigned" + } + + tags = { + ENV = "Test" + } + + %[3]s + + depends_on = [ + azurerm_linux_virtual_machine.test + ] +} +`, template, data.RandomInteger, provisionTemplate, publicKey) +} + +func (r ArcKubernetesClusterResource) update(data acceptance.TestData, privateKey string, publicKey string) string { + credential := fmt.Sprintf("P@$$w0rd%d!", rand.Intn(10000)) + template := r.template(data, credential) + provisionTemplate := r.provisionTemplate(data, credential, privateKey) + return fmt.Sprintf(` + %[1]s + +resource "azurerm_arc_kubernetes_cluster" "test" { + name = "acctest-akcc-%[2]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + agent_public_key_certificate = "%[4]s" + + identity { + type = "SystemAssigned" + } + + tags = { + ENV = "TestUpdate" + } + + %[3]s + + depends_on = [ + azurerm_linux_virtual_machine.test + ] +} +`, template, data.RandomInteger, 
provisionTemplate, publicKey) +} + +func (r ArcKubernetesClusterResource) generateKey() (string, string, error) { + privateKey, err := rsa.GenerateKey(cryptoRand.Reader, 4096) + if err != nil { + return "", "", fmt.Errorf("failed to generate RSA key") + } + + privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey) + privateKeyBlock := &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: privateKeyBytes, + } + + privatePem := pem.EncodeToMemory(privateKeyBlock) + if privatePem == nil { + return "", "", fmt.Errorf("failed to encode pem") + } + + return string(privatePem), base64.StdEncoding.EncodeToString(x509.MarshalPKCS1PublicKey(&privateKey.PublicKey)), nil +} diff --git a/internal/services/arckubernetes/client/client.go b/internal/services/arckubernetes/client/client.go new file mode 100644 index 000000000000..952a90e0d220 --- /dev/null +++ b/internal/services/arckubernetes/client/client.go @@ -0,0 +1,20 @@ +package client + +import ( + "github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters" + "github.com/hashicorp/terraform-provider-azurerm/internal/common" +) + +type Client struct { + ArcKubernetesClient *connectedclusters.ConnectedClustersClient +} + +func NewClient(o *common.ClientOptions) *Client { + + arcKubernetesClient := connectedclusters.NewConnectedClustersClientWithBaseURI(o.ResourceManagerEndpoint) + o.ConfigureClient(&arcKubernetesClient.Client, o.ResourceManagerAuthorizer) + + return &Client{ + ArcKubernetesClient: &arcKubernetesClient, + } +} diff --git a/internal/services/arckubernetes/registration.go b/internal/services/arckubernetes/registration.go new file mode 100644 index 000000000000..8a9257a24970 --- /dev/null +++ b/internal/services/arckubernetes/registration.go @@ -0,0 +1,29 @@ +package arckubernetes + +import "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + +type Registration struct{} + +// Name is the name of this Service +func (r Registration) Name() string { + return "ArcKubernetes" +} + +// WebsiteCategories returns a list of categories which can be used for the sidebar +func (r Registration) WebsiteCategories() []string { + return []string{ + "ArcKubernetes", + } +} + +// SupportedDataSources returns the supported Data Sources supported by this Service +func (r Registration) SupportedDataSources() map[string]*pluginsdk.Resource { + return map[string]*pluginsdk.Resource{} +} + +// SupportedResources returns the supported Resources supported by this Service +func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { + return map[string]*pluginsdk.Resource{ + "azurerm_arc_kubernetes_cluster": resourceArcKubernetesCluster(), + } +} diff --git a/internal/services/arckubernetes/testdata/install_agent.py b/internal/services/arckubernetes/testdata/install_agent.py new file mode 100644 index 000000000000..c4623d8b2e25 --- /dev/null +++ b/internal/services/arckubernetes/testdata/install_agent.py @@ -0,0 +1,248 @@ +import argparse +import json +import logging as logger +import os +import platform +import shutil +import stat +import subprocess +import time +import urllib +from subprocess import PIPE, Popen +from urllib import request + +HELM_VERSION = 'v3.6.3' +HELM_STORAGE_URL = "https://k8connecthelm.azureedge.net" +Pre_Onboarding_Helm_Charts_Folder_Name = 'PreOnboardingChecksCharts' + + +def get_helm_registry(config_dp_endpoint): + # Setting uri + get_chart_location_url = "{}/{}/GetLatestHelmPackagePath?api-version=2019-11-01-preview".format( + config_dp_endpoint, 
'azure-arc-k8sagents') + + try: + response = urllib.request.urlopen( + request.Request(get_chart_location_url, method="POST")) + except Exception as e: + raise Exception("Failed to get helm registry." + str(e)) + + try: + return json.load(response).get('repositoryPath') + except Exception as e: + raise Exception( + "Error while fetching helm chart registry path from JSON response: " + str(e)) + + +def pull_helm_chart(registry_path, kube_config, kube_context, helm_client_location, chart_name='azure-arc-k8sagents', retry_count=5, retry_delay=3): + cmd_helm_chart_pull = [helm_client_location, + "chart", "pull", registry_path] + if kube_config: + cmd_helm_chart_pull.extend(["--kubeconfig", kube_config]) + if kube_context: + cmd_helm_chart_pull.extend(["--kube-context", kube_context]) + for i in range(retry_count): + response_helm_chart_pull = subprocess.Popen( + cmd_helm_chart_pull, stdout=PIPE, stderr=PIPE) + _, error_helm_chart_pull = response_helm_chart_pull.communicate() + if response_helm_chart_pull.returncode != 0: + if i == retry_count - 1: + raise Exception("Unable to pull {} helm chart from the registry '{}': ".format( + chart_name, registry_path) + error_helm_chart_pull.decode("ascii")) + time.sleep(retry_delay) + else: + break + + +def export_helm_chart(registry_path, chart_export_path, kube_config, kube_context, helm_client_location, chart_name='azure-arc-k8sagents'): + cmd_helm_chart_export = [helm_client_location, "chart", + "export", registry_path, "--destination", chart_export_path] + if kube_config: + cmd_helm_chart_export.extend(["--kubeconfig", kube_config]) + if kube_context: + cmd_helm_chart_export.extend(["--kube-context", kube_context]) + response_helm_chart_export = subprocess.Popen( + cmd_helm_chart_export, stdout=PIPE, stderr=PIPE) + _, error_helm_chart_export = response_helm_chart_export.communicate() + if response_helm_chart_export.returncode != 0: + raise Exception("Unable to export {} helm chart from the registry '{}': ".format( + chart_name, registry_path) + error_helm_chart_export.decode("ascii")) + + +def get_chart_path(registry_path, kube_config, kube_context, helm_client_location, chart_folder_name='AzureArcCharts', chart_name='azure-arc-k8sagents'): + # Pulling helm chart from registry + os.environ['HELM_EXPERIMENTAL_OCI'] = '1' + pull_helm_chart(registry_path, kube_config, kube_context, + helm_client_location, chart_name) + + # Exporting helm chart after cleanup + chart_export_path = os.path.join( + os.path.expanduser('~'), '.azure', chart_folder_name) + try: + if os.path.isdir(chart_export_path): + shutil.rmtree(chart_export_path) + except: + logger.warning("Unable to cleanup the {} already present on the machine. 
In case of failure, please cleanup the directory '{}' and try again.".format( + chart_folder_name, chart_export_path)) + + export_helm_chart(registry_path, chart_export_path, kube_config, + kube_context, helm_client_location, chart_name) + + # Returning helm chart path + helm_chart_path = os.path.join(chart_export_path, chart_name) + if chart_folder_name == Pre_Onboarding_Helm_Charts_Folder_Name: + chart_path = helm_chart_path + else: + chart_path = os.getenv('HELMCHART') if os.getenv( + 'HELMCHART') else helm_chart_path + + return chart_path + + +def install_helm_client(): + # Fetch system related info + operating_system = platform.system().lower() + platform.machine() + + # Set helm binary download & install locations + if (operating_system == 'windows'): + download_location_string = f'.azure\\helm\\{HELM_VERSION}\\helm-{HELM_VERSION}-{operating_system}-amd64.zip' + install_location_string = f'.azure\\helm\\{HELM_VERSION}\\{operating_system}-amd64\\helm.exe' + requestUri = f'{HELM_STORAGE_URL}/helm/helm-{HELM_VERSION}-{operating_system}-amd64.zip' + elif (operating_system == 'linux' or operating_system == 'darwin'): + download_location_string = f'.azure/helm/{HELM_VERSION}/helm-{HELM_VERSION}-{operating_system}-amd64.tar.gz' + install_location_string = f'.azure/helm/{HELM_VERSION}/{operating_system}-amd64/helm' + requestUri = f'{HELM_STORAGE_URL}/helm/helm-{HELM_VERSION}-{operating_system}-amd64.tar.gz' + else: + raise Exception( + f'The {operating_system} platform is not currently supported for installing helm client.') + + download_location = os.path.expanduser( + os.path.join('~', download_location_string)) + download_dir = os.path.dirname(download_location) + install_location = os.path.expanduser( + os.path.join('~', install_location_string)) + + # Download compressed helm binary if not already present + if not os.path.isfile(download_location): + # Creating the helm folder if it doesn't exist + if not os.path.exists(download_dir): + try: + os.makedirs(download_dir) + except Exception as e: + raise Exception("Failed to create helm directory." + str(e)) + + # Downloading compressed helm client executable + logger.warning( + "Downloading helm client for the first time. This can take a few minutes...") + try: + response = urllib.request.urlopen(requestUri) + except Exception as e: + raise Exception("Failed to download helm client." + str(e)) + + responseContent = response.read() + response.close() + + # Creating the compressed helm binaries + try: + with open(download_location, 'wb') as f: + f.write(responseContent) + except Exception as e: + raise Exception("Failed to create helm executable." + str(e)) + + # Extract compressed helm binary + if not os.path.isfile(install_location): + try: + shutil.unpack_archive(download_location, download_dir) + os.chmod(install_location, os.stat( + install_location).st_mode | stat.S_IXUSR) + except Exception as e: + raise Exception("Failed to extract helm executable." 
+ str(e)) + + return install_location + + +def helm_install_release(chart_path, subscription_id, kubernetes_distro, kubernetes_infra, resource_group_name, cluster_name, + location, onboarding_tenant_id, private_key_pem, + no_wait, cloud_name, helm_client_location, onboarding_timeout="600"): + cmd_helm_install = [helm_client_location, "upgrade", "--install", "azure-arc", chart_path, + "--set", "global.subscriptionId={}".format( + subscription_id), + "--set", "global.kubernetesDistro={}".format( + kubernetes_distro), + "--set", "global.kubernetesInfra={}".format( + kubernetes_infra), + "--set", "global.resourceGroupName={}".format( + resource_group_name), + "--set", "global.resourceName={}".format(cluster_name), + "--set", "global.location={}".format(location), + "--set", "global.tenantId={}".format( + onboarding_tenant_id), + "--set", "global.onboardingPrivateKey={}".format( + private_key_pem), + "--set", "systemDefaultValues.spnOnboarding=false", + "--set", "global.azureEnvironment={}".format( + cloud_name), + "--set", "systemDefaultValues.clusterconnect-agent.enabled=true", + "--namespace", "{}".format("azure-arc-release"), + "--create-namespace", + "--output", "json"] + + if not no_wait: + # Change --timeout format for helm client to understand + onboarding_timeout = onboarding_timeout + "s" + cmd_helm_install.extend( + ["--wait", "--timeout", "{}".format(onboarding_timeout)]) + response_helm_install = Popen(cmd_helm_install, stdout=PIPE, stderr=PIPE) + _, error_helm_install = response_helm_install.communicate() + if response_helm_install.returncode != 0: + raise Exception("Unable to install helm release" + error_helm_install.decode("ascii")) + + +def install_agent(): + parser = argparse.ArgumentParser( + description='Install Connected Cluster Agent') + parser.add_argument('--subscriptionId', type=str, required=True) + parser.add_argument('--resourceGroupName', type=str, required=True) + parser.add_argument('--clusterName', type=str, required=True) + parser.add_argument('--location', type=str, required=True) + parser.add_argument('--tenantId', type=str, required=True) + parser.add_argument('--privatePem', type=str, required=True) + + try: + args = parser.parse_args() + except Exception as e: + raise Exception("Failed to parse arguments." + str(e)) + + try: + with open(args.privatePem, "r") as f: + privateKey = f.read() + except Exception as e: + raise Exception("Failed to get private key." 
+ str(e)) + + # Install helm client + helm_client_location = install_helm_client() + + # Retrieving Helm chart OCI Artifact location + registry_path = get_helm_registry("https://westeurope.dp.kubernetesconfiguration.azure.com") + + # Get helm chart path + chart_path = get_chart_path( + registry_path, None, None, helm_client_location) + + helm_install_release(chart_path, + args.subscriptionId, + "generic", + "generic", + args.resourceGroupName, + args.clusterName, + args.location, + args.tenantId, + privateKey, + False, + "AZUREPUBLICCLOUD", + helm_client_location) + + +if __name__ == "__main__": + install_agent() diff --git a/internal/services/arckubernetes/testdata/install_agent.sh.tftpl b/internal/services/arckubernetes/testdata/install_agent.sh.tftpl new file mode 100644 index 000000000000..9bb2cbcbd0bf --- /dev/null +++ b/internal/services/arckubernetes/testdata/install_agent.sh.tftpl @@ -0,0 +1,38 @@ +#!/bin/bash + +# install docker +sudo apt-get update +sudo apt-get -y install ca-certificates curl gnupg lsb-release +sudo mkdir -m 0755 -p /etc/apt/keyrings +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +sudo chmod a+r /etc/apt/keyrings/docker.gpg +sudo apt-get update +sudo apt-get -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + +#Manage Docker as a non-root user +sudo groupadd docker || true +sudo usermod -aG docker $USER +newgrp docker << END + +#install golang and kind +sudo snap install go --classic +sudo go install sigs.k8s.io/kind@latest + +#create a new cluster +export PATH="$HOME/go/bin:$PATH" +export KUBECONFIG="${working_dir}/kind-config" +kind create cluster --name arc-kind --config kind.yaml --kubeconfig kind-config + +#install agent +python3 "${working_dir}/install_agent.py" \ +--subscriptionId "${subscription_id}" \ +--resourceGroupName "${resource_group_name}" \ +--clusterName "${cluster_name}" \ +--location "${location}" \ +--tenantId "${tenant_id}" \ +--privatePem "${working_dir}/private.pem" + +END \ No newline at end of file diff --git a/internal/services/arckubernetes/testdata/kind.yaml b/internal/services/arckubernetes/testdata/kind.yaml new file mode 100644 index 000000000000..fe336a44025d --- /dev/null +++ b/internal/services/arckubernetes/testdata/kind.yaml @@ -0,0 +1,20 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + # port forward 80 on the host to 80 on this node + extraPortMappings: + - containerPort: 81 + hostPort: 81 + listenAddress: "127.0.0.1" + protocol: TCP + - containerPort: 443 + hostPort: 443 + listenAddress: "127.0.0.1" + protocol: TCP diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/README.md b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/README.md new file mode 100644 index 000000000000..6b58fde3c10e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/README.md @@ -0,0 +1,141 @@ + +## 
`github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters` Documentation + +The `connectedclusters` SDK allows for interaction with the Azure Resource Manager Service `hybridkubernetes` (API Version `2021-10-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters" +``` + + +### Client Initialization + +```go +client := connectedclusters.NewConnectedClustersClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `ConnectedClustersClient.ConnectedClusterCreate` + +```go +ctx := context.TODO() +id := connectedclusters.NewConnectedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "connectedClusterValue") + +payload := connectedclusters.ConnectedCluster{ + // ... +} + + +if err := client.ConnectedClusterCreateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `ConnectedClustersClient.ConnectedClusterDelete` + +```go +ctx := context.TODO() +id := connectedclusters.NewConnectedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "connectedClusterValue") + +if err := client.ConnectedClusterDeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `ConnectedClustersClient.ConnectedClusterGet` + +```go +ctx := context.TODO() +id := connectedclusters.NewConnectedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "connectedClusterValue") + +read, err := client.ConnectedClusterGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ConnectedClustersClient.ConnectedClusterListByResourceGroup` + +```go +ctx := context.TODO() +id := connectedclusters.NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + +// alternatively `client.ConnectedClusterListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ConnectedClusterListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ConnectedClustersClient.ConnectedClusterListBySubscription` + +```go +ctx := context.TODO() +id := connectedclusters.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.ConnectedClusterListBySubscription(ctx, id)` can be used to do batched pagination +items, err := client.ConnectedClusterListBySubscriptionComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ConnectedClustersClient.ConnectedClusterListClusterUserCredential` + +```go +ctx := context.TODO() +id := connectedclusters.NewConnectedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "connectedClusterValue") + +payload := connectedclusters.ListClusterUserCredentialProperties{ + // ... 
+} + + +read, err := client.ConnectedClusterListClusterUserCredential(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ConnectedClustersClient.ConnectedClusterUpdate` + +```go +ctx := context.TODO() +id := connectedclusters.NewConnectedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "connectedClusterValue") + +payload := connectedclusters.ConnectedClusterPatch{ + // ... +} + + +read, err := client.ConnectedClusterUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/client.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/client.go new file mode 100644 index 000000000000..8927fd807b8f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/client.go @@ -0,0 +1,18 @@ +package connectedclusters + +import "github.com/Azure/go-autorest/autorest" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectedClustersClient struct { + Client autorest.Client + baseUri string +} + +func NewConnectedClustersClientWithBaseURI(endpoint string) ConnectedClustersClient { + return ConnectedClustersClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/constants.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/constants.go new file mode 100644 index 000000000000..dbe53c5b621b --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/constants.go @@ -0,0 +1,111 @@ +package connectedclusters + +import "strings" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AuthenticationMethod string + +const ( + AuthenticationMethodAAD AuthenticationMethod = "AAD" + AuthenticationMethodToken AuthenticationMethod = "Token" +) + +func PossibleValuesForAuthenticationMethod() []string { + return []string{ + string(AuthenticationMethodAAD), + string(AuthenticationMethodToken), + } +} + +func parseAuthenticationMethod(input string) (*AuthenticationMethod, error) { + vals := map[string]AuthenticationMethod{ + "aad": AuthenticationMethodAAD, + "token": AuthenticationMethodToken, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthenticationMethod(input) + return &out, nil +} + +type ConnectivityStatus string + +const ( + ConnectivityStatusConnected ConnectivityStatus = "Connected" + ConnectivityStatusConnecting ConnectivityStatus = "Connecting" + ConnectivityStatusExpired ConnectivityStatus = "Expired" + ConnectivityStatusOffline ConnectivityStatus = "Offline" +) + +func PossibleValuesForConnectivityStatus() []string { + return []string{ + string(ConnectivityStatusConnected), + string(ConnectivityStatusConnecting), + string(ConnectivityStatusExpired), + string(ConnectivityStatusOffline), + } +} + +func parseConnectivityStatus(input string) (*ConnectivityStatus, error) { + vals := map[string]ConnectivityStatus{ + "connected": ConnectivityStatusConnected, + "connecting": ConnectivityStatusConnecting, + "expired": ConnectivityStatusExpired, + "offline": ConnectivityStatusOffline, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ConnectivityStatus(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateAccepted ProvisioningState = "Accepted" + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateDeleting ProvisioningState = "Deleting" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateProvisioning ProvisioningState = "Provisioning" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateAccepted), + string(ProvisioningStateCanceled), + string(ProvisioningStateDeleting), + string(ProvisioningStateFailed), + string(ProvisioningStateProvisioning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUpdating), + } +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "accepted": ProvisioningStateAccepted, + "canceled": ProvisioningStateCanceled, + "deleting": ProvisioningStateDeleting, + "failed": ProvisioningStateFailed, + "provisioning": ProvisioningStateProvisioning, + "succeeded": ProvisioningStateSucceeded, + "updating": ProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningState(input) + return &out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/id_connectedcluster.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/id_connectedcluster.go new file mode 100644 index 000000000000..c89b6451ad6a --- /dev/null +++ 
b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/id_connectedcluster.go @@ -0,0 +1,127 @@ +package connectedclusters + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = ConnectedClusterId{} + +// ConnectedClusterId is a struct representing the Resource ID for a Connected Cluster +type ConnectedClusterId struct { + SubscriptionId string + ResourceGroupName string + ConnectedClusterName string +} + +// NewConnectedClusterID returns a new ConnectedClusterId struct +func NewConnectedClusterID(subscriptionId string, resourceGroupName string, connectedClusterName string) ConnectedClusterId { + return ConnectedClusterId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ConnectedClusterName: connectedClusterName, + } +} + +// ParseConnectedClusterID parses 'input' into a ConnectedClusterId +func ParseConnectedClusterID(input string) (*ConnectedClusterId, error) { + parser := resourceids.NewParserFromResourceIdType(ConnectedClusterId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ConnectedClusterId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ConnectedClusterName, ok = parsed.Parsed["connectedClusterName"]; !ok { + return nil, fmt.Errorf("the segment 'connectedClusterName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseConnectedClusterIDInsensitively parses 'input' case-insensitively into a ConnectedClusterId +// note: this method should only be used for API response data and not user input +func ParseConnectedClusterIDInsensitively(input string) (*ConnectedClusterId, error) { + parser := resourceids.NewParserFromResourceIdType(ConnectedClusterId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ConnectedClusterId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ConnectedClusterName, ok = parsed.Parsed["connectedClusterName"]; !ok { + return nil, fmt.Errorf("the segment 'connectedClusterName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateConnectedClusterID checks that 'input' can be parsed as a Connected Cluster ID +func ValidateConnectedClusterID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseConnectedClusterID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the 
formatted Connected Cluster ID +func (id ConnectedClusterId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Kubernetes/connectedClusters/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ConnectedClusterName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Connected Cluster ID +func (id ConnectedClusterId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftKubernetes", "Microsoft.Kubernetes", "Microsoft.Kubernetes"), + resourceids.StaticSegment("staticConnectedClusters", "connectedClusters", "connectedClusters"), + resourceids.UserSpecifiedSegment("connectedClusterName", "connectedClusterValue"), + } +} + +// String returns a human-readable description of this Connected Cluster ID +func (id ConnectedClusterId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Connected Cluster Name: %q", id.ConnectedClusterName), + } + return fmt.Sprintf("Connected Cluster (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclustercreate_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclustercreate_autorest.go new file mode 100644 index 000000000000..46ed03fa0a75 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclustercreate_autorest.go @@ -0,0 +1,79 @@ +package connectedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectedClusterCreateOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// ConnectedClusterCreate ... 
+func (c ConnectedClustersClient) ConnectedClusterCreate(ctx context.Context, id ConnectedClusterId, input ConnectedCluster) (result ConnectedClusterCreateOperationResponse, err error) { + req, err := c.preparerForConnectedClusterCreate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterCreate", nil, "Failure preparing request") + return + } + + result, err = c.senderForConnectedClusterCreate(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterCreate", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// ConnectedClusterCreateThenPoll performs ConnectedClusterCreate then polls until it's completed +func (c ConnectedClustersClient) ConnectedClusterCreateThenPoll(ctx context.Context, id ConnectedClusterId, input ConnectedCluster) error { + result, err := c.ConnectedClusterCreate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing ConnectedClusterCreate: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after ConnectedClusterCreate: %+v", err) + } + + return nil +} + +// preparerForConnectedClusterCreate prepares the ConnectedClusterCreate request. +func (c ConnectedClustersClient) preparerForConnectedClusterCreate(ctx context.Context, id ConnectedClusterId, input ConnectedCluster) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForConnectedClusterCreate sends the ConnectedClusterCreate request. The method will close the +// http.Response Body if it receives an error. +func (c ConnectedClustersClient) senderForConnectedClusterCreate(ctx context.Context, req *http.Request) (future ConnectedClusterCreateOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterdelete_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterdelete_autorest.go new file mode 100644 index 000000000000..3c075e92fb78 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterdelete_autorest.go @@ -0,0 +1,78 @@ +package connectedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectedClusterDeleteOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// ConnectedClusterDelete ... 
+func (c ConnectedClustersClient) ConnectedClusterDelete(ctx context.Context, id ConnectedClusterId) (result ConnectedClusterDeleteOperationResponse, err error) { + req, err := c.preparerForConnectedClusterDelete(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterDelete", nil, "Failure preparing request") + return + } + + result, err = c.senderForConnectedClusterDelete(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterDelete", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// ConnectedClusterDeleteThenPoll performs ConnectedClusterDelete then polls until it's completed +func (c ConnectedClustersClient) ConnectedClusterDeleteThenPoll(ctx context.Context, id ConnectedClusterId) error { + result, err := c.ConnectedClusterDelete(ctx, id) + if err != nil { + return fmt.Errorf("performing ConnectedClusterDelete: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after ConnectedClusterDelete: %+v", err) + } + + return nil +} + +// preparerForConnectedClusterDelete prepares the ConnectedClusterDelete request. +func (c ConnectedClustersClient) preparerForConnectedClusterDelete(ctx context.Context, id ConnectedClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForConnectedClusterDelete sends the ConnectedClusterDelete request. The method will close the +// http.Response Body if it receives an error. +func (c ConnectedClustersClient) senderForConnectedClusterDelete(ctx context.Context, req *http.Request) (future ConnectedClusterDeleteOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterget_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterget_autorest.go new file mode 100644 index 000000000000..b5150631856c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterget_autorest.go @@ -0,0 +1,68 @@ +package connectedclusters + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectedClusterGetOperationResponse struct { + HttpResponse *http.Response + Model *ConnectedCluster +} + +// ConnectedClusterGet ... 
+func (c ConnectedClustersClient) ConnectedClusterGet(ctx context.Context, id ConnectedClusterId) (result ConnectedClusterGetOperationResponse, err error) { + req, err := c.preparerForConnectedClusterGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterGet", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterGet", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForConnectedClusterGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterGet", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForConnectedClusterGet prepares the ConnectedClusterGet request. +func (c ConnectedClustersClient) preparerForConnectedClusterGet(ctx context.Context, id ConnectedClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForConnectedClusterGet handles the response to the ConnectedClusterGet request. The method always +// closes the http.Response Body. +func (c ConnectedClustersClient) responderForConnectedClusterGet(resp *http.Response) (result ConnectedClusterGetOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterlistbyresourcegroup_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterlistbyresourcegroup_autorest.go new file mode 100644 index 000000000000..ff6e9cc79d71 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterlistbyresourcegroup_autorest.go @@ -0,0 +1,187 @@ +package connectedclusters + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectedClusterListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + Model *[]ConnectedCluster + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ConnectedClusterListByResourceGroupOperationResponse, error) +} + +type ConnectedClusterListByResourceGroupCompleteResult struct { + Items []ConnectedCluster +} + +func (r ConnectedClusterListByResourceGroupOperationResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ConnectedClusterListByResourceGroupOperationResponse) LoadMore(ctx context.Context) (resp ConnectedClusterListByResourceGroupOperationResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +// ConnectedClusterListByResourceGroup ... +func (c ConnectedClustersClient) ConnectedClusterListByResourceGroup(ctx context.Context, id commonids.ResourceGroupId) (resp ConnectedClusterListByResourceGroupOperationResponse, err error) { + req, err := c.preparerForConnectedClusterListByResourceGroup(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListByResourceGroup", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListByResourceGroup", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForConnectedClusterListByResourceGroup(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListByResourceGroup", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// preparerForConnectedClusterListByResourceGroup prepares the ConnectedClusterListByResourceGroup request. +func (c ConnectedClustersClient) preparerForConnectedClusterListByResourceGroup(ctx context.Context, id commonids.ResourceGroupId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/providers/Microsoft.Kubernetes/connectedClusters", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForConnectedClusterListByResourceGroupWithNextLink prepares the ConnectedClusterListByResourceGroup request with the given nextLink token. 
+func (c ConnectedClustersClient) preparerForConnectedClusterListByResourceGroupWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForConnectedClusterListByResourceGroup handles the response to the ConnectedClusterListByResourceGroup request. The method always +// closes the http.Response Body. +func (c ConnectedClustersClient) responderForConnectedClusterListByResourceGroup(resp *http.Response) (result ConnectedClusterListByResourceGroupOperationResponse, err error) { + type page struct { + Values []ConnectedCluster `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ConnectedClusterListByResourceGroupOperationResponse, err error) { + req, err := c.preparerForConnectedClusterListByResourceGroupWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListByResourceGroup", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListByResourceGroup", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForConnectedClusterListByResourceGroup(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListByResourceGroup", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} + +// ConnectedClusterListByResourceGroupComplete retrieves all of the results into a single object +func (c ConnectedClustersClient) ConnectedClusterListByResourceGroupComplete(ctx context.Context, id commonids.ResourceGroupId) (ConnectedClusterListByResourceGroupCompleteResult, error) { + return c.ConnectedClusterListByResourceGroupCompleteMatchingPredicate(ctx, id, ConnectedClusterOperationPredicate{}) +} + +// ConnectedClusterListByResourceGroupCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c ConnectedClustersClient) ConnectedClusterListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id commonids.ResourceGroupId, predicate ConnectedClusterOperationPredicate) (resp ConnectedClusterListByResourceGroupCompleteResult, err error) { + items := make([]ConnectedCluster, 0) + + page, err := c.ConnectedClusterListByResourceGroup(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: 
%+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ConnectedClusterListByResourceGroupCompleteResult{ + Items: items, + } + return out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterlistbysubscription_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterlistbysubscription_autorest.go new file mode 100644 index 000000000000..52786074e08d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterlistbysubscription_autorest.go @@ -0,0 +1,187 @@ +package connectedclusters + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectedClusterListBySubscriptionOperationResponse struct { + HttpResponse *http.Response + Model *[]ConnectedCluster + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ConnectedClusterListBySubscriptionOperationResponse, error) +} + +type ConnectedClusterListBySubscriptionCompleteResult struct { + Items []ConnectedCluster +} + +func (r ConnectedClusterListBySubscriptionOperationResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ConnectedClusterListBySubscriptionOperationResponse) LoadMore(ctx context.Context) (resp ConnectedClusterListBySubscriptionOperationResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +// ConnectedClusterListBySubscription ... +func (c ConnectedClustersClient) ConnectedClusterListBySubscription(ctx context.Context, id commonids.SubscriptionId) (resp ConnectedClusterListBySubscriptionOperationResponse, err error) { + req, err := c.preparerForConnectedClusterListBySubscription(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListBySubscription", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListBySubscription", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForConnectedClusterListBySubscription(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListBySubscription", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// preparerForConnectedClusterListBySubscription prepares the ConnectedClusterListBySubscription request. 
+func (c ConnectedClustersClient) preparerForConnectedClusterListBySubscription(ctx context.Context, id commonids.SubscriptionId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/providers/Microsoft.Kubernetes/connectedClusters", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForConnectedClusterListBySubscriptionWithNextLink prepares the ConnectedClusterListBySubscription request with the given nextLink token. +func (c ConnectedClustersClient) preparerForConnectedClusterListBySubscriptionWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForConnectedClusterListBySubscription handles the response to the ConnectedClusterListBySubscription request. The method always +// closes the http.Response Body. +func (c ConnectedClustersClient) responderForConnectedClusterListBySubscription(resp *http.Response) (result ConnectedClusterListBySubscriptionOperationResponse, err error) { + type page struct { + Values []ConnectedCluster `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ConnectedClusterListBySubscriptionOperationResponse, err error) { + req, err := c.preparerForConnectedClusterListBySubscriptionWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListBySubscription", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListBySubscription", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForConnectedClusterListBySubscription(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListBySubscription", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} + +// ConnectedClusterListBySubscriptionComplete retrieves all of the results into a single object +func (c ConnectedClustersClient) ConnectedClusterListBySubscriptionComplete(ctx context.Context, id 
commonids.SubscriptionId) (ConnectedClusterListBySubscriptionCompleteResult, error) { + return c.ConnectedClusterListBySubscriptionCompleteMatchingPredicate(ctx, id, ConnectedClusterOperationPredicate{}) +} + +// ConnectedClusterListBySubscriptionCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c ConnectedClustersClient) ConnectedClusterListBySubscriptionCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate ConnectedClusterOperationPredicate) (resp ConnectedClusterListBySubscriptionCompleteResult, err error) { + items := make([]ConnectedCluster, 0) + + page, err := c.ConnectedClusterListBySubscription(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ConnectedClusterListBySubscriptionCompleteResult{ + Items: items, + } + return out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterlistclusterusercredential_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterlistclusterusercredential_autorest.go new file mode 100644 index 000000000000..ae613bee39f9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterlistclusterusercredential_autorest.go @@ -0,0 +1,70 @@ +package connectedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectedClusterListClusterUserCredentialOperationResponse struct { + HttpResponse *http.Response + Model *CredentialResults +} + +// ConnectedClusterListClusterUserCredential ... 
+func (c ConnectedClustersClient) ConnectedClusterListClusterUserCredential(ctx context.Context, id ConnectedClusterId, input ListClusterUserCredentialProperties) (result ConnectedClusterListClusterUserCredentialOperationResponse, err error) { + req, err := c.preparerForConnectedClusterListClusterUserCredential(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListClusterUserCredential", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListClusterUserCredential", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForConnectedClusterListClusterUserCredential(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterListClusterUserCredential", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForConnectedClusterListClusterUserCredential prepares the ConnectedClusterListClusterUserCredential request. +func (c ConnectedClustersClient) preparerForConnectedClusterListClusterUserCredential(ctx context.Context, id ConnectedClusterId, input ListClusterUserCredentialProperties) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/listClusterUserCredential", id.ID())), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForConnectedClusterListClusterUserCredential handles the response to the ConnectedClusterListClusterUserCredential request. The method always +// closes the http.Response Body. +func (c ConnectedClustersClient) responderForConnectedClusterListClusterUserCredential(resp *http.Response) (result ConnectedClusterListClusterUserCredentialOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterupdate_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterupdate_autorest.go new file mode 100644 index 000000000000..d12988702916 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/method_connectedclusterupdate_autorest.go @@ -0,0 +1,69 @@ +package connectedclusters + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectedClusterUpdateOperationResponse struct { + HttpResponse *http.Response + Model *ConnectedCluster +} + +// ConnectedClusterUpdate ... 
+func (c ConnectedClustersClient) ConnectedClusterUpdate(ctx context.Context, id ConnectedClusterId, input ConnectedClusterPatch) (result ConnectedClusterUpdateOperationResponse, err error) { + req, err := c.preparerForConnectedClusterUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterUpdate", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterUpdate", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForConnectedClusterUpdate(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "connectedclusters.ConnectedClustersClient", "ConnectedClusterUpdate", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForConnectedClusterUpdate prepares the ConnectedClusterUpdate request. +func (c ConnectedClustersClient) preparerForConnectedClusterUpdate(ctx context.Context, id ConnectedClusterId, input ConnectedClusterPatch) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForConnectedClusterUpdate handles the response to the ConnectedClusterUpdate request. The method always +// closes the http.Response Body. +func (c ConnectedClustersClient) responderForConnectedClusterUpdate(resp *http.Response) (result ConnectedClusterUpdateOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_connectedcluster.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_connectedcluster.go new file mode 100644 index 000000000000..d1f61f12e1a9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_connectedcluster.go @@ -0,0 +1,20 @@ +package connectedclusters + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectedCluster struct { + Id *string `json:"id,omitempty"` + Identity identity.SystemAssigned `json:"identity"` + Location string `json:"location"` + Name *string `json:"name,omitempty"` + Properties ConnectedClusterProperties `json:"properties"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_connectedclusterpatch.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_connectedclusterpatch.go new file mode 100644 index 000000000000..560b8bccc64f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_connectedclusterpatch.go @@ -0,0 +1,9 @@ +package connectedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectedClusterPatch struct { + Properties *interface{} `json:"properties,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_connectedclusterproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_connectedclusterproperties.go new file mode 100644 index 000000000000..eb7e5c945f1d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_connectedclusterproperties.go @@ -0,0 +1,49 @@ +package connectedclusters + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectedClusterProperties struct { + AgentPublicKeyCertificate string `json:"agentPublicKeyCertificate"` + AgentVersion *string `json:"agentVersion,omitempty"` + ConnectivityStatus *ConnectivityStatus `json:"connectivityStatus,omitempty"` + Distribution *string `json:"distribution,omitempty"` + Infrastructure *string `json:"infrastructure,omitempty"` + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` + LastConnectivityTime *string `json:"lastConnectivityTime,omitempty"` + ManagedIdentityCertificateExpirationTime *string `json:"managedIdentityCertificateExpirationTime,omitempty"` + Offering *string `json:"offering,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + TotalCoreCount *int64 `json:"totalCoreCount,omitempty"` + TotalNodeCount *int64 `json:"totalNodeCount,omitempty"` +} + +func (o *ConnectedClusterProperties) GetLastConnectivityTimeAsTime() (*time.Time, error) { + if o.LastConnectivityTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastConnectivityTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *ConnectedClusterProperties) SetLastConnectivityTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastConnectivityTime = &formatted +} + +func (o *ConnectedClusterProperties) GetManagedIdentityCertificateExpirationTimeAsTime() (*time.Time, error) { + if o.ManagedIdentityCertificateExpirationTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.ManagedIdentityCertificateExpirationTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *ConnectedClusterProperties) SetManagedIdentityCertificateExpirationTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.ManagedIdentityCertificateExpirationTime = &formatted +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_credentialresult.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_credentialresult.go new file mode 100644 index 000000000000..dc74c9fd42e6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_credentialresult.go @@ -0,0 +1,9 @@ +package connectedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CredentialResult struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_credentialresults.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_credentialresults.go new file mode 100644 index 000000000000..a64afb76a343 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_credentialresults.go @@ -0,0 +1,9 @@ +package connectedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CredentialResults struct { + HybridConnectionConfig *HybridConnectionConfig `json:"hybridConnectionConfig,omitempty"` + Kubeconfigs *[]CredentialResult `json:"kubeconfigs,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_hybridconnectionconfig.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_hybridconnectionconfig.go new file mode 100644 index 000000000000..2f2c102d61a2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_hybridconnectionconfig.go @@ -0,0 +1,11 @@ +package connectedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type HybridConnectionConfig struct { + ExpirationTime *int64 `json:"expirationTime,omitempty"` + HybridConnectionName *string `json:"hybridConnectionName,omitempty"` + Relay *string `json:"relay,omitempty"` + Token *string `json:"token,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_listclusterusercredentialproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_listclusterusercredentialproperties.go new file mode 100644 index 000000000000..e8e0c57aaab7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/model_listclusterusercredentialproperties.go @@ -0,0 +1,9 @@ +package connectedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListClusterUserCredentialProperties struct { + AuthenticationMethod AuthenticationMethod `json:"authenticationMethod"` + ClientProxy bool `json:"clientProxy"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/predicates.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/predicates.go new file mode 100644 index 000000000000..004e1bd53cbd --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/predicates.go @@ -0,0 +1,32 @@ +package connectedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectedClusterOperationPredicate struct { + Id *string + Location *string + Name *string + Type *string +} + +func (p ConnectedClusterOperationPredicate) Matches(input ConnectedCluster) bool { + + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { + return false + } + + if p.Location != nil && *p.Location != input.Location { + return false + } + + if p.Name != nil && (input.Name == nil && *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil && *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/version.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/version.go new file mode 100644 index 000000000000..078fe01f8f68 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters/version.go @@ -0,0 +1,12 @@ +package connectedclusters + +import "fmt" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2021-10-01" + +func userAgent() string { + return fmt.Sprintf("hashicorp/go-azure-sdk/connectedclusters/%s", defaultApiVersion) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index cfa1bac39fab..e5881d3a6cd8 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -329,6 +329,7 @@ github.com/hashicorp/go-azure-sdk/resource-manager/healthbot/2020-12-08/healthbo github.com/hashicorp/go-azure-sdk/resource-manager/hybridcompute/2022-11-10/machineextensions github.com/hashicorp/go-azure-sdk/resource-manager/hybridcompute/2022-11-10/machines github.com/hashicorp/go-azure-sdk/resource-manager/hybridcompute/2022-11-10/privateendpointconnections +github.com/hashicorp/go-azure-sdk/resource-manager/hybridkubernetes/2021-10-01/connectedclusters github.com/hashicorp/go-azure-sdk/resource-manager/insights/2021-05-01-preview/diagnosticsettings github.com/hashicorp/go-azure-sdk/resource-manager/insights/2021-05-01-preview/diagnosticsettingscategories github.com/hashicorp/go-azure-sdk/resource-manager/insights/2021-08-01/scheduledqueryrules diff --git a/website/allowed-subcategories b/website/allowed-subcategories index 4c6be465d524..3dfbefb74e1f 100644 --- a/website/allowed-subcategories +++ b/website/allowed-subcategories @@ -6,6 +6,7 @@ Analysis Services App Configuration App Service (Web Apps) Application Insights +ArcKubernetes Attestation Authorization Automation diff --git a/website/docs/r/arc_kubernetes_cluster.html.markdown b/website/docs/r/arc_kubernetes_cluster.html.markdown new file mode 100644 index 000000000000..c426dd146f6e --- /dev/null +++ b/website/docs/r/arc_kubernetes_cluster.html.markdown @@ -0,0 +1,108 @@ +--- +subcategory: "ArcKubernetes" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_arc_kubernetes_cluster" +description: |- + Manages an Arc Kubernetes Cluster. +--- + +# azurerm_arc_kubernetes_cluster + +Manages an Arc Kubernetes Cluster. + +-> **Note:** Installing and configuring the Azure Arc Agent on your Kubernetes Cluster to establish connectivity is outside the scope of this document. 
For more details refer to [Deploy agents to your cluster](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/conceptual-agent-overview#deploy-agents-to-your-cluster) and [Connect an existing Kubernetes Cluster](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/quickstart-connect-cluster?tabs=azure-cli#connect-an-existing-kubernetes-cluster). If you encounter issues connecting your Kubernetes Cluster to Azure Arc, we'd recommend opening a ticket with Microsoft Support.
+
+## Example Usage
+
+```hcl
+resource "azurerm_resource_group" "example" {
+  name     = "example-resources"
+  location = "West Europe"
+}
+
+resource "azurerm_arc_kubernetes_cluster" "example" {
+  name                         = "example-akcc"
+  resource_group_name          = azurerm_resource_group.example.name
+  location                     = "West Europe"
+  agent_public_key_certificate = filebase64("testdata/public.cer")
+
+  identity {
+    type = "SystemAssigned"
+  }
+
+  tags = {
+    ENV = "Test"
+  }
+}
+```
+
+-> **Note:** An extensive example of connecting the `azurerm_arc_kubernetes_cluster` to an external Kubernetes cluster can be found in [the `./examples/arckubernetes` directory within the GitHub Repository](https://github.com/hashicorp/terraform-provider-azurerm/tree/main/examples/arckubernetes).
+
+## Arguments Reference
+
+The following arguments are supported:
+
+* `name` - (Required) Specifies the name which should be used for this Arc Kubernetes Cluster. Changing this forces a new Arc Kubernetes Cluster to be created.
+
+* `resource_group_name` - (Required) Specifies the name of the Resource Group where the Arc Kubernetes Cluster should exist. Changing this forces a new Arc Kubernetes Cluster to be created.
+
+* `agent_public_key_certificate` - (Required) Specifies the base64-encoded public certificate used by the agent to do the initial handshake to the backend services in Azure. Changing this forces a new Arc Kubernetes Cluster to be created.
+
+* `identity` - (Required) An `identity` block as defined below. Changing this forces a new Arc Kubernetes Cluster to be created.
+
+* `location` - (Required) Specifies the Azure Region where the Arc Kubernetes Cluster should exist. Changing this forces a new Arc Kubernetes Cluster to be created.
+
+* `tags` - (Optional) A mapping of tags which should be assigned to the Arc Kubernetes Cluster.
+
+---
+
+An `identity` block supports the following:
+
+* `type` - (Required) Specifies the type of Managed Service Identity assigned to this Arc Kubernetes Cluster. At this time the only possible value is `SystemAssigned`.
+
+## Attributes Reference
+
+In addition to the Arguments listed above - the following Attributes are exported:
+
+* `id` - The ID of the Arc Kubernetes Cluster.
+
+* `agent_version` - Version of the agent running on the cluster resource.
+
+* `distribution` - The distribution running on this Arc Kubernetes Cluster.
+
+* `identity` - An `identity` block as defined below.
+
+* `infrastructure` - The infrastructure on which the Arc Kubernetes Cluster is running.
+
+* `kubernetes_version` - The Kubernetes version of the cluster resource.
+
+* `offering` - The cluster offering.
+
+* `total_core_count` - Number of CPU cores present in the cluster resource.
+
+* `total_node_count` - Number of nodes present in the cluster resource.
+
+---
+
+An `identity` block exports the following:
+
+* `principal_id` - The Principal ID associated with this Managed Service Identity.
+
+* `tenant_id` - The Tenant ID associated with this Managed Service Identity.
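+
+The following is a minimal, illustrative sketch (not part of the resource schema) showing how a few of the exported attributes listed above could be referenced from Terraform outputs, assuming the `azurerm_arc_kubernetes_cluster.example` resource from the Example Usage:
+
+```hcl
+# Illustrative only: surface exported attributes of the example resource above.
+output "arc_kubernetes_version" {
+  # Reported once the Arc agents in the cluster have connected to Azure.
+  value = azurerm_arc_kubernetes_cluster.example.kubernetes_version
+}
+
+output "arc_cluster_principal_id" {
+  # Principal ID of the SystemAssigned identity exported by the `identity` block.
+  value = azurerm_arc_kubernetes_cluster.example.identity[0].principal_id
+}
+```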
+
+## Timeouts
+
+The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions:
+
+* `create` - (Defaults to 30 minutes) Used when creating the Arc Kubernetes Cluster.
+* `read` - (Defaults to 5 minutes) Used when retrieving the Arc Kubernetes Cluster.
+* `update` - (Defaults to 30 minutes) Used when updating the Arc Kubernetes Cluster.
+* `delete` - (Defaults to 30 minutes) Used when deleting the Arc Kubernetes Cluster.
+
+## Import
+
+Arc Kubernetes Cluster can be imported using the `resource id`, e.g.
+
+```shell
+terraform import azurerm_arc_kubernetes_cluster.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resourceGroup1/providers/Microsoft.Kubernetes/connectedClusters/cluster1
+```
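+
+As a brief illustration of the Timeouts section above, custom timeouts can be set directly on the resource; this is a sketch using the values from the Example Usage, not required configuration:
+
+```hcl
+resource "azurerm_arc_kubernetes_cluster" "example" {
+  name                         = "example-akcc"
+  resource_group_name          = azurerm_resource_group.example.name
+  location                     = "West Europe"
+  agent_public_key_certificate = filebase64("testdata/public.cer")
+
+  identity {
+    type = "SystemAssigned"
+  }
+
+  # Illustrative only: raise the create and delete timeouts from their 30 minute defaults.
+  timeouts {
+    create = "45m"
+    delete = "45m"
+  }
+}
+```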