diff --git a/internal/services/containers/client/client.go b/internal/services/containers/client/client.go index e8f509528d54..02d30dd0f8f5 100644 --- a/internal/services/containers/client/client.go +++ b/internal/services/containers/client/client.go @@ -3,18 +3,20 @@ package client import ( legacy "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-08-01/containerservice" "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2021-08-01-preview/containerregistry" - "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice" "github.com/Azure/go-autorest/autorest/azure" "github.com/hashicorp/go-azure-sdk/resource-manager/containerinstance/2021-10-01/containerinstance" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters" "github.com/hashicorp/terraform-provider-azurerm/internal/common" ) type Client struct { - AgentPoolsClient *containerservice.AgentPoolsClient + AgentPoolsClient *agentpools.AgentPoolsClient ContainerRegistryAgentPoolsClient *containerregistry.AgentPoolsClient ContainerInstanceClient *containerinstance.ContainerInstanceClient - KubernetesClustersClient *containerservice.ManagedClustersClient - MaintenanceConfigurationsClient *containerservice.MaintenanceConfigurationsClient + KubernetesClustersClient *managedclusters.ManagedClustersClient + MaintenanceConfigurationsClient *maintenanceconfigurations.MaintenanceConfigurationsClient RegistriesClient *containerregistry.RegistriesClient ReplicationsClient *containerregistry.ReplicationsClient ServicesClient *legacy.ContainerServicesClient @@ -57,13 +59,13 @@ func NewClient(o *common.ClientOptions) *Client { o.ConfigureClient(&containerInstanceClient.Client, o.ResourceManagerAuthorizer) // AKS - kubernetesClustersClient := containerservice.NewManagedClustersClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + kubernetesClustersClient := managedclusters.NewManagedClustersClientWithBaseURI(o.ResourceManagerEndpoint) o.ConfigureClient(&kubernetesClustersClient.Client, o.ResourceManagerAuthorizer) - agentPoolsClient := containerservice.NewAgentPoolsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + agentPoolsClient := agentpools.NewAgentPoolsClientWithBaseURI(o.ResourceManagerEndpoint) o.ConfigureClient(&agentPoolsClient.Client, o.ResourceManagerAuthorizer) - maintenanceConfigurationsClient := containerservice.NewMaintenanceConfigurationsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + maintenanceConfigurationsClient := maintenanceconfigurations.NewMaintenanceConfigurationsClientWithBaseURI(o.ResourceManagerEndpoint) o.ConfigureClient(&maintenanceConfigurationsClient.Client, o.ResourceManagerAuthorizer) servicesClient := legacy.NewContainerServicesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) diff --git a/internal/services/containers/kubernetes_addons.go b/internal/services/containers/kubernetes_addons.go index 5789cc6ec84b..4edafb502cee 100644 --- a/internal/services/containers/kubernetes_addons.go +++ b/internal/services/containers/kubernetes_addons.go @@ -4,8 +4,8 @@ import ( "fmt" "strings" - "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice" 
"github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters" "github.com/hashicorp/go-azure-sdk/resource-manager/operationalinsights/2020-08-01/workspaces" commonValidate "github.com/hashicorp/terraform-provider-azurerm/helpers/validate" containerValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/validate" @@ -241,63 +241,63 @@ func schemaKubernetesAddOns() map[string]*pluginsdk.Schema { return out } -func expandKubernetesAddOns(d *pluginsdk.ResourceData, input map[string]interface{}, env azure.Environment) (*map[string]*containerservice.ManagedClusterAddonProfile, error) { - disabled := containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(false), +func expandKubernetesAddOns(d *pluginsdk.ResourceData, input map[string]interface{}, env azure.Environment) (*map[string]managedclusters.ManagedClusterAddonProfile, error) { + disabled := managedclusters.ManagedClusterAddonProfile{ + Enabled: false, } - addonProfiles := map[string]*containerservice.ManagedClusterAddonProfile{} + addonProfiles := map[string]managedclusters.ManagedClusterAddonProfile{} if d.HasChange("http_application_routing_enabled") { - addonProfiles[httpApplicationRoutingKey] = &containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(input["http_application_routing_enabled"].(bool)), + addonProfiles[httpApplicationRoutingKey] = managedclusters.ManagedClusterAddonProfile{ + Enabled: input["http_application_routing_enabled"].(bool), } } omsAgent := input["oms_agent"].([]interface{}) if len(omsAgent) > 0 && omsAgent[0] != nil { value := omsAgent[0].(map[string]interface{}) - config := make(map[string]*string) + config := make(map[string]string) if workspaceID, ok := value["log_analytics_workspace_id"]; ok && workspaceID != "" { lawid, err := workspaces.ParseWorkspaceID(workspaceID.(string)) if err != nil { return nil, fmt.Errorf("parsing Log Analytics Workspace ID: %+v", err) } - config["logAnalyticsWorkspaceResourceID"] = utils.String(lawid.ID()) + config["logAnalyticsWorkspaceResourceID"] = lawid.ID() } - addonProfiles[omsAgentKey] = &containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(true), - Config: config, + addonProfiles[omsAgentKey] = managedclusters.ManagedClusterAddonProfile{ + Enabled: true, + Config: &config, } } else if len(omsAgent) == 0 && d.HasChange("oms_agent") { - addonProfiles[omsAgentKey] = &disabled + addonProfiles[omsAgentKey] = disabled } aciConnector := input["aci_connector_linux"].([]interface{}) if len(aciConnector) > 0 && aciConnector[0] != nil { value := aciConnector[0].(map[string]interface{}) - config := make(map[string]*string) + config := make(map[string]string) if subnetName, ok := value["subnet_name"]; ok && subnetName != "" { - config["SubnetName"] = utils.String(subnetName.(string)) + config["SubnetName"] = subnetName.(string) } - addonProfiles[aciConnectorKey] = &containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(true), - Config: config, + addonProfiles[aciConnectorKey] = managedclusters.ManagedClusterAddonProfile{ + Enabled: true, + Config: &config, } } else if len(aciConnector) == 0 && d.HasChange("aci_connector_linux") { - addonProfiles[aciConnectorKey] = &disabled + addonProfiles[aciConnectorKey] = disabled } if ok := d.HasChange("azure_policy_enabled"); ok { v := input["azure_policy_enabled"].(bool) - props := &containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(v), - Config: 
map[string]*string{ - "version": utils.String("v2"), - }, + props := managedclusters.ManagedClusterAddonProfile{ + Enabled: v, + Config: utils.ToPtr(map[string]string{ + "version": "v2", + }), } addonProfiles[azurePolicyKey] = props } @@ -305,35 +305,35 @@ func expandKubernetesAddOns(d *pluginsdk.ResourceData, input map[string]interfac ingressApplicationGateway := input["ingress_application_gateway"].([]interface{}) if len(ingressApplicationGateway) > 0 && ingressApplicationGateway[0] != nil { value := ingressApplicationGateway[0].(map[string]interface{}) - config := make(map[string]*string) + config := make(map[string]string) if gatewayId, ok := value["gateway_id"]; ok && gatewayId != "" { - config["applicationGatewayId"] = utils.String(gatewayId.(string)) + config["applicationGatewayId"] = gatewayId.(string) } if gatewayName, ok := value["gateway_name"]; ok && gatewayName != "" { - config["applicationGatewayName"] = utils.String(gatewayName.(string)) + config["applicationGatewayName"] = gatewayName.(string) } if subnetCIDR, ok := value["subnet_cidr"]; ok && subnetCIDR != "" { - config["subnetCIDR"] = utils.String(subnetCIDR.(string)) + config["subnetCIDR"] = subnetCIDR.(string) } if subnetId, ok := value["subnet_id"]; ok && subnetId != "" { - config["subnetId"] = utils.String(subnetId.(string)) + config["subnetId"] = subnetId.(string) } - addonProfiles[ingressApplicationGatewayKey] = &containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(true), - Config: config, + addonProfiles[ingressApplicationGatewayKey] = managedclusters.ManagedClusterAddonProfile{ + Enabled: true, + Config: &config, } } else if len(ingressApplicationGateway) == 0 && d.HasChange("ingress_application_gateway") { - addonProfiles[ingressApplicationGatewayKey] = &disabled + addonProfiles[ingressApplicationGatewayKey] = disabled } if ok := d.HasChange("open_service_mesh_enabled"); ok { - addonProfiles[openServiceMeshKey] = &containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(input["open_service_mesh_enabled"].(bool)), + addonProfiles[openServiceMeshKey] = managedclusters.ManagedClusterAddonProfile{ + Enabled: input["open_service_mesh_enabled"].(bool), Config: nil, } } @@ -341,28 +341,28 @@ func expandKubernetesAddOns(d *pluginsdk.ResourceData, input map[string]interfac azureKeyVaultSecretsProvider := input["key_vault_secrets_provider"].([]interface{}) if len(azureKeyVaultSecretsProvider) > 0 && azureKeyVaultSecretsProvider[0] != nil { value := azureKeyVaultSecretsProvider[0].(map[string]interface{}) - config := make(map[string]*string) + config := make(map[string]string) enableSecretRotation := fmt.Sprintf("%t", value["secret_rotation_enabled"].(bool)) - config["enableSecretRotation"] = utils.String(enableSecretRotation) - config["rotationPollInterval"] = utils.String(value["secret_rotation_interval"].(string)) + config["enableSecretRotation"] = enableSecretRotation + config["rotationPollInterval"] = value["secret_rotation_interval"].(string) - addonProfiles[azureKeyvaultSecretsProviderKey] = &containerservice.ManagedClusterAddonProfile{ - Enabled: utils.Bool(true), - Config: config, + addonProfiles[azureKeyvaultSecretsProviderKey] = managedclusters.ManagedClusterAddonProfile{ + Enabled: true, + Config: &config, } } else if len(azureKeyVaultSecretsProvider) == 0 && d.HasChange("key_vault_secrets_provider") { - addonProfiles[azureKeyvaultSecretsProviderKey] = &disabled + addonProfiles[azureKeyvaultSecretsProviderKey] = disabled } return filterUnsupportedKubernetesAddOns(addonProfiles, env) } 
-func filterUnsupportedKubernetesAddOns(input map[string]*containerservice.ManagedClusterAddonProfile, env azure.Environment) (*map[string]*containerservice.ManagedClusterAddonProfile, error) { - filter := func(input map[string]*containerservice.ManagedClusterAddonProfile, key string) (*map[string]*containerservice.ManagedClusterAddonProfile, error) { +func filterUnsupportedKubernetesAddOns(input map[string]managedclusters.ManagedClusterAddonProfile, env azure.Environment) (*map[string]managedclusters.ManagedClusterAddonProfile, error) { + filter := func(input map[string]managedclusters.ManagedClusterAddonProfile, key string) (map[string]managedclusters.ManagedClusterAddonProfile, error) { output := input if v, ok := output[key]; ok { - if v.Enabled != nil && *v.Enabled { + if v.Enabled { return nil, fmt.Errorf("The addon %q is not supported for a Kubernetes Cluster located in %q", key, env.Name) } @@ -370,7 +370,7 @@ func filterUnsupportedKubernetesAddOns(input map[string]*containerservice.Manage delete(output, key) } - return &output, nil + return output, nil } output := input @@ -381,135 +381,130 @@ func filterUnsupportedKubernetesAddOns(input map[string]*containerservice.Manage return nil, err } - output = *out + output = out } } return &output, nil } -func flattenKubernetesAddOns(profile map[string]*containerservice.ManagedClusterAddonProfile) map[string]interface{} { +func flattenKubernetesAddOns(profile map[string]managedclusters.ManagedClusterAddonProfile) map[string]interface{} { aciConnectors := make([]interface{}, 0) - if aciConnector := kubernetesAddonProfileLocate(profile, aciConnectorKey); aciConnector != nil { - if enabled := aciConnector.Enabled; enabled != nil && *enabled { - subnetName := "" - if v := aciConnector.Config["SubnetName"]; v != nil { - subnetName = *v - } - - aciConnectors = append(aciConnectors, map[string]interface{}{ - "subnet_name": subnetName, - }) + aciConnector := kubernetesAddonProfileLocate(profile, aciConnectorKey) + if enabled := aciConnector.Enabled; enabled { + subnetName := "" + if v := aciConnector.Config; v != nil && (*v)["SubnetName"] != "" { + subnetName = (*v)["SubnetName"] } + aciConnectors = append(aciConnectors, map[string]interface{}{ + "subnet_name": subnetName, + }) } azurePolicyEnabled := false - if azurePolicy := kubernetesAddonProfileLocate(profile, azurePolicyKey); azurePolicy != nil { - if enabledVal := azurePolicy.Enabled; enabledVal != nil { - azurePolicyEnabled = *enabledVal - } + azurePolicy := kubernetesAddonProfileLocate(profile, azurePolicyKey) + if enabledVal := azurePolicy.Enabled; enabledVal { + azurePolicyEnabled = enabledVal } httpApplicationRoutingEnabled := false httpApplicationRoutingZone := "" - if httpApplicationRouting := kubernetesAddonProfileLocate(profile, httpApplicationRoutingKey); httpApplicationRouting != nil { - if enabledVal := httpApplicationRouting.Enabled; enabledVal != nil { - httpApplicationRoutingEnabled = *enabledVal - } + httpApplicationRouting := kubernetesAddonProfileLocate(profile, httpApplicationRoutingKey) + if enabledVal := httpApplicationRouting.Enabled; enabledVal { + httpApplicationRoutingEnabled = enabledVal + } - if v := kubernetesAddonProfilelocateInConfig(httpApplicationRouting.Config, "HTTPApplicationRoutingZoneName"); v != nil { - httpApplicationRoutingZone = *v - } + if v := kubernetesAddonProfilelocateInConfig(httpApplicationRouting.Config, "HTTPApplicationRoutingZoneName"); v != "" { + httpApplicationRoutingZone = v } omsAgents := make([]interface{}, 0) - if omsAgent := 
kubernetesAddonProfileLocate(profile, omsAgentKey); omsAgent != nil { - if enabled := omsAgent.Enabled; enabled != nil && *enabled { - workspaceID := "" - if v := kubernetesAddonProfilelocateInConfig(omsAgent.Config, "logAnalyticsWorkspaceResourceID"); v != nil { - if lawid, err := workspaces.ParseWorkspaceID(*v); err == nil { - workspaceID = lawid.ID() - } + omsAgent := kubernetesAddonProfileLocate(profile, omsAgentKey) + if enabled := omsAgent.Enabled; enabled { + workspaceID := "" + if v := kubernetesAddonProfilelocateInConfig(omsAgent.Config, "logAnalyticsWorkspaceResourceID"); v != "" { + if lawid, err := workspaces.ParseWorkspaceID(v); err == nil { + workspaceID = lawid.ID() } + } - omsAgentIdentity := flattenKubernetesClusterAddOnIdentityProfile(omsAgent.Identity) + omsAgentIdentity := flattenKubernetesClusterAddOnIdentityProfile(omsAgent.Identity) + + omsAgents = append(omsAgents, map[string]interface{}{ + "log_analytics_workspace_id": workspaceID, + "oms_agent_identity": omsAgentIdentity, + }) - omsAgents = append(omsAgents, map[string]interface{}{ - "log_analytics_workspace_id": workspaceID, - "oms_agent_identity": omsAgentIdentity, - }) - } } ingressApplicationGateways := make([]interface{}, 0) - if ingressApplicationGateway := kubernetesAddonProfileLocate(profile, ingressApplicationGatewayKey); ingressApplicationGateway != nil { - if enabled := ingressApplicationGateway.Enabled; enabled != nil && *enabled { - gatewayId := "" - if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "applicationGatewayId"); v != nil { - gatewayId = *v - } - - gatewayName := "" - if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "applicationGatewayName"); v != nil { - gatewayName = *v - } + ingressApplicationGateway := kubernetesAddonProfileLocate(profile, ingressApplicationGatewayKey) + if enabled := ingressApplicationGateway.Enabled; enabled { + gatewayId := "" - effectiveGatewayId := "" - if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "effectiveApplicationGatewayId"); v != nil { - effectiveGatewayId = *v - } + if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "applicationGatewayId"); v != "" { + gatewayId = v + } - subnetCIDR := "" - if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "subnetCIDR"); v != nil { - subnetCIDR = *v - } + gatewayName := "" + if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "applicationGatewayName"); v != "" { + gatewayName = v + } - subnetId := "" - if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "subnetId"); v != nil { - subnetId = *v - } + effectiveGatewayId := "" + if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "effectiveApplicationGatewayId"); v != "" { + effectiveGatewayId = v + } - ingressApplicationGatewayIdentity := flattenKubernetesClusterAddOnIdentityProfile(ingressApplicationGateway.Identity) + subnetCIDR := "" + if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "subnetCIDR"); v != "" { + subnetCIDR = v + } - ingressApplicationGateways = append(ingressApplicationGateways, map[string]interface{}{ - "gateway_id": gatewayId, - "gateway_name": gatewayName, - "effective_gateway_id": effectiveGatewayId, - "subnet_cidr": subnetCIDR, - "subnet_id": subnetId, - "ingress_application_gateway_identity": ingressApplicationGatewayIdentity, - }) + subnetId := "" + if v := 
kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "subnetId"); v != "" { + subnetId = v } + + ingressApplicationGatewayIdentity := flattenKubernetesClusterAddOnIdentityProfile(ingressApplicationGateway.Identity) + + ingressApplicationGateways = append(ingressApplicationGateways, map[string]interface{}{ + "gateway_id": gatewayId, + "gateway_name": gatewayName, + "effective_gateway_id": effectiveGatewayId, + "subnet_cidr": subnetCIDR, + "subnet_id": subnetId, + "ingress_application_gateway_identity": ingressApplicationGatewayIdentity, + }) } openServiceMeshEnabled := false - if openServiceMesh := kubernetesAddonProfileLocate(profile, openServiceMeshKey); openServiceMesh != nil { - if enabledVal := openServiceMesh.Enabled; enabledVal != nil { - openServiceMeshEnabled = *enabledVal - } + openServiceMesh := kubernetesAddonProfileLocate(profile, openServiceMeshKey) + if enabledVal := openServiceMesh.Enabled; enabledVal { + openServiceMeshEnabled = enabledVal } azureKeyVaultSecretsProviders := make([]interface{}, 0) - if azureKeyVaultSecretsProvider := kubernetesAddonProfileLocate(profile, azureKeyvaultSecretsProviderKey); azureKeyVaultSecretsProvider != nil { - if enabled := azureKeyVaultSecretsProvider.Enabled; enabled != nil && *enabled { - enableSecretRotation := false - if v := kubernetesAddonProfilelocateInConfig(azureKeyVaultSecretsProvider.Config, "enableSecretRotation"); v != nil && *v != "false" { - enableSecretRotation = true - } + azureKeyVaultSecretsProvider := kubernetesAddonProfileLocate(profile, azureKeyvaultSecretsProviderKey) + if enabled := azureKeyVaultSecretsProvider.Enabled; enabled { + enableSecretRotation := false + rotationPollInterval := "" - rotationPollInterval := "" - if v := kubernetesAddonProfilelocateInConfig(azureKeyVaultSecretsProvider.Config, "rotationPollInterval"); v != nil { - rotationPollInterval = *v - } - - azureKeyvaultSecretsProviderIdentity := flattenKubernetesClusterAddOnIdentityProfile(azureKeyVaultSecretsProvider.Identity) + if v := kubernetesAddonProfilelocateInConfig(azureKeyVaultSecretsProvider.Config, "enableSecretRotation"); v != "false" { + enableSecretRotation = true + } - azureKeyVaultSecretsProviders = append(azureKeyVaultSecretsProviders, map[string]interface{}{ - "secret_rotation_enabled": enableSecretRotation, - "secret_rotation_interval": rotationPollInterval, - "secret_identity": azureKeyvaultSecretsProviderIdentity, - }) + if v := kubernetesAddonProfilelocateInConfig(azureKeyVaultSecretsProvider.Config, "rotationPollInterval"); v != "" { + rotationPollInterval = v } + + azureKeyvaultSecretsProviderIdentity := flattenKubernetesClusterAddOnIdentityProfile(azureKeyVaultSecretsProvider.Identity) + + azureKeyVaultSecretsProviders = append(azureKeyVaultSecretsProviders, map[string]interface{}{ + "secret_rotation_enabled": enableSecretRotation, + "secret_rotation_interval": rotationPollInterval, + "secret_identity": azureKeyvaultSecretsProviderIdentity, + }) } return map[string]interface{}{ @@ -524,24 +519,24 @@ func flattenKubernetesAddOns(profile map[string]*containerservice.ManagedCluster } } -func flattenKubernetesClusterAddOnIdentityProfile(profile *containerservice.ManagedClusterAddonProfileIdentity) []interface{} { +func flattenKubernetesClusterAddOnIdentityProfile(profile *managedclusters.UserAssignedIdentity) []interface{} { if profile == nil { return []interface{}{} } identity := make([]interface{}, 0) clientID := "" - if clientid := profile.ClientID; clientid != nil { + if clientid := profile.ClientId; clientid 
!= nil { clientID = *clientid } objectID := "" - if objectid := profile.ObjectID; objectid != nil { + if objectid := profile.ObjectId; objectid != nil { objectID = *objectid } userAssignedIdentityID := "" - if resourceid := profile.ResourceID; resourceid != nil { + if resourceid := profile.ResourceId; resourceid != nil { userAssignedIdentityID = *resourceid } @@ -568,25 +563,29 @@ func collectKubernetesAddons(d *pluginsdk.ResourceData) map[string]interface{} { // when the Kubernetes Cluster is updated in the Portal - Azure updates the casing on the keys // meaning what's submitted could be different to what's returned.. -func kubernetesAddonProfileLocate(profile map[string]*containerservice.ManagedClusterAddonProfile, key string) *containerservice.ManagedClusterAddonProfile { +func kubernetesAddonProfileLocate(profile map[string]managedclusters.ManagedClusterAddonProfile, key string) managedclusters.ManagedClusterAddonProfile { for k, v := range profile { if strings.EqualFold(k, key) { return v } } - return nil + return managedclusters.ManagedClusterAddonProfile{} } // when the Kubernetes Cluster is updated in the Portal - Azure updates the casing on the keys // meaning what's submitted could be different to what's returned.. // Related issue: https://github.com/Azure/azure-rest-api-specs/issues/10716 -func kubernetesAddonProfilelocateInConfig(config map[string]*string, key string) *string { - for k, v := range config { +func kubernetesAddonProfilelocateInConfig(config *map[string]string, key string) string { + if config == nil { + return "" + } + + for k, v := range *config { if strings.EqualFold(k, key) { return v } } - return nil + return "" } diff --git a/internal/services/containers/kubernetes_cluster_data_source.go b/internal/services/containers/kubernetes_cluster_data_source.go index 15fbef21b025..9370a396038c 100644 --- a/internal/services/containers/kubernetes_cluster_data_source.go +++ b/internal/services/containers/kubernetes_cluster_data_source.go @@ -5,17 +5,17 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" + "github.com/hashicorp/go-azure-helpers/resourcemanager/tags" "github.com/hashicorp/go-azure-helpers/resourcemanager/zones" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters" "github.com/hashicorp/go-azure-sdk/resource-manager/operationalinsights/2020-08-01/workspaces" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/kubernetes" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/parse" - "github.com/hashicorp/terraform-provider-azurerm/internal/tags" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" "github.com/hashicorp/terraform-provider-azurerm/utils" @@ -626,167 +626,184 @@ func dataSourceKubernetesClusterRead(d *pluginsdk.ResourceData, meta interface{} ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id := parse.NewClusterID(subscriptionId, 
d.Get("resource_group_name").(string), d.Get("name").(string)) - - resp, err := client.Get(ctx, id.ResourceGroup, id.ManagedClusterName) + id := managedclusters.NewManagedClusterID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) + resp, err := client.Get(ctx, id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return fmt.Errorf("%s was not found", id) } return fmt.Errorf("retrieving %s: %+v", id, err) } - profile, err := client.GetAccessProfile(ctx, id.ResourceGroup, id.ManagedClusterName, "clusterUser") + profileId := managedclusters.NewAccessProfileID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string), "clusterUser") + profile, err := client.GetAccessProfile(ctx, profileId) if err != nil { return fmt.Errorf("retrieving Access Profile for %s: %+v", id, err) } + if profile.Model == nil { + return fmt.Errorf("retrieving Access Profile for %s: payload is empty", id) + } + profileModel := profile.Model d.SetId(id.ID()) + if model := resp.Model; model != nil { + d.Set("name", model.Name) + d.Set("resource_group_name", id.ResourceGroupName) + if location := model.Location; location != "" { + d.Set("location", azure.NormalizeLocation(location)) + } - d.Set("name", resp.Name) - d.Set("resource_group_name", id.ResourceGroup) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) - } + if props := model.Properties; props != nil { + d.Set("dns_prefix", props.DnsPrefix) + d.Set("fqdn", props.Fqdn) + d.Set("disk_encryption_set_id", props.DiskEncryptionSetID) + d.Set("private_fqdn", props.PrivateFQDN) + d.Set("kubernetes_version", props.KubernetesVersion) + d.Set("node_resource_group", props.NodeResourceGroup) + + if accessProfile := props.ApiServerAccessProfile; accessProfile != nil { + apiServerAuthorizedIPRanges := utils.FlattenStringSlice(accessProfile.AuthorizedIPRanges) + if err := d.Set("api_server_authorized_ip_ranges", apiServerAuthorizedIPRanges); err != nil { + return fmt.Errorf("setting `api_server_authorized_ip_ranges`: %+v", err) + } - if props := resp.ManagedClusterProperties; props != nil { - d.Set("dns_prefix", props.DNSPrefix) - d.Set("fqdn", props.Fqdn) - d.Set("disk_encryption_set_id", props.DiskEncryptionSetID) - d.Set("private_fqdn", props.PrivateFQDN) - d.Set("kubernetes_version", props.KubernetesVersion) - d.Set("node_resource_group", props.NodeResourceGroup) - - if accessProfile := props.APIServerAccessProfile; accessProfile != nil { - apiServerAuthorizedIPRanges := utils.FlattenStringSlice(accessProfile.AuthorizedIPRanges) - if err := d.Set("api_server_authorized_ip_ranges", apiServerAuthorizedIPRanges); err != nil { - return fmt.Errorf("setting `api_server_authorized_ip_ranges`: %+v", err) + d.Set("private_cluster_enabled", accessProfile.EnablePrivateCluster) } - d.Set("private_cluster_enabled", accessProfile.EnablePrivateCluster) - } + if addonProfiles := props.AddonProfiles; addonProfiles != nil { + addOns := flattenKubernetesClusterDataSourceAddOns(*addonProfiles) + d.Set("aci_connector_linux", addOns["aci_connector_linux"]) + d.Set("azure_policy_enabled", addOns["azure_policy_enabled"].(bool)) + d.Set("http_application_routing_enabled", addOns["http_application_routing_enabled"].(bool)) + d.Set("http_application_routing_zone_name", addOns["http_application_routing_zone_name"]) + d.Set("oms_agent", addOns["oms_agent"]) + d.Set("ingress_application_gateway", addOns["ingress_application_gateway"]) + 
d.Set("open_service_mesh_enabled", addOns["open_service_mesh_enabled"].(bool)) + d.Set("key_vault_secrets_provider", addOns["key_vault_secrets_provider"]) + } - addOns := flattenKubernetesClusterDataSourceAddOns(props.AddonProfiles) - d.Set("aci_connector_linux", addOns["aci_connector_linux"]) - d.Set("azure_policy_enabled", addOns["azure_policy_enabled"].(bool)) - d.Set("http_application_routing_enabled", addOns["http_application_routing_enabled"].(bool)) - d.Set("http_application_routing_zone_name", addOns["http_application_routing_zone_name"]) - d.Set("oms_agent", addOns["oms_agent"]) - d.Set("ingress_application_gateway", addOns["ingress_application_gateway"]) - d.Set("open_service_mesh_enabled", addOns["open_service_mesh_enabled"].(bool)) - d.Set("key_vault_secrets_provider", addOns["key_vault_secrets_provider"]) - - agentPoolProfiles := flattenKubernetesClusterDataSourceAgentPoolProfiles(props.AgentPoolProfiles) - if err := d.Set("agent_pool_profile", agentPoolProfiles); err != nil { - return fmt.Errorf("setting `agent_pool_profile`: %+v", err) - } + agentPoolProfiles := flattenKubernetesClusterDataSourceAgentPoolProfiles(props.AgentPoolProfiles) + if err := d.Set("agent_pool_profile", agentPoolProfiles); err != nil { + return fmt.Errorf("setting `agent_pool_profile`: %+v", err) + } - kubeletIdentity, err := flattenKubernetesClusterDataSourceIdentityProfile(props.IdentityProfile) - if err != nil { - return err - } - if err := d.Set("kubelet_identity", kubeletIdentity); err != nil { - return fmt.Errorf("setting `kubelet_identity`: %+v", err) - } + kubeletIdentity, err := flattenKubernetesClusterDataSourceIdentityProfile(props.IdentityProfile) + if err != nil { + return err + } + if err := d.Set("kubelet_identity", kubeletIdentity); err != nil { + return fmt.Errorf("setting `kubelet_identity`: %+v", err) + } - linuxProfile := flattenKubernetesClusterDataSourceLinuxProfile(props.LinuxProfile) - if err := d.Set("linux_profile", linuxProfile); err != nil { - return fmt.Errorf("setting `linux_profile`: %+v", err) - } + linuxProfile := flattenKubernetesClusterDataSourceLinuxProfile(props.LinuxProfile) + if err := d.Set("linux_profile", linuxProfile); err != nil { + return fmt.Errorf("setting `linux_profile`: %+v", err) + } - windowsProfile := flattenKubernetesClusterDataSourceWindowsProfile(props.WindowsProfile) - if err := d.Set("windows_profile", windowsProfile); err != nil { - return fmt.Errorf("setting `windows_profile`: %+v", err) - } + windowsProfile := flattenKubernetesClusterDataSourceWindowsProfile(props.WindowsProfile) + if err := d.Set("windows_profile", windowsProfile); err != nil { + return fmt.Errorf("setting `windows_profile`: %+v", err) + } - networkProfile := flattenKubernetesClusterDataSourceNetworkProfile(props.NetworkProfile) - if err := d.Set("network_profile", networkProfile); err != nil { - return fmt.Errorf("setting `network_profile`: %+v", err) - } + networkProfile := flattenKubernetesClusterDataSourceNetworkProfile(props.NetworkProfile) + if err := d.Set("network_profile", networkProfile); err != nil { + return fmt.Errorf("setting `network_profile`: %+v", err) + } - oidcIssuerEnabled := false - oidcIssuerUrl := "" - if props.OidcIssuerProfile != nil { - if props.OidcIssuerProfile.Enabled != nil { - oidcIssuerEnabled = *props.OidcIssuerProfile.Enabled + oidcIssuerEnabled := false + oidcIssuerUrl := "" + if props.OidcIssuerProfile != nil { + if props.OidcIssuerProfile.Enabled != nil { + oidcIssuerEnabled = *props.OidcIssuerProfile.Enabled + } + if 
props.OidcIssuerProfile.IssuerURL != nil { + oidcIssuerUrl = *props.OidcIssuerProfile.IssuerURL + } } - if props.OidcIssuerProfile.IssuerURL != nil { - oidcIssuerUrl = *props.OidcIssuerProfile.IssuerURL + + if err := d.Set("oidc_issuer_enabled", oidcIssuerEnabled); err != nil { + return fmt.Errorf("setting `oidc_issuer_enabled`: %+v", err) + } + if err := d.Set("oidc_issuer_url", oidcIssuerUrl); err != nil { + return fmt.Errorf("setting `oidc_issuer_url`: %+v", err) } - } - if err := d.Set("oidc_issuer_enabled", oidcIssuerEnabled); err != nil { - return fmt.Errorf("setting `oidc_issuer_enabled`: %+v", err) - } - if err := d.Set("oidc_issuer_url", oidcIssuerUrl); err != nil { - return fmt.Errorf("setting `oidc_issuer_url`: %+v", err) - } + rbacEnabled := true + if props.EnableRBAC != nil { + rbacEnabled = *props.EnableRBAC + } + d.Set("role_based_access_control_enabled", rbacEnabled) - rbacEnabled := true - if props.EnableRBAC != nil { - rbacEnabled = *props.EnableRBAC - } - d.Set("role_based_access_control_enabled", rbacEnabled) + microsoftDefender := flattenKubernetesClusterDataSourceMicrosoftDefender(props.SecurityProfile) + if err := d.Set("microsoft_defender", microsoftDefender); err != nil { + return fmt.Errorf("setting `microsoft_defender`: %+v", err) + } - microsoftDefender := flattenKubernetesClusterDataSourceMicrosoftDefender(props.SecurityProfile) - if err := d.Set("microsoft_defender", microsoftDefender); err != nil { - return fmt.Errorf("setting `microsoft_defender`: %+v", err) - } + aadRbac := flattenKubernetesClusterDataSourceAzureActiveDirectoryRoleBasedAccessControl(props) + if err := d.Set("azure_active_directory_role_based_access_control", aadRbac); err != nil { + return fmt.Errorf("setting `azure_active_directory_role_based_access_control`: %+v", err) + } - aadRbac := flattenKubernetesClusterDataSourceAzureActiveDirectoryRoleBasedAccessControl(props) - if err := d.Set("azure_active_directory_role_based_access_control", aadRbac); err != nil { - return fmt.Errorf("setting `azure_active_directory_role_based_access_control`: %+v", err) - } + servicePrincipal := flattenKubernetesClusterDataSourceServicePrincipalProfile(props.ServicePrincipalProfile) + if err := d.Set("service_principal", servicePrincipal); err != nil { + return fmt.Errorf("setting `service_principal`: %+v", err) + } - servicePrincipal := flattenKubernetesClusterDataSourceServicePrincipalProfile(props.ServicePrincipalProfile) - if err := d.Set("service_principal", servicePrincipal); err != nil { - return fmt.Errorf("setting `service_principal`: %+v", err) - } + // adminProfile is only available for RBAC enabled clusters with AAD and without local accounts disabled + if props.AadProfile != nil && (props.DisableLocalAccounts == nil || !*props.DisableLocalAccounts) { + profileId := managedclusters.NewAccessProfileID(subscriptionId, id.ResourceGroupName, id.ResourceName, "clusterAdmin") + adminProfile, err := client.GetAccessProfile(ctx, profileId) + if err != nil { + return fmt.Errorf("retrieving Admin Access Profile for %s: %+v", id, err) + } - // adminProfile is only available for RBAC enabled clusters with AAD and without local accounts disabled - if props.AadProfile != nil && (props.DisableLocalAccounts == nil || !*props.DisableLocalAccounts) { - adminProfile, err := client.GetAccessProfile(ctx, id.ResourceGroup, id.ManagedClusterName, "clusterAdmin") - if err != nil { - return fmt.Errorf("retrieving Admin Access Profile for %s: %+v", id, err) - } + if adminProfileModel := adminProfile.Model; 
adminProfileModel != nil { - adminKubeConfigRaw, adminKubeConfig := flattenKubernetesClusterAccessProfile(adminProfile) - d.Set("kube_admin_config_raw", adminKubeConfigRaw) - if err := d.Set("kube_admin_config", adminKubeConfig); err != nil { - return fmt.Errorf("setting `kube_admin_config`: %+v", err) + adminKubeConfigRaw, adminKubeConfig := flattenKubernetesClusterAccessProfile(*adminProfileModel) + d.Set("kube_admin_config_raw", adminKubeConfigRaw) + if err := d.Set("kube_admin_config", adminKubeConfig); err != nil { + return fmt.Errorf("setting `kube_admin_config`: %+v", err) + } + } + } else { + d.Set("kube_admin_config_raw", "") + d.Set("kube_admin_config", []interface{}{}) } - } else { - d.Set("kube_admin_config_raw", "") - d.Set("kube_admin_config", []interface{}{}) } - } - identity, err := flattenClusterDataSourceIdentity(resp.Identity) - if err != nil { - return fmt.Errorf("setting `identity`: %+v", err) - } + identity, err := flattenClusterDataSourceIdentity(model.Identity) + if err != nil { + return fmt.Errorf("setting `identity`: %+v", err) + } - if err := d.Set("identity", identity); err != nil { - return fmt.Errorf("setting `identity`: %+v", err) - } + if err := d.Set("identity", identity); err != nil { + return fmt.Errorf("setting `identity`: %+v", err) + } - kubeConfigRaw, kubeConfig := flattenKubernetesClusterDataSourceAccessProfile(profile) - d.Set("kube_config_raw", kubeConfigRaw) - if err := d.Set("kube_config", kubeConfig); err != nil { - return fmt.Errorf("setting `kube_config`: %+v", err) + kubeConfigRaw, kubeConfig := flattenKubernetesClusterDataSourceAccessProfile(*profileModel) + d.Set("kube_config_raw", kubeConfigRaw) + if err := d.Set("kube_config", kubeConfig); err != nil { + return fmt.Errorf("setting `kube_config`: %+v", err) + } + + d.Set("tags", tags.Flatten(model.Tags)) } - return tags.FlattenAndSet(d, resp.Tags) + return nil } -func flattenKubernetesClusterDataSourceAccessProfile(profile containerservice.ManagedClusterAccessProfile) (*string, []interface{}) { - if profile.AccessProfile == nil { +func flattenKubernetesClusterDataSourceAccessProfile(profile managedclusters.ManagedClusterAccessProfile) (*string, []interface{}) { + if profile.Properties == nil { return nil, []interface{}{} } - if kubeConfigRaw := profile.AccessProfile.KubeConfig; kubeConfigRaw != nil { - rawConfig := string(*kubeConfigRaw) + if kubeConfigRaw := profile.Properties.KubeConfig; kubeConfigRaw != nil { + rawConfig := *kubeConfigRaw + if base64IsEncoded(*kubeConfigRaw) { + rawConfig, _ = base64Decode(*kubeConfigRaw) + } + var flattenedKubeConfig []interface{} if strings.Contains(rawConfig, "apiserver-id:") || strings.Contains(rawConfig, "exec") { @@ -811,129 +828,121 @@ func flattenKubernetesClusterDataSourceAccessProfile(profile containerservice.Ma return nil, []interface{}{} } -func flattenKubernetesClusterDataSourceAddOns(profile map[string]*containerservice.ManagedClusterAddonProfile) map[string]interface{} { +func flattenKubernetesClusterDataSourceAddOns(profile map[string]managedclusters.ManagedClusterAddonProfile) map[string]interface{} { aciConnectors := make([]interface{}, 0) - if aciConnector := kubernetesAddonProfileLocate(profile, aciConnectorKey); aciConnector != nil { - if enabled := aciConnector.Enabled; enabled != nil && *enabled { - subnetName := "" - if v := aciConnector.Config["SubnetName"]; v != nil { - subnetName = *v - } - - aciConnectors = append(aciConnectors, map[string]interface{}{ - "subnet_name": subnetName, - }) + aciConnector := 
kubernetesAddonProfileLocate(profile, aciConnectorKey) + if enabled := aciConnector.Enabled; enabled { + subnetName := "" + if v := aciConnector.Config; v != nil && (*v)["SubnetName"] != "" { + subnetName = (*v)["SubnetName"] } + aciConnectors = append(aciConnectors, map[string]interface{}{ + "subnet_name": subnetName, + }) } azurePolicyEnabled := false - if azurePolicy := kubernetesAddonProfileLocate(profile, azurePolicyKey); azurePolicy != nil { - if enabledVal := azurePolicy.Enabled; enabledVal != nil { - azurePolicyEnabled = *enabledVal - } + azurePolicy := kubernetesAddonProfileLocate(profile, azurePolicyKey) + if enabledVal := azurePolicy.Enabled; enabledVal { + azurePolicyEnabled = enabledVal } httpApplicationRoutingEnabled := false httpApplicationRoutingZone := "" - if httpApplicationRouting := kubernetesAddonProfileLocate(profile, httpApplicationRoutingKey); httpApplicationRouting != nil { - if enabledVal := httpApplicationRouting.Enabled; enabledVal != nil { - httpApplicationRoutingEnabled = *enabledVal - } + httpApplicationRouting := kubernetesAddonProfileLocate(profile, httpApplicationRoutingKey) + if enabledVal := httpApplicationRouting.Enabled; enabledVal { + httpApplicationRoutingEnabled = enabledVal + } - if v := kubernetesAddonProfilelocateInConfig(httpApplicationRouting.Config, "HTTPApplicationRoutingZoneName"); v != nil { - httpApplicationRoutingZone = *v - } + if v := kubernetesAddonProfilelocateInConfig(httpApplicationRouting.Config, "HTTPApplicationRoutingZoneName"); v != "" { + httpApplicationRoutingZone = v } omsAgents := make([]interface{}, 0) - if omsAgent := kubernetesAddonProfileLocate(profile, omsAgentKey); omsAgent != nil { - if enabled := omsAgent.Enabled; enabled != nil && *enabled { - workspaceID := "" - if v := kubernetesAddonProfilelocateInConfig(omsAgent.Config, "logAnalyticsWorkspaceResourceID"); v != nil { - if lawid, err := workspaces.ParseWorkspaceID(*v); err == nil { - workspaceID = lawid.ID() - } + omsAgent := kubernetesAddonProfileLocate(profile, omsAgentKey) + if enabled := omsAgent.Enabled; enabled { + workspaceID := "" + if v := kubernetesAddonProfilelocateInConfig(omsAgent.Config, "logAnalyticsWorkspaceResourceID"); v != "" { + if lawid, err := workspaces.ParseWorkspaceID(v); err == nil { + workspaceID = lawid.ID() } + } - omsAgentIdentity := flattenKubernetesClusterAddOnIdentityProfile(omsAgent.Identity) + omsAgentIdentity := flattenKubernetesClusterAddOnIdentityProfile(omsAgent.Identity) - omsAgents = append(omsAgents, map[string]interface{}{ - "log_analytics_workspace_id": workspaceID, - "oms_agent_identity": omsAgentIdentity, - }) - } + omsAgents = append(omsAgents, map[string]interface{}{ + "log_analytics_workspace_id": workspaceID, + "oms_agent_identity": omsAgentIdentity, + }) } ingressApplicationGateways := make([]interface{}, 0) - if ingressApplicationGateway := kubernetesAddonProfileLocate(profile, ingressApplicationGatewayKey); ingressApplicationGateway != nil { - if enabled := ingressApplicationGateway.Enabled; enabled != nil && *enabled { - gatewayId := "" - if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "applicationGatewayId"); v != nil { - gatewayId = *v - } + ingressApplicationGateway := kubernetesAddonProfileLocate(profile, ingressApplicationGatewayKey) + if enabled := ingressApplicationGateway.Enabled; enabled { + gatewayId := "" + if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "applicationGatewayId"); v != "" { + gatewayId = v + } - gatewayName := "" - if v := 
kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "applicationGatewayName"); v != nil { - gatewayName = *v - } + gatewayName := "" + if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "applicationGatewayName"); v != "" { + gatewayName = v + } - effectiveGatewayId := "" - if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "effectiveApplicationGatewayId"); v != nil { - effectiveGatewayId = *v - } + effectiveGatewayId := "" + if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "effectiveApplicationGatewayId"); v != "" { + effectiveGatewayId = v + } - subnetCIDR := "" - if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "subnetCIDR"); v != nil { - subnetCIDR = *v - } + subnetCIDR := "" + if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "subnetCIDR"); v != "" { + subnetCIDR = v + } - subnetId := "" - if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "subnetId"); v != nil { - subnetId = *v - } + subnetId := "" + if v := kubernetesAddonProfilelocateInConfig(ingressApplicationGateway.Config, "subnetId"); v != "" { + subnetId = v + } - ingressApplicationGatewayIdentity := flattenKubernetesClusterAddOnIdentityProfile(ingressApplicationGateway.Identity) + ingressApplicationGatewayIdentity := flattenKubernetesClusterAddOnIdentityProfile(ingressApplicationGateway.Identity) - ingressApplicationGateways = append(ingressApplicationGateways, map[string]interface{}{ - "gateway_id": gatewayId, - "gateway_name": gatewayName, - "effective_gateway_id": effectiveGatewayId, - "subnet_cidr": subnetCIDR, - "subnet_id": subnetId, - "ingress_application_gateway_identity": ingressApplicationGatewayIdentity, - }) - } + ingressApplicationGateways = append(ingressApplicationGateways, map[string]interface{}{ + "gateway_id": gatewayId, + "gateway_name": gatewayName, + "effective_gateway_id": effectiveGatewayId, + "subnet_cidr": subnetCIDR, + "subnet_id": subnetId, + "ingress_application_gateway_identity": ingressApplicationGatewayIdentity, + }) } openServiceMeshEnabled := false - if openServiceMesh := kubernetesAddonProfileLocate(profile, openServiceMeshKey); openServiceMesh != nil { - if enabledVal := openServiceMesh.Enabled; enabledVal != nil { - openServiceMeshEnabled = *enabledVal - } + openServiceMesh := kubernetesAddonProfileLocate(profile, openServiceMeshKey) + if enabledVal := openServiceMesh.Enabled; enabledVal { + openServiceMeshEnabled = enabledVal } azureKeyVaultSecretsProviders := make([]interface{}, 0) - if azureKeyVaultSecretsProvider := kubernetesAddonProfileLocate(profile, azureKeyvaultSecretsProviderKey); azureKeyVaultSecretsProvider != nil { - if enabled := azureKeyVaultSecretsProvider.Enabled; enabled != nil && *enabled { - enableSecretRotation := false - if v := kubernetesAddonProfilelocateInConfig(azureKeyVaultSecretsProvider.Config, "enableSecretRotation"); v != nil && *v != "false" { - enableSecretRotation = true - } + azureKeyVaultSecretsProvider := kubernetesAddonProfileLocate(profile, azureKeyvaultSecretsProviderKey) + if enabled := azureKeyVaultSecretsProvider.Enabled; enabled { + enableSecretRotation := false + if v := kubernetesAddonProfilelocateInConfig(azureKeyVaultSecretsProvider.Config, "enableSecretRotation"); v != "false" { + enableSecretRotation = true + } - rotationPollInterval := "" - if v := kubernetesAddonProfilelocateInConfig(azureKeyVaultSecretsProvider.Config, "rotationPollInterval"); v != 
nil { - rotationPollInterval = *v - } + rotationPollInterval := "" + if v := kubernetesAddonProfilelocateInConfig(azureKeyVaultSecretsProvider.Config, "rotationPollInterval"); v != "" { + rotationPollInterval = v + } - azureKeyvaultSecretsProviderIdentity := flattenKubernetesClusterAddOnIdentityProfile(azureKeyVaultSecretsProvider.Identity) + azureKeyvaultSecretsProviderIdentity := flattenKubernetesClusterAddOnIdentityProfile(azureKeyVaultSecretsProvider.Identity) - azureKeyVaultSecretsProviders = append(azureKeyVaultSecretsProviders, map[string]interface{}{ - "secret_rotation_enabled": enableSecretRotation, - "secret_rotation_interval": rotationPollInterval, - "secret_identity": azureKeyvaultSecretsProviderIdentity, - }) - } + azureKeyVaultSecretsProviders = append(azureKeyVaultSecretsProviders, map[string]interface{}{ + "secret_rotation_enabled": enableSecretRotation, + "secret_rotation_interval": rotationPollInterval, + "secret_identity": azureKeyvaultSecretsProviderIdentity, + }) } return map[string]interface{}{ @@ -948,7 +957,7 @@ func flattenKubernetesClusterDataSourceAddOns(profile map[string]*containerservi } } -func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservice.ManagedClusterAgentPoolProfile) []interface{} { +func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]managedclusters.ManagedClusterAgentPoolProfile) []interface{} { agentPoolProfiles := make([]interface{}, 0) if input == nil { @@ -981,15 +990,9 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi enableAutoScaling = *profile.EnableAutoScaling } - name := "" - if profile.Name != nil { - name = *profile.Name - } + name := profile.Name - nodePublicIPPrefixID := "" - if profile.NodePublicIPPrefixID != nil { - nodePublicIPPrefixID = *profile.NodePublicIPPrefixID - } + nodePublicIPPrefixID := profile.NodePublicIPPrefixID osDiskSizeGb := 0 if profile.OsDiskSizeGB != nil { @@ -1013,12 +1016,12 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi nodeLabels := make(map[string]string) if profile.NodeLabels != nil { - for k, v := range profile.NodeLabels { - if v == nil { + for k, v := range *profile.NodeLabels { + if v == "" { continue } - nodeLabels[k] = *v + nodeLabels[k] = v } } @@ -1027,10 +1030,7 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi nodeTaints = *profile.NodeTaints } - vmSize := "" - if profile.VMSize != nil { - vmSize = *profile.VMSize - } + vmSize := profile.VmSize out := map[string]interface{}{ "count": count, @@ -1045,10 +1045,10 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi "node_taints": nodeTaints, "orchestrator_version": orchestratorVersion, "os_disk_size_gb": osDiskSizeGb, - "os_type": string(profile.OsType), + "os_type": string(*profile.OsType), "tags": tags.Flatten(profile.Tags), - "type": string(profile.Type), - "upgrade_settings": flattenUpgradeSettings(profile.UpgradeSettings), + "type": string(*profile.Type), + "upgrade_settings": flattenKubernetesClusterDataSourceUpgradeSettings(profile.UpgradeSettings), "vm_size": vmSize, "vnet_subnet_id": vnetSubnetId, "zones": zones.Flatten(profile.AvailabilityZones), @@ -1059,7 +1059,7 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi return agentPoolProfiles } -func flattenKubernetesClusterDataSourceAzureActiveDirectoryRoleBasedAccessControl(input *containerservice.ManagedClusterProperties) []interface{} { +func 
flattenKubernetesClusterDataSourceAzureActiveDirectoryRoleBasedAccessControl(input *managedclusters.ManagedClusterProperties) []interface{} { results := make([]interface{}, 0) if profile := input.AadProfile; profile != nil { adminGroupObjectIds := utils.FlattenStringSlice(profile.AdminGroupObjectIDs) @@ -1102,62 +1102,61 @@ func flattenKubernetesClusterDataSourceAzureActiveDirectoryRoleBasedAccessContro return results } -func flattenKubernetesClusterDataSourceIdentityProfile(profile map[string]*containerservice.UserAssignedIdentity) ([]interface{}, error) { - if profile == nil { +func flattenKubernetesClusterDataSourceIdentityProfile(profile *map[string]managedclusters.UserAssignedIdentity) ([]interface{}, error) { + if profile == nil || *profile == nil { return []interface{}{}, nil } kubeletIdentity := make([]interface{}, 0) - if kubeletidentity := profile["kubeletidentity"]; kubeletidentity != nil { - clientId := "" - if clientid := kubeletidentity.ClientID; clientid != nil { - clientId = *clientid - } + kubeletidentity := (*profile)["kubeletidentity"] + clientId := "" + if clientid := kubeletidentity.ClientId; clientid != nil { + clientId = *clientid + } - objectId := "" - if objectid := kubeletidentity.ObjectID; objectid != nil { - objectId = *objectid - } + objectId := "" + if objectid := kubeletidentity.ObjectId; objectid != nil { + objectId = *objectid + } - userAssignedIdentityId := "" - if resourceid := kubeletidentity.ResourceID; resourceid != nil { - parsedId, err := commonids.ParseUserAssignedIdentityIDInsensitively(*resourceid) - if err != nil { - return nil, err - } - userAssignedIdentityId = parsedId.ID() + userAssignedIdentityId := "" + if resourceid := kubeletidentity.ResourceId; resourceid != nil { + parsedId, err := commonids.ParseUserAssignedIdentityIDInsensitively(*resourceid) + if err != nil { + return nil, err } - - kubeletIdentity = append(kubeletIdentity, map[string]interface{}{ - "client_id": clientId, - "object_id": objectId, - "user_assigned_identity_id": userAssignedIdentityId, - }) + userAssignedIdentityId = parsedId.ID() } + kubeletIdentity = append(kubeletIdentity, map[string]interface{}{ + "client_id": clientId, + "object_id": objectId, + "user_assigned_identity_id": userAssignedIdentityId, + }) + return kubeletIdentity, nil } -func flattenKubernetesClusterDataSourceLinuxProfile(input *containerservice.LinuxProfile) []interface{} { +func flattenKubernetesClusterDataSourceLinuxProfile(input *managedclusters.ContainerServiceLinuxProfile) []interface{} { values := make(map[string]interface{}) sshKeys := make([]interface{}, 0) if profile := input; profile != nil { - if username := profile.AdminUsername; username != nil { - values["admin_username"] = *username + if username := profile.AdminUsername; username != "" { + values["admin_username"] = username } - if ssh := profile.SSH; ssh != nil { - if keys := ssh.PublicKeys; keys != nil { - for _, sshKey := range *keys { - if keyData := sshKey.KeyData; keyData != nil { - outputs := make(map[string]interface{}) - outputs["key_data"] = *keyData - sshKeys = append(sshKeys, outputs) - } + ssh := profile.Ssh + if keys := ssh.PublicKeys; keys != nil { + for _, sshKey := range keys { + if keyData := sshKey.KeyData; keyData != "" { + outputs := make(map[string]interface{}) + outputs["key_data"] = keyData + sshKeys = append(sshKeys, outputs) } } } + } values["ssh_key"] = sshKeys @@ -1165,34 +1164,34 @@ func flattenKubernetesClusterDataSourceLinuxProfile(input *containerservice.Linu return []interface{}{values} } -func 
flattenKubernetesClusterDataSourceWindowsProfile(input *containerservice.ManagedClusterWindowsProfile) []interface{} { +func flattenKubernetesClusterDataSourceWindowsProfile(input *managedclusters.ManagedClusterWindowsProfile) []interface{} { if input == nil { return []interface{}{} } values := make(map[string]interface{}) - if username := input.AdminUsername; username != nil { - values["admin_username"] = *username + if username := input.AdminUsername; username != "" { + values["admin_username"] = username } return []interface{}{values} } -func flattenKubernetesClusterDataSourceNetworkProfile(profile *containerservice.NetworkProfile) []interface{} { +func flattenKubernetesClusterDataSourceNetworkProfile(profile *managedclusters.ContainerServiceNetworkProfile) []interface{} { values := make(map[string]interface{}) values["network_plugin"] = profile.NetworkPlugin - if profile.NetworkPolicy != "" { - values["network_policy"] = string(profile.NetworkPolicy) + if profile.NetworkPolicy != nil { + values["network_policy"] = string(*profile.NetworkPolicy) } if profile.ServiceCidr != nil { values["service_cidr"] = *profile.ServiceCidr } - if profile.DNSServiceIP != nil { - values["dns_service_ip"] = *profile.DNSServiceIP + if profile.DnsServiceIP != nil { + values["dns_service_ip"] = *profile.DnsServiceIP } if profile.DockerBridgeCidr != nil { @@ -1203,22 +1202,22 @@ func flattenKubernetesClusterDataSourceNetworkProfile(profile *containerservice. values["pod_cidr"] = *profile.PodCidr } - if profile.LoadBalancerSku != "" { - values["load_balancer_sku"] = string(profile.LoadBalancerSku) + if profile.LoadBalancerSku != nil { + values["load_balancer_sku"] = string(*profile.LoadBalancerSku) } return []interface{}{values} } -func flattenKubernetesClusterDataSourceServicePrincipalProfile(profile *containerservice.ManagedClusterServicePrincipalProfile) []interface{} { +func flattenKubernetesClusterDataSourceServicePrincipalProfile(profile *managedclusters.ManagedClusterServicePrincipalProfile) []interface{} { if profile == nil { return []interface{}{} } values := make(map[string]interface{}) - if clientID := profile.ClientID; clientID != nil { - values["client_id"] = *clientID + if clientID := profile.ClientId; clientID != "" { + values["client_id"] = clientID } return []interface{}{values} @@ -1259,38 +1258,17 @@ func flattenKubernetesClusterDataSourceKubeConfigAAD(config kubernetes.KubeConfi return []interface{}{values} } -func flattenClusterDataSourceIdentity(input *containerservice.ManagedClusterIdentity) (*[]interface{}, error) { - var transform *identity.SystemOrUserAssignedMap - - if input != nil { - transform = &identity.SystemOrUserAssignedMap{ - Type: identity.Type(string(input.Type)), - IdentityIds: make(map[string]identity.UserAssignedIdentityDetails), - } - if input.PrincipalID != nil { - transform.PrincipalId = *input.PrincipalID - } - if input.TenantID != nil { - transform.TenantId = *input.TenantID - } - for k, v := range input.UserAssignedIdentities { - transform.IdentityIds[k] = identity.UserAssignedIdentityDetails{ - ClientId: v.ClientID, - PrincipalId: v.PrincipalID, - } - } - } - - return identity.FlattenSystemOrUserAssignedMap(transform) +func flattenClusterDataSourceIdentity(input *identity.SystemOrUserAssignedMap) (*[]interface{}, error) { + return identity.FlattenSystemOrUserAssignedMap(input) } -func flattenKubernetesClusterDataSourceMicrosoftDefender(input *containerservice.ManagedClusterSecurityProfile) []interface{} { - if input == nil || input.AzureDefender == nil || 
(input.AzureDefender.Enabled != nil && !*input.AzureDefender.Enabled) { +func flattenKubernetesClusterDataSourceMicrosoftDefender(input *managedclusters.ManagedClusterSecurityProfile) []interface{} { + if input == nil || input.Defender == nil || input.Defender.SecurityMonitoring == nil || (input.Defender.SecurityMonitoring.Enabled != nil && !*input.Defender.SecurityMonitoring.Enabled) { return []interface{}{} } logAnalyticsWorkspace := "" - if v := input.AzureDefender.LogAnalyticsWorkspaceResourceID; v != nil { + if v := input.Defender.LogAnalyticsWorkspaceResourceId; v != nil { logAnalyticsWorkspace = *v } @@ -1300,3 +1278,20 @@ func flattenKubernetesClusterDataSourceMicrosoftDefender(input *containerservice }, } } + +func flattenKubernetesClusterDataSourceUpgradeSettings(input *managedclusters.AgentPoolUpgradeSettings) []interface{} { + maxSurge := "" + if input != nil && input.MaxSurge != nil { + maxSurge = *input.MaxSurge + } + + if maxSurge == "" { + return []interface{}{} + } + + return []interface{}{ + map[string]interface{}{ + "max_surge": maxSurge, + }, + } +} diff --git a/internal/services/containers/kubernetes_cluster_node_pool_data_source.go b/internal/services/containers/kubernetes_cluster_node_pool_data_source.go index 8b0e38ba8522..cda7f515d3eb 100644 --- a/internal/services/containers/kubernetes_cluster_node_pool_data_source.go +++ b/internal/services/containers/kubernetes_cluster_node_pool_data_source.go @@ -4,13 +4,14 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" + "github.com/hashicorp/go-azure-helpers/resourcemanager/tags" "github.com/hashicorp/go-azure-helpers/resourcemanager/zones" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/validate" - "github.com/hashicorp/terraform-provider-azurerm/internal/tags" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" @@ -164,22 +165,22 @@ func dataSourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta int ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - clusterId := parse.NewClusterID(subscriptionId, d.Get("resource_group_name").(string), d.Get("kubernetes_cluster_name").(string)) + clusterId := managedclusters.NewManagedClusterID(subscriptionId, d.Get("resource_group_name").(string), d.Get("kubernetes_cluster_name").(string)) // if the parent cluster doesn't exist then the node pool won't - cluster, err := clustersClient.Get(ctx, clusterId.ResourceGroup, clusterId.ManagedClusterName) + cluster, err := clustersClient.Get(ctx, clusterId) if err != nil { - if utils.ResponseWasNotFound(cluster.Response) { + if response.WasNotFound(cluster.HttpResponse) { return fmt.Errorf("%s was not found", clusterId) } return fmt.Errorf("retrieving %s: %+v", clusterId, err) } - id := parse.NewNodePoolID(clusterId.SubscriptionId, 
clusterId.ResourceGroup, clusterId.ManagedClusterName, d.Get("name").(string)) - resp, err := poolsClient.Get(ctx, id.ResourceGroup, id.ManagedClusterName, id.AgentPoolName) + id := agentpools.NewAgentPoolID(clusterId.SubscriptionId, clusterId.ResourceGroupName, clusterId.ResourceName, d.Get("name").(string)) + resp, err := poolsClient.Get(ctx, id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return fmt.Errorf("%s was not found", id) } @@ -188,18 +189,19 @@ func dataSourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta int d.SetId(id.ID()) d.Set("name", id.AgentPoolName) - d.Set("kubernetes_cluster_name", id.ManagedClusterName) - d.Set("resource_group_name", id.ResourceGroup) + d.Set("kubernetes_cluster_name", id.ResourceName) + d.Set("resource_group_name", id.ResourceGroupName) - if props := resp.ManagedClusterAgentPoolProfileProperties; props != nil { + if model := resp.Model; model != nil && model.Properties != nil { + props := model.Properties d.Set("zones", zones.Flatten(props.AvailabilityZones)) d.Set("enable_auto_scaling", props.EnableAutoScaling) d.Set("enable_node_public_ip", props.EnableNodePublicIP) evictionPolicy := "" - if props.ScaleSetEvictionPolicy != "" { - evictionPolicy = string(props.ScaleSetEvictionPolicy) + if props.ScaleSetEvictionPolicy != nil && *props.ScaleSetEvictionPolicy != "" { + evictionPolicy = string(*props.ScaleSetEvictionPolicy) } d.Set("eviction_policy", evictionPolicy) @@ -221,9 +223,9 @@ func dataSourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta int } d.Set("min_count", minCount) - mode := string(containerservice.AgentPoolModeUser) - if props.Mode != "" { - mode = string(props.Mode) + mode := string(agentpools.AgentPoolModeUser) + if props.Mode != nil && *props.Mode != "" { + mode = string(*props.Mode) } d.Set("mode", mode) @@ -250,17 +252,20 @@ func dataSourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta int } d.Set("os_disk_size_gb", osDiskSizeGB) - osDiskType := containerservice.OSDiskTypeManaged - if props.OsDiskType != "" { - osDiskType = props.OsDiskType + osDiskType := agentpools.OSDiskTypeManaged + if props.OsDiskType != nil && *props.OsDiskType != "" { + osDiskType = *props.OsDiskType } d.Set("os_disk_type", string(osDiskType)) - d.Set("os_type", string(props.OsType)) + + if props.OsType != nil { + d.Set("os_type", string(*props.OsType)) + } // not returned from the API if not Spot - priority := string(containerservice.ScaleSetPriorityRegular) - if props.ScaleSetPriority != "" { - priority = string(props.ScaleSetPriority) + priority := string(agentpools.ScaleSetPriorityRegular) + if props.ScaleSetPriority != nil && *props.ScaleSetPriority != "" { + priority = string(*props.ScaleSetPriority) } d.Set("priority", priority) @@ -276,13 +281,15 @@ func dataSourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta int } d.Set("spot_max_price", spotMaxPrice) - if err := d.Set("upgrade_settings", flattenUpgradeSettings(props.UpgradeSettings)); err != nil { + if err := d.Set("upgrade_settings", flattenAgentPoolUpgradeSettings(props.UpgradeSettings)); err != nil { return fmt.Errorf("setting `upgrade_settings`: %+v", err) } d.Set("vnet_subnet_id", props.VnetSubnetID) - d.Set("vm_size", props.VMSize) + d.Set("vm_size", props.VmSize) + + d.Set("tags", tags.Flatten(props.Tags)) } - return tags.FlattenAndSet(d, resp.Tags) + return nil } diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource.go
b/internal/services/containers/kubernetes_cluster_node_pool_resource.go index ba4fd5e948ec..02c62a2967eb 100644 --- a/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -4,12 +4,17 @@ import ( "encoding/base64" "fmt" "log" + "regexp" + "strconv" "time" - "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" + "github.com/hashicorp/go-azure-helpers/resourcemanager/tags" "github.com/hashicorp/go-azure-helpers/resourcemanager/zones" "github.com/hashicorp/go-azure-sdk/resource-manager/compute/2021-11-01/proximityplacementgroups" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" @@ -19,7 +24,6 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/parse" containerValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/validate" networkValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/network/validate" - "github.com/hashicorp/terraform-provider-azurerm/internal/tags" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" @@ -118,8 +122,8 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource { Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.ScaleSetEvictionPolicyDelete), - string(containerservice.ScaleSetEvictionPolicyDeallocate), + string(agentpools.ScaleSetEvictionPolicyDelete), + string(agentpools.ScaleSetEvictionPolicyDeallocate), }, false), }, @@ -138,8 +142,8 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource { Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.KubeletDiskTypeOS), - string(containerservice.KubeletDiskTypeTemporary), + string(agentpools.KubeletDiskTypeOS), + string(agentpools.KubeletDiskTypeTemporary), }, false), }, @@ -166,10 +170,10 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource { "mode": { Type: pluginsdk.TypeString, Optional: true, - Default: string(containerservice.AgentPoolModeUser), + Default: string(agentpools.AgentPoolModeUser), ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.AgentPoolModeSystem), - string(containerservice.AgentPoolModeUser), + string(agentpools.AgentPoolModeSystem), + string(agentpools.AgentPoolModeUser), }, false), }, @@ -226,10 +230,10 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ForceNew: true, - Default: containerservice.OSDiskTypeManaged, + Default: agentpools.OSDiskTypeManaged, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.OSDiskTypeEphemeral), - string(containerservice.OSDiskTypeManaged), + string(agentpools.OSDiskTypeEphemeral), + string(agentpools.OSDiskTypeManaged), }, false), }, @@ -239,8 +243,8 @@ func 
resourceKubernetesClusterNodePool() *pluginsdk.Resource { ForceNew: true, Computed: true, // defaults to Ubuntu if using Linux ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.OSSKUUbuntu), - string(containerservice.OSSKUCBLMariner), + string(agentpools.OSSKUUbuntu), + string(agentpools.OSSKUCBLMariner), }, false), }, @@ -248,10 +252,10 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ForceNew: true, - Default: string(containerservice.OSTypeLinux), + Default: string(agentpools.OSTypeLinux), ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.OSTypeLinux), - string(containerservice.OSTypeWindows), + string(agentpools.OSTypeLinux), + string(agentpools.OSTypeWindows), }, false), }, @@ -266,10 +270,10 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ForceNew: true, - Default: string(containerservice.ScaleSetPriorityRegular), + Default: string(agentpools.ScaleSetPriorityRegular), ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.ScaleSetPriorityRegular), - string(containerservice.ScaleSetPrioritySpot), + string(agentpools.ScaleSetPriorityRegular), + string(agentpools.ScaleSetPrioritySpot), }, false), }, @@ -291,10 +295,10 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource { "scale_down_mode": { Type: pluginsdk.TypeString, Optional: true, - Default: string(containerservice.ScaleDownModeDelete), + Default: string(agentpools.ScaleDownModeDelete), ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.ScaleDownModeDeallocate), - string(containerservice.ScaleDownModeDelete), + string(agentpools.ScaleDownModeDeallocate), + string(agentpools.ScaleDownModeDelete), }, false), }, @@ -318,8 +322,8 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.WorkloadRuntimeOCIContainer), - string(containerservice.WorkloadRuntimeWasmWasi), + string(agentpools.WorkloadRuntimeOCIContainer), + string(agentpools.WorkloadRuntimeWasmWasi), }, false), }, "zones": commonschema.ZonesMultipleOptionalForceNew(), @@ -334,17 +338,17 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() - clusterId, err := parse.ClusterID(d.Get("kubernetes_cluster_id").(string)) + clusterId, err := managedclusters.ParseManagedClusterID(d.Get("kubernetes_cluster_id").(string)) if err != nil { return err } - id := parse.NewNodePoolID(poolsClient.SubscriptionID, clusterId.ResourceGroup, clusterId.ManagedClusterName, d.Get("name").(string)) + id := agentpools.NewAgentPoolID(clusterId.SubscriptionId, clusterId.ResourceGroupName, clusterId.ResourceName, d.Get("name").(string)) log.Printf("[DEBUG] Retrieving %s...", *clusterId) - cluster, err := clustersClient.Get(ctx, clusterId.ResourceGroup, clusterId.ManagedClusterName) + cluster, err := clustersClient.Get(ctx, *clusterId) if err != nil { - if utils.ResponseWasNotFound(cluster.Response) { + if response.WasNotFound(cluster.HttpResponse) { return fmt.Errorf("%s was not found", *clusterId) } @@ -353,10 +357,11 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int // try to provide a more helpful error here defaultPoolIsVMSS := false - if props := cluster.ManagedClusterProperties; props != nil { + if 
model := cluster.Model; model != nil && model.Properties != nil { + props := model.Properties if pools := props.AgentPoolProfiles; pools != nil { for _, p := range *pools { - if p.Type == containerservice.AgentPoolTypeVirtualMachineScaleSets { + if p.Type != nil && *p.Type == managedclusters.AgentPoolTypeVirtualMachineScaleSets { defaultPoolIsVMSS = true break } @@ -367,58 +372,59 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int return fmt.Errorf("multiple node pools are only supported when the Default Node Pool uses a VMScaleSet (but %s doesn't)", *clusterId) } - existing, err := poolsClient.Get(ctx, id.ResourceGroup, id.ManagedClusterName, id.AgentPoolName) + existing, err := poolsClient.Get(ctx, id) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { + if !response.WasNotFound(existing.HttpResponse) { return fmt.Errorf("checking for presence of existing %s: %+v", id, err) } } - if !utils.ResponseWasNotFound(existing.Response) { + if !response.WasNotFound(existing.HttpResponse) { return tf.ImportAsExistsError("azurerm_kubernetes_cluster_node_pool", id.ID()) } count := d.Get("node_count").(int) enableAutoScaling := d.Get("enable_auto_scaling").(bool) evictionPolicy := d.Get("eviction_policy").(string) - mode := containerservice.AgentPoolMode(d.Get("mode").(string)) + mode := agentpools.AgentPoolMode(d.Get("mode").(string)) osType := d.Get("os_type").(string) priority := d.Get("priority").(string) spotMaxPrice := d.Get("spot_max_price").(float64) t := d.Get("tags").(map[string]interface{}) - profile := containerservice.ManagedClusterAgentPoolProfileProperties{ - OsType: containerservice.OSType(osType), + profile := agentpools.ManagedClusterAgentPoolProfileProperties{ + OsType: utils.ToPtr(agentpools.OSType(osType)), EnableAutoScaling: utils.Bool(enableAutoScaling), EnableFIPS: utils.Bool(d.Get("fips_enabled").(bool)), EnableEncryptionAtHost: utils.Bool(d.Get("enable_host_encryption").(bool)), EnableUltraSSD: utils.Bool(d.Get("ultra_ssd_enabled").(bool)), EnableNodePublicIP: utils.Bool(d.Get("enable_node_public_ip").(bool)), - KubeletDiskType: containerservice.KubeletDiskType(d.Get("kubelet_disk_type").(string)), - Mode: mode, - ScaleSetPriority: containerservice.ScaleSetPriority(priority), + KubeletDiskType: utils.ToPtr(agentpools.KubeletDiskType(d.Get("kubelet_disk_type").(string))), + Mode: utils.ToPtr(mode), + ScaleSetPriority: utils.ToPtr(agentpools.ScaleSetPriority(d.Get("priority").(string))), Tags: tags.Expand(t), - Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, - VMSize: utils.String(d.Get("vm_size").(string)), - UpgradeSettings: expandUpgradeSettings(d.Get("upgrade_settings").([]interface{})), + Type: utils.ToPtr(agentpools.AgentPoolTypeVirtualMachineScaleSets), + VmSize: utils.String(d.Get("vm_size").(string)), + UpgradeSettings: expandAgentPoolUpgradeSettings(d.Get("upgrade_settings").([]interface{})), // this must always be sent during creation, but is optional for auto-scaled clusters during update - Count: utils.Int32(int32(count)), + Count: utils.Int64(int64(count)), } if osSku := d.Get("os_sku").(string); osSku != "" { - profile.OsSKU = containerservice.OSSKU(osSku) + profile.OsSKU = utils.ToPtr(agentpools.OSSKU(osSku)) } if scaleDownMode := d.Get("scale_down_mode").(string); scaleDownMode != "" { - profile.ScaleDownMode = containerservice.ScaleDownMode(scaleDownMode) + profile.ScaleDownMode = utils.ToPtr(agentpools.ScaleDownMode(scaleDownMode)) } + if workloadRuntime := d.Get("workload_runtime").(string); 
workloadRuntime != "" { - profile.WorkloadRuntime = containerservice.WorkloadRuntime(workloadRuntime) + profile.WorkloadRuntime = utils.ToPtr(agentpools.WorkloadRuntime(workloadRuntime)) } - if priority == string(containerservice.ScaleSetPrioritySpot) { - profile.ScaleSetEvictionPolicy = containerservice.ScaleSetEvictionPolicy(evictionPolicy) + if priority == string(managedclusters.ScaleSetPrioritySpot) { + profile.ScaleSetEvictionPolicy = utils.ToPtr(agentpools.ScaleSetEvictionPolicy(evictionPolicy)) profile.SpotMaxPrice = utils.Float(spotMaxPrice) } else { if evictionPolicy != "" { @@ -444,12 +450,12 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int profile.AvailabilityZones = &zones } - if maxPods := int32(d.Get("max_pods").(int)); maxPods > 0 { - profile.MaxPods = utils.Int32(maxPods) + if maxPods := int64(d.Get("max_pods").(int)); maxPods > 0 { + profile.MaxPods = utils.Int64(maxPods) } nodeLabelsRaw := d.Get("node_labels").(map[string]interface{}) - if nodeLabels := utils.ExpandMapStringPtrString(nodeLabelsRaw); len(nodeLabels) > 0 { + if nodeLabels := expandNodeLabels(nodeLabelsRaw); len(*nodeLabels) > 0 { profile.NodeLabels = nodeLabels } @@ -463,7 +469,7 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int } if v := d.Get("message_of_the_day").(string); v != "" { - if profile.OsType == containerservice.OSTypeWindows { + if profile.OsType != nil && *profile.OsType == agentpools.OSTypeWindows { return fmt.Errorf("`message_of_the_day` cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script)") } messageOfTheDayEncoded := base64.StdEncoding.EncodeToString([]byte(v)) @@ -471,7 +477,7 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int } if osDiskSizeGB := d.Get("os_disk_size_gb").(int); osDiskSizeGB > 0 { - profile.OsDiskSizeGB = utils.Int32(int32(osDiskSizeGB)) + profile.OsDiskSizeGB = utils.Int64(int64(osDiskSizeGB)) } proximityPlacementGroupId := d.Get("proximity_placement_group_id").(string) @@ -480,7 +486,7 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int } if osDiskType := d.Get("os_disk_type").(string); osDiskType != "" { - profile.OsDiskType = containerservice.OSDiskType(osDiskType) + profile.OsDiskType = utils.ToPtr(agentpools.OSDiskType(osDiskType)) } if podSubnetID := d.Get("pod_subnet_id").(string); podSubnetID != "" { @@ -505,17 +511,17 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int if enableAutoScaling { // handle count being optional if count == 0 { - profile.Count = utils.Int32(int32(minCount)) + profile.Count = utils.Int64(int64(minCount)) } if maxCount >= 0 { - profile.MaxCount = utils.Int32(int32(maxCount)) + profile.MaxCount = utils.Int64(int64(maxCount)) } else { return fmt.Errorf("`max_count` must be configured when `enable_auto_scaling` is set to `true`") } if minCount >= 0 { - profile.MinCount = utils.Int32(int32(minCount)) + profile.MinCount = utils.Int64(int64(minCount)) } else { return fmt.Errorf("`min_count` must be configured when `enable_auto_scaling` is set to `true`") } @@ -532,7 +538,7 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int } if linuxOSConfig := d.Get("linux_os_config").([]interface{}); len(linuxOSConfig) > 0 { - if osType != string(containerservice.OSTypeLinux) { + if osType != string(managedclusters.OSTypeLinux) { return fmt.Errorf("`linux_os_config` can only be configured when 
`os_type` is set to `linux`") } linuxOSConfig, err := expandAgentPoolLinuxOSConfig(linuxOSConfig) @@ -542,17 +548,17 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int profile.LinuxOSConfig = linuxOSConfig } - parameters := containerservice.AgentPool{ - Name: utils.String(id.AgentPoolName), - ManagedClusterAgentPoolProfileProperties: &profile, + parameters := agentpools.AgentPool{ + Name: utils.String(id.AgentPoolName), + Properties: &profile, } - future, err := poolsClient.CreateOrUpdate(ctx, id.ResourceGroup, id.ManagedClusterName, id.AgentPoolName, parameters) + future, err := poolsClient.CreateOrUpdate(ctx, id, parameters) if err != nil { return fmt.Errorf("creating %s: %+v", id, err) } - if err = future.WaitForCompletionRef(ctx, poolsClient.Client); err != nil { + if err = future.Poller.PollUntilDone(); err != nil { return fmt.Errorf("waiting for creation of %s: %+v", id, err) } @@ -566,7 +572,7 @@ func resourceKubernetesClusterNodePoolUpdate(d *pluginsdk.ResourceData, meta int ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.NodePoolID(d.Id()) + id, err := agentpools.ParseAgentPoolID(d.Id()) if err != nil { return err } @@ -574,19 +580,19 @@ func resourceKubernetesClusterNodePoolUpdate(d *pluginsdk.ResourceData, meta int d.Partial(true) log.Printf("[DEBUG] Retrieving existing %s..", *id) - existing, err := client.Get(ctx, id.ResourceGroup, id.ManagedClusterName, id.AgentPoolName) + existing, err := client.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(existing.Response) { + if response.WasNotFound(existing.HttpResponse) { return fmt.Errorf("%s was not found", *id) } return fmt.Errorf("retrieving %s: %+v", *id, err) } - if existing.ManagedClusterAgentPoolProfileProperties == nil { + if existing.Model == nil || existing.Model.Properties == nil { return fmt.Errorf("retrieving %s: `properties` was nil", *id) } - props := existing.ManagedClusterAgentPoolProfileProperties + props := existing.Model.Properties // store the existing value should the user have opted to ignore it enableAutoScaling := false @@ -611,19 +617,20 @@ func resourceKubernetesClusterNodePoolUpdate(d *pluginsdk.ResourceData, meta int } if d.HasChange("max_count") || d.Get("enable_auto_scaling").(bool) { - props.MaxCount = utils.Int32(int32(d.Get("max_count").(int))) + props.MaxCount = utils.Int64(int64(d.Get("max_count").(int))) } if d.HasChange("mode") { - props.Mode = containerservice.AgentPoolMode(d.Get("mode").(string)) + mode := agentpools.AgentPoolMode(d.Get("mode").(string)) + props.Mode = &mode } if d.HasChange("min_count") || d.Get("enable_auto_scaling").(bool) { - props.MinCount = utils.Int32(int32(d.Get("min_count").(int))) + props.MinCount = utils.Int64(int64(d.Get("min_count").(int))) } if d.HasChange("node_count") { - props.Count = utils.Int32(int32(d.Get("node_count").(int))) + props.Count = utils.Int64(int64(d.Get("node_count").(int))) } if d.HasChange("node_public_ip_prefix_id") { @@ -631,20 +638,22 @@ func resourceKubernetesClusterNodePoolUpdate(d *pluginsdk.ResourceData, meta int } if d.HasChange("orchestrator_version") { - existingNodePool, err := client.Get(ctx, id.ResourceGroup, id.ManagedClusterName, id.AgentPoolName) + existingNodePoolResp, err := client.Get(ctx, *id) if err != nil { return fmt.Errorf("retrieving Node Pool %s: %+v", *id, err) } - orchestratorVersion := d.Get("orchestrator_version").(string) - currentOrchestratorVersion := "" - if v := existingNodePool.OrchestratorVersion; v != 
nil { - currentOrchestratorVersion = *v - } - if err := validateNodePoolSupportsVersion(ctx, containersClient, currentOrchestratorVersion, *id, orchestratorVersion); err != nil { - return err - } + if existingNodePool := existingNodePoolResp.Model; existingNodePool != nil && existingNodePool.Properties != nil { + orchestratorVersion := d.Get("orchestrator_version").(string) + currentOrchestratorVersion := "" + if v := existingNodePool.Properties.CurrentOrchestratorVersion; v != nil { + currentOrchestratorVersion = *v + } + if err := validateNodePoolSupportsVersion(ctx, containersClient, currentOrchestratorVersion, *id, orchestratorVersion); err != nil { + return err + } - props.OrchestratorVersion = utils.String(orchestratorVersion) + props.OrchestratorVersion = utils.String(orchestratorVersion) + } } if d.HasChange("tags") { @@ -654,18 +663,20 @@ func resourceKubernetesClusterNodePoolUpdate(d *pluginsdk.ResourceData, meta int if d.HasChange("upgrade_settings") { upgradeSettingsRaw := d.Get("upgrade_settings").([]interface{}) - props.UpgradeSettings = expandUpgradeSettings(upgradeSettingsRaw) + props.UpgradeSettings = expandAgentPoolUpgradeSettings(upgradeSettingsRaw) } if d.HasChange("scale_down_mode") { - props.ScaleDownMode = containerservice.ScaleDownMode(d.Get("scale_down_mode").(string)) + mode := agentpools.ScaleDownMode(d.Get("scale_down_mode").(string)) + props.ScaleDownMode = &mode } if d.HasChange("workload_runtime") { - props.WorkloadRuntime = containerservice.WorkloadRuntime(d.Get("workload_runtime").(string)) + runtime := agentpools.WorkloadRuntime(d.Get("workload_runtime").(string)) + props.WorkloadRuntime = &runtime } if d.HasChange("node_labels") { - props.NodeLabels = utils.ExpandMapStringPtrString(d.Get("node_labels").(map[string]interface{})) + props.NodeLabels = expandNodeLabels(d.Get("node_labels").(map[string]interface{})) } // validate the auto-scale fields are both set/unset to prevent a continual diff @@ -696,13 +707,13 @@ func resourceKubernetesClusterNodePoolUpdate(d *pluginsdk.ResourceData, meta int } log.Printf("[DEBUG] Updating existing %s..", *id) - existing.ManagedClusterAgentPoolProfileProperties = props - future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ManagedClusterName, id.AgentPoolName, existing) + existing.Model.Properties = props + future, err := client.CreateOrUpdate(ctx, *id, *existing.Model) if err != nil { return fmt.Errorf("updating Node Pool %s: %+v", *id, err) } - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + if err = future.Poller.PollUntilDone(); err != nil { return fmt.Errorf("waiting for update of %s: %+v", *id, err) } @@ -717,16 +728,16 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.NodePoolID(d.Id()) + id, err := agentpools.ParseAgentPoolID(d.Id()) if err != nil { return err } // if the parent cluster doesn't exist then the node pool won't - clusterId := parse.NewClusterID(id.SubscriptionId, id.ResourceGroup, id.ManagedClusterName) - cluster, err := clustersClient.Get(ctx, id.ResourceGroup, id.ManagedClusterName) + clusterId := managedclusters.NewManagedClusterID(id.SubscriptionId, id.ResourceGroupName, id.ResourceName) + cluster, err := clustersClient.Get(ctx, clusterId) if err != nil { - if utils.ResponseWasNotFound(cluster.Response) { + if response.WasNotFound(cluster.HttpResponse) { log.Printf("[DEBUG] %s was not found - removing from state!", clusterId) d.SetId("") return nil @@ -735,9
+746,9 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter return fmt.Errorf("retrieving %s: %+v", clusterId, err) } - resp, err := poolsClient.Get(ctx, id.ResourceGroup, id.ManagedClusterName, id.AgentPoolName) + resp, err := poolsClient.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { log.Printf("[DEBUG] %q was not found - removing from state!", *id) d.SetId("") return nil @@ -749,24 +760,32 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter d.Set("name", id.AgentPoolName) d.Set("kubernetes_cluster_id", clusterId.ID()) - if props := resp.ManagedClusterAgentPoolProfileProperties; props != nil { + if model := resp.Model; model != nil && model.Properties != nil { + props := model.Properties d.Set("zones", zones.Flatten(props.AvailabilityZones)) d.Set("enable_auto_scaling", props.EnableAutoScaling) d.Set("enable_node_public_ip", props.EnableNodePublicIP) d.Set("enable_host_encryption", props.EnableEncryptionAtHost) d.Set("fips_enabled", props.EnableFIPS) d.Set("ultra_ssd_enabled", props.EnableUltraSSD) - d.Set("kubelet_disk_type", string(props.KubeletDiskType)) - scaleDownMode := string(containerservice.ScaleDownModeDelete) - if v := props.ScaleDownMode; v != "" { - scaleDownMode = string(v) + + if v := props.KubeletDiskType; v != nil { + d.Set("kubelet_disk_type", string(*v)) + } + + scaleDownMode := string(managedclusters.ScaleDownModeDelete) + if v := props.ScaleDownMode; v != nil { + scaleDownMode = string(*v) } d.Set("scale_down_mode", scaleDownMode) - d.Set("workload_runtime", string(props.WorkloadRuntime)) + + if v := props.WorkloadRuntime; v != nil { + d.Set("workload_runtime", string(*v)) + } evictionPolicy := "" - if props.ScaleSetEvictionPolicy != "" { - evictionPolicy = string(props.ScaleSetEvictionPolicy) + if v := props.ScaleSetEvictionPolicy; v != nil && *v != "" { + evictionPolicy = string(*v) } d.Set("eviction_policy", evictionPolicy) @@ -810,9 +829,9 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter } d.Set("min_count", minCount) - mode := string(containerservice.AgentPoolModeUser) - if props.Mode != "" { - mode = string(props.Mode) + mode := string(managedclusters.AgentPoolModeUser) + if v := props.Mode; v != nil && *v != "" { + mode = string(*props.Mode) } d.Set("mode", mode) @@ -845,19 +864,24 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter } d.Set("os_disk_size_gb", osDiskSizeGB) - osDiskType := containerservice.OSDiskTypeManaged - if props.OsDiskType != "" { - osDiskType = props.OsDiskType + osDiskType := agentpools.OSDiskTypeManaged + if v := props.OsDiskType; v != nil && *v != "" { + osDiskType = *v } d.Set("os_disk_type", osDiskType) - d.Set("os_type", string(props.OsType)) - d.Set("os_sku", string(props.OsSKU)) + + if v := props.OsType; v != nil { + d.Set("os_type", string(*v)) + } + if v := props.OsSKU; v != nil { + d.Set("os_sku", string(*v)) + } d.Set("pod_subnet_id", props.PodSubnetID) // not returned from the API if not Spot - priority := string(containerservice.ScaleSetPriorityRegular) - if props.ScaleSetPriority != "" { - priority = string(props.ScaleSetPriority) + priority := string(managedclusters.ScaleSetPriorityRegular) + if v := props.ScaleSetPriority; v != nil && *v != "" { + priority = string(*v) } d.Set("priority", priority) @@ -870,16 +894,16 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter 
d.Set("spot_max_price", spotMaxPrice) d.Set("vnet_subnet_id", props.VnetSubnetID) - d.Set("vm_size", props.VMSize) + d.Set("vm_size", props.VmSize) d.Set("host_group_id", props.HostGroupID) d.Set("capacity_reservation_group_id", props.CapacityReservationGroupID) - if err := d.Set("upgrade_settings", flattenUpgradeSettings(props.UpgradeSettings)); err != nil { + if err := d.Set("upgrade_settings", flattenAgentPoolUpgradeSettings(props.UpgradeSettings)); err != nil { return fmt.Errorf("setting `upgrade_settings`: %+v", err) } } - return tags.FlattenAndSet(d, resp.Tags) + return tags.FlattenAndSet(d, resp.Model.Properties.Tags) } func resourceKubernetesClusterNodePoolDelete(d *pluginsdk.ResourceData, meta interface{}) error { @@ -887,19 +911,20 @@ func resourceKubernetesClusterNodePoolDelete(d *pluginsdk.ResourceData, meta int ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.NodePoolID(d.Id()) + id, err := agentpools.ParseAgentPoolID(d.Id()) if err != nil { return err } ignorePodDisruptionBudget := true - - future, err := client.Delete(ctx, id.ResourceGroup, id.ManagedClusterName, id.AgentPoolName, &ignorePodDisruptionBudget) + future, err := client.Delete(ctx, *id, agentpools.DeleteOperationOptions{ + IgnorePodDisruptionBudget: &ignorePodDisruptionBudget, + }) if err != nil { return fmt.Errorf("deleting %s: %+v", *id, err) } - if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + if err := future.Poller.PollUntilDone(); err != nil { return fmt.Errorf("waiting for the deletion of %s: %+v", id, err) } @@ -937,8 +962,49 @@ func upgradeSettingsForDataSourceSchema() *pluginsdk.Schema { } } -func expandUpgradeSettings(input []interface{}) *containerservice.AgentPoolUpgradeSettings { - setting := &containerservice.AgentPoolUpgradeSettings{} +func expandAgentPoolKubeletConfig(input []interface{}) *agentpools.KubeletConfig { + if len(input) == 0 || input[0] == nil { + return nil + } + + raw := input[0].(map[string]interface{}) + result := &agentpools.KubeletConfig{ + CpuCfsQuota: utils.Bool(raw["cpu_cfs_quota_enabled"].(bool)), + // must be false, otherwise the backend will report error: CustomKubeletConfig.FailSwapOn must be set to false to enable swap file on nodes. 
+ FailSwapOn: utils.Bool(false), + AllowedUnsafeSysctls: utils.ExpandStringSlice(raw["allowed_unsafe_sysctls"].(*pluginsdk.Set).List()), + } + + if v := raw["cpu_manager_policy"].(string); v != "" { + result.CpuManagerPolicy = utils.String(v) + } + if v := raw["cpu_cfs_quota_period"].(string); v != "" { + result.CpuCfsQuotaPeriod = utils.String(v) + } + if v := raw["image_gc_high_threshold"].(int); v != 0 { + result.ImageGcHighThreshold = utils.Int64(int64(v)) + } + if v := raw["image_gc_low_threshold"].(int); v != 0 { + result.ImageGcLowThreshold = utils.Int64(int64(v)) + } + if v := raw["topology_manager_policy"].(string); v != "" { + result.TopologyManagerPolicy = utils.String(v) + } + if v := raw["container_log_max_size_mb"].(int); v != 0 { + result.ContainerLogMaxSizeMB = utils.Int64(int64(v)) + } + if v := raw["container_log_max_line"].(int); v != 0 { + result.ContainerLogMaxFiles = utils.Int64(int64(v)) + } + if v := raw["pod_max_pid"].(int); v != 0 { + result.PodMaxPids = utils.Int64(int64(v)) + } + + return result +} + +func expandAgentPoolUpgradeSettings(input []interface{}) *agentpools.AgentPoolUpgradeSettings { + setting := &agentpools.AgentPoolUpgradeSettings{} if len(input) == 0 || input[0] == nil { return setting } @@ -950,7 +1016,7 @@ func expandUpgradeSettings(input []interface{}) *containerservice.AgentPoolUpgra return setting } -func flattenUpgradeSettings(input *containerservice.AgentPoolUpgradeSettings) []interface{} { +func flattenAgentPoolUpgradeSettings(input *agentpools.AgentPoolUpgradeSettings) []interface{} { maxSurge := "" if input != nil && input.MaxSurge != nil { maxSurge = *input.MaxSurge @@ -966,3 +1032,331 @@ func flattenUpgradeSettings(input *containerservice.AgentPoolUpgradeSettings) [] }, } } + +func expandNodeLabels(input map[string]interface{}) *map[string]string { + result := make(map[string]string) + for k, v := range input { + result[k] = v.(string) + } + return &result +} + +func expandAgentPoolLinuxOSConfig(input []interface{}) (*agentpools.LinuxOSConfig, error) { + if len(input) == 0 || input[0] == nil { + return nil, nil + } + raw := input[0].(map[string]interface{}) + sysctlConfig, err := expandAgentPoolSysctlConfig(raw["sysctl_config"].([]interface{})) + if err != nil { + return nil, err + } + + result := &agentpools.LinuxOSConfig{ + Sysctls: sysctlConfig, + } + if v := raw["transparent_huge_page_enabled"].(string); v != "" { + result.TransparentHugePageEnabled = utils.String(v) + } + if v := raw["transparent_huge_page_defrag"].(string); v != "" { + result.TransparentHugePageDefrag = utils.String(v) + } + if v := raw["swap_file_size_mb"].(int); v != 0 { + result.SwapFileSizeMB = utils.Int64(int64(v)) + } + return result, nil +} + +func expandAgentPoolSysctlConfig(input []interface{}) (*agentpools.SysctlConfig, error) { + if len(input) == 0 || input[0] == nil { + return nil, nil + } + raw := input[0].(map[string]interface{}) + result := &agentpools.SysctlConfig{ + NetIPv4TcpTwReuse: utils.Bool(raw["net_ipv4_tcp_tw_reuse"].(bool)), + } + if v := raw["net_core_somaxconn"].(int); v != 0 { + result.NetCoreSomaxconn = utils.Int64(int64(v)) + } + if v := raw["net_core_netdev_max_backlog"].(int); v != 0 { + result.NetCoreNetdevMaxBacklog = utils.Int64(int64(v)) + } + if v := raw["net_core_rmem_default"].(int); v != 0 { + result.NetCoreRmemDefault = utils.Int64(int64(v)) + } + if v := raw["net_core_rmem_max"].(int); v != 0 { + result.NetCoreRmemMax = utils.Int64(int64(v)) + } + if v := raw["net_core_wmem_default"].(int); v != 0 { + 
result.NetCoreWmemDefault = utils.Int64(int64(v)) + } + if v := raw["net_core_wmem_max"].(int); v != 0 { + result.NetCoreWmemMax = utils.Int64(int64(v)) + } + if v := raw["net_core_optmem_max"].(int); v != 0 { + result.NetCoreOptmemMax = utils.Int64(int64(v)) + } + if v := raw["net_ipv4_tcp_max_syn_backlog"].(int); v != 0 { + result.NetIPv4TcpMaxSynBacklog = utils.Int64(int64(v)) + } + if v := raw["net_ipv4_tcp_max_tw_buckets"].(int); v != 0 { + result.NetIPv4TcpMaxTwBuckets = utils.Int64(int64(v)) + } + if v := raw["net_ipv4_tcp_fin_timeout"].(int); v != 0 { + result.NetIPv4TcpFinTimeout = utils.Int64(int64(v)) + } + if v := raw["net_ipv4_tcp_keepalive_time"].(int); v != 0 { + result.NetIPv4TcpKeepaliveTime = utils.Int64(int64(v)) + } + if v := raw["net_ipv4_tcp_keepalive_probes"].(int); v != 0 { + result.NetIPv4TcpKeepaliveProbes = utils.Int64(int64(v)) + } + if v := raw["net_ipv4_tcp_keepalive_intvl"].(int); v != 0 { + result.NetIPv4TcpkeepaliveIntvl = utils.Int64(int64(v)) + } + netIpv4IPLocalPortRangeMin := raw["net_ipv4_ip_local_port_range_min"].(int) + netIpv4IPLocalPortRangeMax := raw["net_ipv4_ip_local_port_range_max"].(int) + if (netIpv4IPLocalPortRangeMin != 0 && netIpv4IPLocalPortRangeMax == 0) || (netIpv4IPLocalPortRangeMin == 0 && netIpv4IPLocalPortRangeMax != 0) { + return nil, fmt.Errorf("`net_ipv4_ip_local_port_range_min` and `net_ipv4_ip_local_port_range_max` should both be set or unset") + } + if netIpv4IPLocalPortRangeMin > netIpv4IPLocalPortRangeMax { + return nil, fmt.Errorf("`net_ipv4_ip_local_port_range_min` should be no larger than `net_ipv4_ip_local_port_range_max`") + } + if netIpv4IPLocalPortRangeMin != 0 && netIpv4IPLocalPortRangeMax != 0 { + result.NetIPv4IPLocalPortRange = utils.String(fmt.Sprintf("%d %d", netIpv4IPLocalPortRangeMin, netIpv4IPLocalPortRangeMax)) + } + if v := raw["net_ipv4_neigh_default_gc_thresh1"].(int); v != 0 { + result.NetIPv4NeighDefaultGcThresh1 = utils.Int64(int64(v)) + } + if v := raw["net_ipv4_neigh_default_gc_thresh2"].(int); v != 0 { + result.NetIPv4NeighDefaultGcThresh2 = utils.Int64(int64(v)) + } + if v := raw["net_ipv4_neigh_default_gc_thresh3"].(int); v != 0 { + result.NetIPv4NeighDefaultGcThresh3 = utils.Int64(int64(v)) + } + if v := raw["net_netfilter_nf_conntrack_max"].(int); v != 0 { + result.NetNetfilterNfConntrackMax = utils.Int64(int64(v)) + } + if v := raw["net_netfilter_nf_conntrack_buckets"].(int); v != 0 { + result.NetNetfilterNfConntrackBuckets = utils.Int64(int64(v)) + } + if v := raw["fs_aio_max_nr"].(int); v != 0 { + result.FsAioMaxNr = utils.Int64(int64(v)) + } + if v := raw["fs_inotify_max_user_watches"].(int); v != 0 { + result.FsInotifyMaxUserWatches = utils.Int64(int64(v)) + } + if v := raw["fs_file_max"].(int); v != 0 { + result.FsFileMax = utils.Int64(int64(v)) + } + if v := raw["fs_nr_open"].(int); v != 0 { + result.FsNrOpen = utils.Int64(int64(v)) + } + if v := raw["kernel_threads_max"].(int); v != 0 { + result.KernelThreadsMax = utils.Int64(int64(v)) + } + if v := raw["vm_max_map_count"].(int); v != 0 { + result.VmMaxMapCount = utils.Int64(int64(v)) + } + if v := raw["vm_swappiness"].(int); v != 0 { + result.VmSwappiness = utils.Int64(int64(v)) + } + if v := raw["vm_vfs_cache_pressure"].(int); v != 0 { + result.VmVfsCachePressure = utils.Int64(int64(v)) + } + return result, nil +} + +func flattenAgentPoolLinuxOSConfig(input *agentpools.LinuxOSConfig) ([]interface{}, error) { + if input == nil { + return make([]interface{}, 0), nil + } + + var swapFileSizeMB int + if input.SwapFileSizeMB != nil { + 
swapFileSizeMB = int(*input.SwapFileSizeMB) + } + var transparentHugePageDefrag string + if input.TransparentHugePageDefrag != nil { + transparentHugePageDefrag = *input.TransparentHugePageDefrag + } + var transparentHugePageEnabled string + if input.TransparentHugePageEnabled != nil { + transparentHugePageEnabled = *input.TransparentHugePageEnabled + } + sysctlConfig, err := flattenAgentPoolSysctlConfig(input.Sysctls) + if err != nil { + return nil, err + } + return []interface{}{ + map[string]interface{}{ + "swap_file_size_mb": swapFileSizeMB, + "sysctl_config": sysctlConfig, + "transparent_huge_page_defrag": transparentHugePageDefrag, + "transparent_huge_page_enabled": transparentHugePageEnabled, + }, + }, nil +} + +func flattenAgentPoolSysctlConfig(input *agentpools.SysctlConfig) ([]interface{}, error) { + if input == nil { + return make([]interface{}, 0), nil + } + + var fsAioMaxNr int + if input.FsAioMaxNr != nil { + fsAioMaxNr = int(*input.FsAioMaxNr) + } + var fsFileMax int + if input.FsFileMax != nil { + fsFileMax = int(*input.FsFileMax) + } + var fsInotifyMaxUserWatches int + if input.FsInotifyMaxUserWatches != nil { + fsInotifyMaxUserWatches = int(*input.FsInotifyMaxUserWatches) + } + var fsNrOpen int + if input.FsNrOpen != nil { + fsNrOpen = int(*input.FsNrOpen) + } + var kernelThreadsMax int + if input.KernelThreadsMax != nil { + kernelThreadsMax = int(*input.KernelThreadsMax) + } + var netCoreNetdevMaxBacklog int + if input.NetCoreNetdevMaxBacklog != nil { + netCoreNetdevMaxBacklog = int(*input.NetCoreNetdevMaxBacklog) + } + var netCoreOptmemMax int + if input.NetCoreOptmemMax != nil { + netCoreOptmemMax = int(*input.NetCoreOptmemMax) + } + var netCoreRmemDefault int + if input.NetCoreRmemDefault != nil { + netCoreRmemDefault = int(*input.NetCoreRmemDefault) + } + var netCoreRmemMax int + if input.NetCoreRmemMax != nil { + netCoreRmemMax = int(*input.NetCoreRmemMax) + } + var netCoreSomaxconn int + if input.NetCoreSomaxconn != nil { + netCoreSomaxconn = int(*input.NetCoreSomaxconn) + } + var netCoreWmemDefault int + if input.NetCoreWmemDefault != nil { + netCoreWmemDefault = int(*input.NetCoreWmemDefault) + } + var netCoreWmemMax int + if input.NetCoreWmemMax != nil { + netCoreWmemMax = int(*input.NetCoreWmemMax) + } + var netIpv4IpLocalPortRangeMin, netIpv4IpLocalPortRangeMax int + if input.NetIPv4IPLocalPortRange != nil { + arr := regexp.MustCompile("[ \t]+").Split(*input.NetIPv4IPLocalPortRange, -1) + if len(arr) != 2 { + return nil, fmt.Errorf("parsing `NetIPv4IPLocalPortRange` %s", *input.NetIPv4IPLocalPortRange) + } + var err error + netIpv4IpLocalPortRangeMin, err = strconv.Atoi(arr[0]) + if err != nil { + return nil, err + } + netIpv4IpLocalPortRangeMax, err = strconv.Atoi(arr[1]) + if err != nil { + return nil, err + } + } + var netIpv4NeighDefaultGcThresh1 int + if input.NetIPv4NeighDefaultGcThresh1 != nil { + netIpv4NeighDefaultGcThresh1 = int(*input.NetIPv4NeighDefaultGcThresh1) + } + var netIpv4NeighDefaultGcThresh2 int + if input.NetIPv4NeighDefaultGcThresh2 != nil { + netIpv4NeighDefaultGcThresh2 = int(*input.NetIPv4NeighDefaultGcThresh2) + } + var netIpv4NeighDefaultGcThresh3 int + if input.NetIPv4NeighDefaultGcThresh3 != nil { + netIpv4NeighDefaultGcThresh3 = int(*input.NetIPv4NeighDefaultGcThresh3) + } + var netIpv4TcpFinTimeout int + if input.NetIPv4TcpFinTimeout != nil { + netIpv4TcpFinTimeout = int(*input.NetIPv4TcpFinTimeout) + } + var netIpv4TcpkeepaliveIntvl int + if input.NetIPv4TcpkeepaliveIntvl != nil { + netIpv4TcpkeepaliveIntvl = 
int(*input.NetIPv4TcpkeepaliveIntvl) + } + var netIpv4TcpKeepaliveProbes int + if input.NetIPv4TcpKeepaliveProbes != nil { + netIpv4TcpKeepaliveProbes = int(*input.NetIPv4TcpKeepaliveProbes) + } + var netIpv4TcpKeepaliveTime int + if input.NetIPv4TcpKeepaliveTime != nil { + netIpv4TcpKeepaliveTime = int(*input.NetIPv4TcpKeepaliveTime) + } + var netIpv4TcpMaxSynBacklog int + if input.NetIPv4TcpMaxSynBacklog != nil { + netIpv4TcpMaxSynBacklog = int(*input.NetIPv4TcpMaxSynBacklog) + } + var netIpv4TcpMaxTwBuckets int + if input.NetIPv4TcpMaxTwBuckets != nil { + netIpv4TcpMaxTwBuckets = int(*input.NetIPv4TcpMaxTwBuckets) + } + var netIpv4TcpTwReuse bool + if input.NetIPv4TcpTwReuse != nil { + netIpv4TcpTwReuse = *input.NetIPv4TcpTwReuse + } + var netNetfilterNfConntrackBuckets int + if input.NetNetfilterNfConntrackBuckets != nil { + netNetfilterNfConntrackBuckets = int(*input.NetNetfilterNfConntrackBuckets) + } + var netNetfilterNfConntrackMax int + if input.NetNetfilterNfConntrackMax != nil { + netNetfilterNfConntrackMax = int(*input.NetNetfilterNfConntrackMax) + } + var vmMaxMapCount int + if input.VmMaxMapCount != nil { + vmMaxMapCount = int(*input.VmMaxMapCount) + } + var vmSwappiness int + if input.VmSwappiness != nil { + vmSwappiness = int(*input.VmSwappiness) + } + var vmVfsCachePressure int + if input.VmVfsCachePressure != nil { + vmVfsCachePressure = int(*input.VmVfsCachePressure) + } + return []interface{}{ + map[string]interface{}{ + "fs_aio_max_nr": fsAioMaxNr, + "fs_file_max": fsFileMax, + "fs_inotify_max_user_watches": fsInotifyMaxUserWatches, + "fs_nr_open": fsNrOpen, + "kernel_threads_max": kernelThreadsMax, + "net_core_netdev_max_backlog": netCoreNetdevMaxBacklog, + "net_core_optmem_max": netCoreOptmemMax, + "net_core_rmem_default": netCoreRmemDefault, + "net_core_rmem_max": netCoreRmemMax, + "net_core_somaxconn": netCoreSomaxconn, + "net_core_wmem_default": netCoreWmemDefault, + "net_core_wmem_max": netCoreWmemMax, + "net_ipv4_ip_local_port_range_min": netIpv4IpLocalPortRangeMin, + "net_ipv4_ip_local_port_range_max": netIpv4IpLocalPortRangeMax, + "net_ipv4_neigh_default_gc_thresh1": netIpv4NeighDefaultGcThresh1, + "net_ipv4_neigh_default_gc_thresh2": netIpv4NeighDefaultGcThresh2, + "net_ipv4_neigh_default_gc_thresh3": netIpv4NeighDefaultGcThresh3, + "net_ipv4_tcp_fin_timeout": netIpv4TcpFinTimeout, + "net_ipv4_tcp_keepalive_intvl": netIpv4TcpkeepaliveIntvl, + "net_ipv4_tcp_keepalive_probes": netIpv4TcpKeepaliveProbes, + "net_ipv4_tcp_keepalive_time": netIpv4TcpKeepaliveTime, + "net_ipv4_tcp_max_syn_backlog": netIpv4TcpMaxSynBacklog, + "net_ipv4_tcp_max_tw_buckets": netIpv4TcpMaxTwBuckets, + "net_ipv4_tcp_tw_reuse": netIpv4TcpTwReuse, + "net_netfilter_nf_conntrack_buckets": netNetfilterNfConntrackBuckets, + "net_netfilter_nf_conntrack_max": netNetfilterNfConntrackMax, + "vm_max_map_count": vmMaxMapCount, + "vm_swappiness": vmSwappiness, + "vm_vfs_cache_pressure": vmVfsCachePressure, + }, + }, nil +} diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go index 7067512ba5d0..da8a1d48651c 100644 --- a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go +++ b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go @@ -8,10 +8,11 @@ import ( "strings" "testing" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools" + 
"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -819,52 +820,53 @@ func TestAccKubernetesClusterNodePool_workloadRuntime(t *testing.T) { } func (t KubernetesClusterNodePoolResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.NodePoolID(state.ID) + id, err := agentpools.ParseAgentPoolID(state.ID) if err != nil { return nil, err } - resp, err := clients.Containers.AgentPoolsClient.Get(ctx, id.ResourceGroup, id.ManagedClusterName, id.AgentPoolName) + resp, err := clients.Containers.AgentPoolsClient.Get(ctx, *id) if err != nil { return nil, fmt.Errorf("reading Kubernetes Cluster Node Pool (%s): %+v", id.String(), err) } - return utils.Bool(resp.ID != nil), nil + return utils.Bool(resp.Model != nil && resp.Model.Id != nil), nil } func (KubernetesClusterNodePoolResource) scaleNodePool(nodeCount int) acceptance.ClientCheckFunc { return func(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) error { nodePoolName := state.Attributes["name"] kubernetesClusterId := state.Attributes["kubernetes_cluster_id"] - parsedK8sId, err := parse.ClusterID(kubernetesClusterId) + parsedK8sId, err := managedclusters.ParseManagedClusterID(kubernetesClusterId) if err != nil { return fmt.Errorf("parsing kubernetes cluster id: %+v", err) } + parsedAgentPoolId := agentpools.NewAgentPoolID(parsedK8sId.SubscriptionId, parsedK8sId.ResourceGroupName, parsedK8sId.ResourceName, nodePoolName) - clusterName := parsedK8sId.ManagedClusterName - resourceGroup := parsedK8sId.ResourceGroup + clusterName := parsedK8sId.ResourceName + resourceGroup := parsedK8sId.ResourceGroupName - nodePool, err := clients.Containers.AgentPoolsClient.Get(ctx, resourceGroup, clusterName, nodePoolName) + nodePool, err := clients.Containers.AgentPoolsClient.Get(ctx, parsedAgentPoolId) if err != nil { return fmt.Errorf("Bad: Get on agentPoolsClient: %+v", err) } - if nodePool.StatusCode == http.StatusNotFound { + if nodePool.HttpResponse.StatusCode == http.StatusNotFound { return fmt.Errorf("Bad: Node Pool %q (Kubernetes Cluster %q / Resource Group: %q) does not exist", nodePoolName, clusterName, resourceGroup) } - if nodePool.ManagedClusterAgentPoolProfileProperties == nil { + if nodePool.Model == nil || nodePool.Model.Properties == nil { return fmt.Errorf("Bad: Node Pool %q (Kubernetes Cluster %q / Resource Group: %q): `properties` was nil", nodePoolName, clusterName, resourceGroup) } - nodePool.ManagedClusterAgentPoolProfileProperties.Count = utils.Int32(int32(nodeCount)) + nodePool.Model.Properties.Count = utils.Int64(int64(nodeCount)) - future, err := clients.Containers.AgentPoolsClient.CreateOrUpdate(ctx, resourceGroup, clusterName, nodePoolName, nodePool) + future, err := clients.Containers.AgentPoolsClient.CreateOrUpdate(ctx, parsedAgentPoolId, *nodePool.Model) if err != nil { return fmt.Errorf("Bad: updating node pool %q: %+v", nodePoolName, err) } - if err := future.WaitForCompletionRef(ctx, clients.Containers.AgentPoolsClient.Client); err != nil { + if err := 
future.Poller.PollUntilDone(); err != nil { return fmt.Errorf("Bad: waiting for update of node pool %q: %+v", nodePoolName, err) } diff --git a/internal/services/containers/kubernetes_cluster_resource.go b/internal/services/containers/kubernetes_cluster_resource.go index 47f9a091e57c..184f8ac24372 100644 --- a/internal/services/containers/kubernetes_cluster_resource.go +++ b/internal/services/containers/kubernetes_cluster_resource.go @@ -2,18 +2,22 @@ package containers import ( "context" + "encoding/base64" "fmt" "log" "strconv" "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice" - "github.com/Azure/go-autorest/autorest/date" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" "github.com/hashicorp/go-azure-helpers/resourcemanager/edgezones" "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" + "github.com/hashicorp/go-azure-helpers/resourcemanager/tags" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters" "github.com/hashicorp/go-azure-sdk/resource-manager/operationalinsights/2020-08-01/workspaces" "github.com/hashicorp/go-azure-sdk/resource-manager/privatedns/2018-09-01/privatezones" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -24,9 +28,7 @@ import ( computeValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/kubernetes" "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/migration" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/parse" containerValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/validate" - "github.com/hashicorp/terraform-provider-azurerm/internal/tags" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/suppress" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" @@ -42,7 +44,7 @@ func resourceKubernetesCluster() *pluginsdk.Resource { Delete: resourceKubernetesClusterDelete, Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { - _, err := parse.ClusterID(id) + _, err := managedclusters.ParseManagedClusterID(id) return err }), @@ -100,10 +102,10 @@ func resourceKubernetesCluster() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.UpgradeChannelPatch), - string(containerservice.UpgradeChannelRapid), - string(containerservice.UpgradeChannelStable), - string(containerservice.UpgradeChannelNodeImage), + string(managedclusters.UpgradeChannelPatch), + string(managedclusters.UpgradeChannelRapid), + string(managedclusters.UpgradeChannelStable), + string(managedclusters.UpgradeChannelNodeNegativeimage), }, false), }, @@ -124,10 +126,10 @@ func resourceKubernetesCluster() *pluginsdk.Resource { Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.ExpanderLeastWaste), - 
string(containerservice.ExpanderMostPods), - string(containerservice.ExpanderPriority), - string(containerservice.ExpanderRandom), + string(managedclusters.ExpanderLeastNegativewaste), + string(managedclusters.ExpanderMostNegativepods), + string(managedclusters.ExpanderPriority), + string(managedclusters.ExpanderRandom), }, false), }, "max_graceful_termination_sec": { @@ -576,13 +578,13 @@ func resourceKubernetesCluster() *pluginsdk.Resource { Type: pluginsdk.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.WeekDaySunday), - string(containerservice.WeekDayMonday), - string(containerservice.WeekDayTuesday), - string(containerservice.WeekDayWednesday), - string(containerservice.WeekDayThursday), - string(containerservice.WeekDayFriday), - string(containerservice.WeekDaySaturday), + string(maintenanceconfigurations.WeekDaySunday), + string(maintenanceconfigurations.WeekDayMonday), + string(maintenanceconfigurations.WeekDayTuesday), + string(maintenanceconfigurations.WeekDayWednesday), + string(maintenanceconfigurations.WeekDayThursday), + string(maintenanceconfigurations.WeekDayFriday), + string(maintenanceconfigurations.WeekDaySaturday), }, false), }, @@ -653,9 +655,9 @@ func resourceKubernetesCluster() *pluginsdk.Resource { Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.NetworkPluginAzure), - string(containerservice.NetworkPluginKubenet), - string(containerservice.NetworkPluginNone), + string(managedclusters.NetworkPluginAzure), + string(managedclusters.NetworkPluginKubenet), + string(managedclusters.NetworkPluginNone), }, false), }, @@ -668,8 +670,8 @@ func resourceKubernetesCluster() *pluginsdk.Resource { // https://github.com/Azure/AKS/issues/1954#issuecomment-759306712 // Transparent is already the default and only option for CNI // Bridge is only kept for backward compatibility - string(containerservice.NetworkModeBridge), - string(containerservice.NetworkModeTransparent), + string(managedclusters.NetworkModeBridge), + string(managedclusters.NetworkModeTransparent), }, false), }, @@ -679,8 +681,8 @@ func resourceKubernetesCluster() *pluginsdk.Resource { Computed: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.NetworkPolicyCalico), - string(containerservice.NetworkPolicyAzure), + string(managedclusters.NetworkPolicyCalico), + string(managedclusters.NetworkPolicyAzure), }, false), }, @@ -741,11 +743,11 @@ func resourceKubernetesCluster() *pluginsdk.Resource { "load_balancer_sku": { Type: pluginsdk.TypeString, Optional: true, - Default: string(containerservice.LoadBalancerSkuStandard), + Default: string(managedclusters.LoadBalancerSkuStandard), ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.LoadBalancerSkuBasic), - string(containerservice.LoadBalancerSkuStandard), + string(managedclusters.LoadBalancerSkuBasic), + string(managedclusters.LoadBalancerSkuStandard), }, false), }, @@ -753,12 +755,12 @@ func resourceKubernetesCluster() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ForceNew: true, - Default: string(containerservice.OutboundTypeLoadBalancer), + Default: string(managedclusters.OutboundTypeLoadBalancer), ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.OutboundTypeLoadBalancer), - string(containerservice.OutboundTypeUserDefinedRouting), - string(containerservice.OutboundTypeManagedNATGateway), - 
string(containerservice.OutboundTypeUserAssignedNATGateway), + string(managedclusters.OutboundTypeLoadBalancer), + string(managedclusters.OutboundTypeUserDefinedRouting), + string(managedclusters.OutboundTypeManagedNATGateway), + string(managedclusters.OutboundTypeUserAssignedNATGateway), }, false), }, @@ -870,8 +872,8 @@ func resourceKubernetesCluster() *pluginsdk.Resource { Elem: &pluginsdk.Schema{ Type: pluginsdk.TypeString, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.IPFamilyIPv4), - string(containerservice.IPFamilyIPv6), + string(managedclusters.IPFamilyIPvFour), + string(managedclusters.IPFamilyIPvSix), }, false), }, }, @@ -910,6 +912,7 @@ func resourceKubernetesCluster() *pluginsdk.Resource { Type: pluginsdk.TypeBool, Optional: true, ForceNew: true, + Default: false, }, "private_cluster_public_fqdn_enabled": { @@ -978,14 +981,14 @@ func resourceKubernetesCluster() *pluginsdk.Resource { "sku_tier": { Type: pluginsdk.TypeString, Optional: true, - Default: string(containerservice.ManagedClusterSKUTierFree), + Default: string(managedclusters.ManagedClusterSKUTierFree), ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.ManagedClusterSKUTierFree), - string(containerservice.ManagedClusterSKUTierPaid), + string(managedclusters.ManagedClusterSKUTierFree), + string(managedclusters.ManagedClusterSKUTierPaid), }, false), }, - "tags": tags.Schema(), + "tags": commonschema.Tags(), "windows_profile": { Type: pluginsdk.TypeList, @@ -1009,7 +1012,7 @@ func resourceKubernetesCluster() *pluginsdk.Resource { Type: pluginsdk.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.LicenseTypeWindowsServer), + string(managedclusters.LicenseTypeWindowsServer), }, false), }, "gmsa": { @@ -1059,19 +1062,19 @@ func resourceKubernetesClusterCreate(d *pluginsdk.ResourceData, meta interface{} log.Printf("[INFO] preparing arguments for Managed Kubernetes Cluster create.") - id := parse.NewClusterID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) - existing, err := client.Get(ctx, id.ResourceGroup, id.ManagedClusterName) + id := managedclusters.NewManagedClusterID(subscriptionId, d.Get("resource_group_name").(string), d.Get("name").(string)) + existing, err := client.Get(ctx, id) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { + if !response.WasNotFound(existing.HttpResponse) { return fmt.Errorf("checking for presence of existing %s: %+v", id, err) } } - if !utils.ResponseWasNotFound(existing.Response) { + if !response.WasNotFound(existing.HttpResponse) { return tf.ImportAsExistsError("azurerm_kubernetes_cluster", id.ID()) } - if err := validateKubernetesCluster(d, nil, id.ResourceGroup, id.ManagedClusterName); err != nil { + if err := validateKubernetesCluster(d, nil, id.ResourceGroupName, id.ResourceName); err != nil { return err } @@ -1093,13 +1096,13 @@ func resourceKubernetesClusterCreate(d *pluginsdk.ResourceData, meta interface{} // supplied by the user which will result in a diff in some cases, so if versions have been supplied check that they // are identical agentProfile := ConvertDefaultNodePoolToAgentPool(agentProfiles) - if nodePoolVersion := agentProfile.ManagedClusterAgentPoolProfileProperties.OrchestratorVersion; nodePoolVersion != nil { + if nodePoolVersion := agentProfile.Properties.CurrentOrchestratorVersion; nodePoolVersion != nil { if kubernetesVersion != "" && kubernetesVersion != *nodePoolVersion { return fmt.Errorf("version mismatch 
between the control plane running %s and default node pool running %s, they must use the same kubernetes versions", kubernetesVersion, *nodePoolVersion) } } - var addonProfiles *map[string]*containerservice.ManagedClusterAddonProfile + var addonProfiles *map[string]managedclusters.ManagedClusterAddonProfile addOns := collectKubernetesAddons(d) addonProfiles, err = expandKubernetesAddOns(d, addOns, env) if err != nil { @@ -1112,7 +1115,7 @@ func resourceKubernetesClusterCreate(d *pluginsdk.ResourceData, meta interface{} return err } - var azureADProfile *containerservice.ManagedClusterAADProfile + var azureADProfile *managedclusters.ManagedClusterAADProfile if v, ok := d.GetOk("azure_active_directory_role_based_access_control"); ok { azureADProfile, err = expandKubernetesClusterAzureActiveDirectoryRoleBasedAccessControl(v.([]interface{}), tenantId) if err != nil { @@ -1137,7 +1140,7 @@ func resourceKubernetesClusterCreate(d *pluginsdk.ResourceData, meta interface{} return fmt.Errorf("`dns_prefix` should be set if it is not a private cluster") } - apiAccessProfile := containerservice.ManagedClusterAPIServerAccessProfile{ + apiAccessProfile := managedclusters.ManagedClusterAPIServerAccessProfile{ EnablePrivateCluster: &enablePrivateCluster, AuthorizedIPRanges: apiServerAuthorizedIPRanges, EnablePrivateClusterPublicFQDN: utils.Bool(d.Get("private_cluster_public_fqdn_enabled").(bool)), @@ -1157,15 +1160,15 @@ func resourceKubernetesClusterCreate(d *pluginsdk.ResourceData, meta interface{} httpProxyConfig := expandKubernetesClusterHttpProxyConfig(httpProxyConfigRaw) enableOidcIssuer := false - var oidcIssuerProfile *containerservice.ManagedClusterOIDCIssuerProfile + var oidcIssuerProfile *managedclusters.ManagedClusterOIDCIssuerProfile if v, ok := d.GetOk("oidc_issuer_enabled"); ok { enableOidcIssuer = v.(bool) oidcIssuerProfile = expandKubernetesClusterOidcIssuerProfile(enableOidcIssuer) } - publicNetworkAccess := containerservice.PublicNetworkAccessEnabled + publicNetworkAccess := managedclusters.PublicNetworkAccessEnabled if !d.Get("public_network_access_enabled").(bool) { - publicNetworkAccess = containerservice.PublicNetworkAccessDisabled + publicNetworkAccess = managedclusters.PublicNetworkAccessDisabled } microsoftDefenderRaw := d.Get("microsoft_defender").([]interface{}) @@ -1180,36 +1183,36 @@ func resourceKubernetesClusterCreate(d *pluginsdk.ResourceData, meta interface{} } if securityProfile == nil { - securityProfile = &containerservice.ManagedClusterSecurityProfile{} + securityProfile = &managedclusters.ManagedClusterSecurityProfile{} } - securityProfile.WorkloadIdentity = &containerservice.ManagedClusterSecurityProfileWorkloadIdentity{ + securityProfile.WorkloadIdentity = &managedclusters.ManagedClusterSecurityProfileWorkloadIdentity{ Enabled: &workloadIdentity, } } - parameters := containerservice.ManagedCluster{ - Name: utils.String(id.ManagedClusterName), + parameters := managedclusters.ManagedCluster{ + Name: utils.String(id.ResourceName), ExtendedLocation: expandEdgeZone(d.Get("edge_zone").(string)), - Location: utils.String(location), - Sku: &containerservice.ManagedClusterSKU{ - Name: containerservice.ManagedClusterSKUNameBasic, // the only possible value at this point - Tier: containerservice.ManagedClusterSKUTier(d.Get("sku_tier").(string)), + Location: location, + Sku: &managedclusters.ManagedClusterSKU{ + Name: utils.ToPtr(managedclusters.ManagedClusterSKUNameBasic), // the only possible value at this point + Tier: 
utils.ToPtr(managedclusters.ManagedClusterSKUTier(d.Get("sku_tier").(string))), }, - ManagedClusterProperties: &containerservice.ManagedClusterProperties{ - APIServerAccessProfile: &apiAccessProfile, + Properties: &managedclusters.ManagedClusterProperties{ + ApiServerAccessProfile: &apiAccessProfile, AadProfile: azureADProfile, - AddonProfiles: *addonProfiles, + AddonProfiles: addonProfiles, AgentPoolProfiles: agentProfiles, AutoScalerProfile: autoScalerProfile, - DNSPrefix: utils.String(dnsPrefix), + DnsPrefix: utils.String(dnsPrefix), EnableRBAC: utils.Bool(d.Get("role_based_access_control_enabled").(bool)), KubernetesVersion: utils.String(kubernetesVersion), LinuxProfile: linuxProfile, WindowsProfile: windowsProfile, NetworkProfile: networkProfile, NodeResourceGroup: utils.String(nodeResourceGroup), - PublicNetworkAccess: publicNetworkAccess, + PublicNetworkAccess: &publicNetworkAccess, DisableLocalAccounts: utils.Bool(d.Get("local_account_disabled").(bool)), HTTPProxyConfig: httpProxyConfig, OidcIssuerProfile: oidcIssuerProfile, @@ -1219,12 +1222,12 @@ func resourceKubernetesClusterCreate(d *pluginsdk.ResourceData, meta interface{} } if v := d.Get("automatic_channel_upgrade").(string); v != "" { - parameters.ManagedClusterProperties.AutoUpgradeProfile = &containerservice.ManagedClusterAutoUpgradeProfile{ - UpgradeChannel: containerservice.UpgradeChannel(v), + parameters.Properties.AutoUpgradeProfile = &managedclusters.ManagedClusterAutoUpgradeProfile{ + UpgradeChannel: utils.ToPtr(managedclusters.UpgradeChannel(v)), } } else { - parameters.ManagedClusterProperties.AutoUpgradeProfile = &containerservice.ManagedClusterAutoUpgradeProfile{ - UpgradeChannel: containerservice.UpgradeChannelNone, + parameters.Properties.AutoUpgradeProfile = &managedclusters.ManagedClusterAutoUpgradeProfile{ + UpgradeChannel: utils.ToPtr(managedclusters.UpgradeChannelNone), } } @@ -1242,26 +1245,26 @@ func resourceKubernetesClusterCreate(d *pluginsdk.ResourceData, meta interface{} return fmt.Errorf("expanding `identity`: %+v", err) } parameters.Identity = expandedIdentity - parameters.ManagedClusterProperties.ServicePrincipalProfile = &containerservice.ManagedClusterServicePrincipalProfile{ - ClientID: utils.String("msi"), + parameters.Properties.ServicePrincipalProfile = &managedclusters.ManagedClusterServicePrincipalProfile{ + ClientId: "msi", } } if len(kubernetesClusterIdentityRaw) > 0 { - parameters.ManagedClusterProperties.IdentityProfile = expandKubernetesClusterIdentityProfile(kubernetesClusterIdentityRaw) + parameters.Properties.IdentityProfile = expandKubernetesClusterIdentityProfile(kubernetesClusterIdentityRaw) } servicePrincipalSet := false if len(servicePrincipalProfileRaw) > 0 { servicePrincipalProfileVal := servicePrincipalProfileRaw[0].(map[string]interface{}) - parameters.ManagedClusterProperties.ServicePrincipalProfile = &containerservice.ManagedClusterServicePrincipalProfile{ - ClientID: utils.String(servicePrincipalProfileVal["client_id"].(string)), + parameters.Properties.ServicePrincipalProfile = &managedclusters.ManagedClusterServicePrincipalProfile{ + ClientId: servicePrincipalProfileVal["client_id"].(string), Secret: utils.String(servicePrincipalProfileVal["client_secret"].(string)), } servicePrincipalSet = true } if v, ok := d.GetOk("private_dns_zone_id"); ok { - if (parameters.Identity == nil && !servicePrincipalSet) || (v.(string) != "System" && v.(string) != "None" && (!servicePrincipalSet && parameters.Identity.Type != containerservice.ResourceIdentityTypeUserAssigned)) { + if 
(parameters.Identity == nil && !servicePrincipalSet) || (v.(string) != "System" && v.(string) != "None" && (!servicePrincipalSet && parameters.Identity.Type != identity.TypeUserAssigned)) { return fmt.Errorf("a user assigned identity or a service principal must be used when using a custom private dns zone") } apiAccessProfile.PrivateDNSZone = utils.String(v.(string)) @@ -1271,28 +1274,29 @@ func resourceKubernetesClusterCreate(d *pluginsdk.ResourceData, meta interface{} if !enablePrivateCluster || apiAccessProfile.PrivateDNSZone == nil || *apiAccessProfile.PrivateDNSZone == "System" || *apiAccessProfile.PrivateDNSZone == "None" { return fmt.Errorf("`dns_prefix_private_cluster` should only be set for private cluster with custom private dns zone") } - parameters.FqdnSubdomain = utils.String(v.(string)) + parameters.Properties.FqdnSubdomain = utils.String(v.(string)) } if v, ok := d.GetOk("disk_encryption_set_id"); ok && v.(string) != "" { - parameters.ManagedClusterProperties.DiskEncryptionSetID = utils.String(v.(string)) + parameters.Properties.DiskEncryptionSetID = utils.String(v.(string)) } - future, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ManagedClusterName, parameters) + future, err := client.CreateOrUpdate(ctx, id, parameters) if err != nil { return fmt.Errorf("creating %s: %+v", id, err) } - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + if err = future.Poller.PollUntilDone(); err != nil { return fmt.Errorf("waiting for creation of %s: %+v", id, err) } if maintenanceConfigRaw, ok := d.GetOk("maintenance_window"); ok { client := meta.(*clients.Client).Containers.MaintenanceConfigurationsClient - parameters := containerservice.MaintenanceConfiguration{ - MaintenanceConfigurationProperties: expandKubernetesClusterMaintenanceConfiguration(maintenanceConfigRaw.([]interface{})), + parameters := maintenanceconfigurations.MaintenanceConfiguration{ + Properties: expandKubernetesClusterMaintenanceConfiguration(maintenanceConfigRaw.([]interface{})), } - if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ManagedClusterName, "default", parameters); err != nil { + maintenanceId := maintenanceconfigurations.NewMaintenanceConfigurationID(id.SubscriptionId, id.ResourceGroupName, id.ResourceName, "default") + if _, err := client.CreateOrUpdate(ctx, maintenanceId, parameters); err != nil { return fmt.Errorf("creating/updating maintenance config for %s: %+v", id, err) } } @@ -1309,7 +1313,7 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.ClusterID(d.Id()) + id, err := managedclusters.ParseManagedClusterID(d.Id()) if err != nil { return err } @@ -1317,23 +1321,24 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} d.Partial(true) // we need to conditionally update the cluster - existing, err := clusterClient.Get(ctx, id.ResourceGroup, id.ManagedClusterName) + existing, err := clusterClient.Get(ctx, *id) if err != nil { return fmt.Errorf("retrieving existing %s: %+v", *id, err) } - if existing.ManagedClusterProperties == nil { + if existing.Model == nil || existing.Model.Properties == nil { return fmt.Errorf("retrieving existing %s: `properties` was nil", *id) } + props := existing.Model.Properties - if err := validateKubernetesCluster(d, &existing, id.ResourceGroup, id.ManagedClusterName); err != nil { + if err := validateKubernetesCluster(d, existing.Model, id.ResourceGroupName, 
id.ResourceName); err != nil { return err } // when update, we should set the value of `Identity.UserAssignedIdentities` empty // otherwise the rest api will report error - this is tracked here: https://github.com/Azure/azure-rest-api-specs/issues/13631 - if existing.Identity != nil && existing.Identity.UserAssignedIdentities != nil { - for k := range existing.Identity.UserAssignedIdentities { - existing.Identity.UserAssignedIdentities[k] = &containerservice.ManagedClusterIdentityUserAssignedIdentitiesValue{} + if existing.Model.Identity != nil && existing.Model.Identity.IdentityIds != nil { + for k := range existing.Model.Identity.IdentityIds { + existing.Model.Identity.IdentityIds[k] = identity.UserAssignedIdentityDetails{} } } @@ -1345,26 +1350,26 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} clientId := servicePrincipalRaw["client_id"].(string) clientSecret := servicePrincipalRaw["client_secret"].(string) - params := containerservice.ManagedClusterServicePrincipalProfile{ - ClientID: utils.String(clientId), + params := managedclusters.ManagedClusterServicePrincipalProfile{ + ClientId: clientId, Secret: utils.String(clientSecret), } - future, err := clusterClient.ResetServicePrincipalProfile(ctx, id.ResourceGroup, id.ManagedClusterName, params) + future, err := clusterClient.ResetServicePrincipalProfile(ctx, *id, params) if err != nil { return fmt.Errorf("updating Service Principal for %s: %+v", *id, err) } - if err = future.WaitForCompletionRef(ctx, clusterClient.Client); err != nil { + if err = future.Poller.PollUntilDone(); err != nil { return fmt.Errorf("waiting for update of Service Principal for %s: %+v", *id, err) } log.Printf("[DEBUG] Updated the Service Principal for %s.", *id) // since we're patching it, re-retrieve the latest version of the cluster - existing, err = clusterClient.Get(ctx, id.ResourceGroup, id.ManagedClusterName) + existing, err = clusterClient.Get(ctx, *id) if err != nil { return fmt.Errorf("retrieving updated %s: %+v", *id, err) } - if existing.ManagedClusterProperties == nil { + if existing.Model == nil || existing.Model.Properties == nil { return fmt.Errorf("retrieving updated %s: `properties` was nil", *id) } } @@ -1374,7 +1379,7 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} // RBAC profile updates need to be handled atomically before any call to createUpdate as a diff there will create a PropertyChangeNotAllowed error if d.HasChange("role_based_access_control_enabled") { - props := existing.ManagedClusterProperties + // check if we can determine current EnableRBAC state - don't do anything destructive if we can't be sure if props.EnableRBAC == nil { return fmt.Errorf("updating %s: RBAC Enabled was nil", *id) @@ -1390,7 +1395,6 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} } if d.HasChange("azure_active_directory_role_based_access_control") { - props := existing.ManagedClusterProperties tenantId := meta.(*clients.Client).Account.TenantId azureADRaw := d.Get("azure_active_directory_role_based_access_control").([]interface{}) azureADProfile, err := expandKubernetesClusterAzureActiveDirectoryRoleBasedAccessControl(azureADRaw, tenantId) @@ -1401,18 +1405,18 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} props.AadProfile = azureADProfile if props.AadProfile != nil && (props.AadProfile.Managed == nil || !*props.AadProfile.Managed) { log.Printf("[DEBUG] Updating the RBAC AAD profile") - future, err := 
clusterClient.ResetAADProfile(ctx, id.ResourceGroup, id.ManagedClusterName, *props.AadProfile) + future, err := clusterClient.ResetAADProfile(ctx, *id, *props.AadProfile) if err != nil { return fmt.Errorf("updating Managed Kubernetes Cluster AAD Profile for %s: %+v", *id, err) } - if err = future.WaitForCompletionRef(ctx, clusterClient.Client); err != nil { + if err = future.Poller.PollUntilDone(); err != nil { return fmt.Errorf("waiting for update of RBAC AAD profile of %s: %+v", *id, err) } } if props.AadProfile != nil && props.AadProfile.Managed != nil && *props.AadProfile.Managed { - existing.ManagedClusterProperties.AadProfile = azureADProfile + existing.Model.Properties.AadProfile = azureADProfile updateCluster = true } } @@ -1424,7 +1428,7 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} if err != nil { return err } - existing.ManagedClusterProperties.AddonProfiles = *addonProfiles + existing.Model.Properties.AddonProfiles = addonProfiles } if d.HasChange("api_server_authorized_ip_ranges") { @@ -1435,26 +1439,26 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} if v, ok := d.GetOk("private_cluster_enabled"); ok { enablePrivateCluster = v.(bool) } - existing.ManagedClusterProperties.APIServerAccessProfile = &containerservice.ManagedClusterAPIServerAccessProfile{ + existing.Model.Properties.ApiServerAccessProfile = &managedclusters.ManagedClusterAPIServerAccessProfile{ AuthorizedIPRanges: utils.ExpandStringSlice(apiServerAuthorizedIPRangesRaw), EnablePrivateCluster: &enablePrivateCluster, } if v, ok := d.GetOk("private_dns_zone_id"); ok { - existing.ManagedClusterProperties.APIServerAccessProfile.PrivateDNSZone = utils.String(v.(string)) + existing.Model.Properties.ApiServerAccessProfile.PrivateDNSZone = utils.String(v.(string)) } } if d.HasChange("private_cluster_public_fqdn_enabled") { updateCluster = true - existing.ManagedClusterProperties.APIServerAccessProfile.EnablePrivateClusterPublicFQDN = utils.Bool(d.Get("private_cluster_public_fqdn_enabled").(bool)) + existing.Model.Properties.ApiServerAccessProfile.EnablePrivateClusterPublicFQDN = utils.Bool(d.Get("private_cluster_public_fqdn_enabled").(bool)) } if d.HasChange("run_command_enabled") { updateCluster = true - if existing.ManagedClusterProperties.APIServerAccessProfile == nil { - existing.ManagedClusterProperties.APIServerAccessProfile = &containerservice.ManagedClusterAPIServerAccessProfile{} + if existing.Model.Properties.ApiServerAccessProfile == nil { + existing.Model.Properties.ApiServerAccessProfile = &managedclusters.ManagedClusterAPIServerAccessProfile{} } - existing.ManagedClusterProperties.APIServerAccessProfile.DisableRunCommand = utils.Bool(!d.Get("run_command_enabled").(bool)) + existing.Model.Properties.ApiServerAccessProfile.DisableRunCommand = utils.Bool(!d.Get("run_command_enabled").(bool)) } if d.HasChange("auto_scaler_profile") { @@ -1462,7 +1466,7 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} autoScalerProfileRaw := d.Get("auto_scaler_profile").([]interface{}) autoScalerProfile := expandKubernetesClusterAutoScalerProfile(autoScalerProfileRaw) - existing.ManagedClusterProperties.AutoScalerProfile = autoScalerProfile + existing.Model.Properties.AutoScalerProfile = autoScalerProfile } if d.HasChange("enable_pod_security_policy") && d.Get("enable_pod_security_policy").(bool) { @@ -1473,18 +1477,18 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} updateCluster = true 
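A minimal sketch of the long-running-operation handling this patch standardises on, assuming `clusterClient`, `id` and `parameters` as constructed earlier in this resource (names taken from the surrounding code): the track-1 `future.WaitForCompletionRef(ctx, client.Client)` call is replaced by driving the poller embedded in the returned future.

	future, err := clusterClient.CreateOrUpdate(ctx, id, parameters)
	if err != nil {
		return fmt.Errorf("creating/updating %s: %+v", id, err)
	}
	// the go-azure-sdk future exposes a poller; PollUntilDone blocks until the operation completes or fails
	if err := future.Poller.PollUntilDone(); err != nil {
		return fmt.Errorf("waiting for %s: %+v", id, err)
	}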
linuxProfileRaw := d.Get("linux_profile").([]interface{}) linuxProfile := expandKubernetesClusterLinuxProfile(linuxProfileRaw) - existing.ManagedClusterProperties.LinuxProfile = linuxProfile + existing.Model.Properties.LinuxProfile = linuxProfile } if d.HasChange("local_account_disabled") { updateCluster = true - existing.ManagedClusterProperties.DisableLocalAccounts = utils.Bool(d.Get("local_account_disabled").(bool)) + existing.Model.Properties.DisableLocalAccounts = utils.Bool(d.Get("local_account_disabled").(bool)) } if d.HasChange("network_profile") { updateCluster = true - networkProfile := *existing.ManagedClusterProperties.NetworkProfile + networkProfile := *existing.Model.Properties.NetworkProfile if networkProfile.LoadBalancerProfile == nil && networkProfile.NatGatewayProfile == nil { // on of the profiles should be present @@ -1501,13 +1505,13 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} if key := "network_profile.0.load_balancer_profile.0.idle_timeout_in_minutes"; d.HasChange(key) { idleTimeoutInMinutes := d.Get(key).(int) - loadBalancerProfile.IdleTimeoutInMinutes = utils.Int32(int32(idleTimeoutInMinutes)) + loadBalancerProfile.IdleTimeoutInMinutes = utils.Int64(int64(idleTimeoutInMinutes)) } if key := "network_profile.0.load_balancer_profile.0.managed_outbound_ip_count"; d.HasChange(key) { managedOutboundIPCount := d.Get(key).(int) - loadBalancerProfile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{ - Count: utils.Int32(int32(managedOutboundIPCount)), + loadBalancerProfile.ManagedOutboundIPs = &managedclusters.ManagedClusterLoadBalancerProfileManagedOutboundIPs{ + Count: utils.Int64(int64(managedOutboundIPCount)), } // fixes: Load balancer profile must specify one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs. @@ -1518,9 +1522,9 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} if key := "network_profile.0.load_balancer_profile.0.managed_outbound_ipv6_count"; d.HasChange(key) { managedOutboundIPV6Count := d.Get(key).(int) if loadBalancerProfile.ManagedOutboundIPs == nil { - loadBalancerProfile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{} + loadBalancerProfile.ManagedOutboundIPs = &managedclusters.ManagedClusterLoadBalancerProfileManagedOutboundIPs{} } - loadBalancerProfile.ManagedOutboundIPs.CountIPv6 = utils.Int32(int32(managedOutboundIPV6Count)) + loadBalancerProfile.ManagedOutboundIPs.CountIPv6 = utils.Int64(int64(managedOutboundIPV6Count)) // fixes: Load balancer profile must specify one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs. 
loadBalancerProfile.OutboundIPs = nil @@ -1532,14 +1536,14 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} if v := outboundIPAddress.(*pluginsdk.Set).List(); len(v) == 0 { // sending [] to unset `outbound_ip_address_ids` results in 400 / Bad Request // instead we default back to AKS managed outbound which is the default of the AKS API when nothing is provided - loadBalancerProfile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{ - Count: utils.Int32(1), + loadBalancerProfile.ManagedOutboundIPs = &managedclusters.ManagedClusterLoadBalancerProfileManagedOutboundIPs{ + Count: utils.Int64(1), } loadBalancerProfile.OutboundIPs = nil loadBalancerProfile.OutboundIPPrefixes = nil } else { publicIPAddressIDs := idsToResourceReferences(d.Get(key)) - loadBalancerProfile.OutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPs{ + loadBalancerProfile.OutboundIPs = &managedclusters.ManagedClusterLoadBalancerProfileOutboundIPs{ PublicIPs: publicIPAddressIDs, } @@ -1554,14 +1558,14 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} if v := outboundIPPrefixes.(*pluginsdk.Set).List(); len(v) == 0 { // sending [] to unset `outbound_ip_address_ids` results in 400 / Bad Request // instead we default back to AKS managed outbound which is the default of the AKS API when nothing is specified - loadBalancerProfile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{ - Count: utils.Int32(1), + loadBalancerProfile.ManagedOutboundIPs = &managedclusters.ManagedClusterLoadBalancerProfileManagedOutboundIPs{ + Count: utils.Int64(1), } loadBalancerProfile.OutboundIPs = nil loadBalancerProfile.OutboundIPPrefixes = nil } else { outboundIPPrefixIDs := idsToResourceReferences(d.Get(key)) - loadBalancerProfile.OutboundIPPrefixes = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{ + loadBalancerProfile.OutboundIPPrefixes = &managedclusters.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{ PublicIPPrefixes: outboundIPPrefixIDs, } @@ -1573,10 +1577,10 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} if key := "network_profile.0.load_balancer_profile.0.outbound_ports_allocated"; d.HasChange(key) { allocatedOutboundPorts := d.Get(key).(int) - loadBalancerProfile.AllocatedOutboundPorts = utils.Int32(int32(allocatedOutboundPorts)) + loadBalancerProfile.AllocatedOutboundPorts = utils.Int64(int64(allocatedOutboundPorts)) } - existing.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile = &loadBalancerProfile + existing.Model.Properties.NetworkProfile.LoadBalancerProfile = &loadBalancerProfile } if networkProfile.NatGatewayProfile != nil { @@ -1584,32 +1588,32 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} if key := "network_profile.0.nat_gateway_profile.0.idle_timeout_in_minutes"; d.HasChange(key) { idleTimeoutInMinutes := d.Get(key).(int) - natGatewayProfile.IdleTimeoutInMinutes = utils.Int32(int32(idleTimeoutInMinutes)) + natGatewayProfile.IdleTimeoutInMinutes = utils.Int64(int64(idleTimeoutInMinutes)) } if key := "network_profile.0.nat_gateway_profile.0.managed_outbound_ip_count"; d.HasChange(key) { managedOutboundIPCount := d.Get(key).(int) - natGatewayProfile.ManagedOutboundIPProfile = &containerservice.ManagedClusterManagedOutboundIPProfile{ - Count: utils.Int32(int32(managedOutboundIPCount)), + natGatewayProfile.ManagedOutboundIPProfile = 
&managedclusters.ManagedClusterManagedOutboundIPProfile{ + Count: utils.Int64(int64(managedOutboundIPCount)), } natGatewayProfile.EffectiveOutboundIPs = nil } - existing.ManagedClusterProperties.NetworkProfile.NatGatewayProfile = &natGatewayProfile + existing.Model.Properties.NetworkProfile.NatGatewayProfile = &natGatewayProfile } } if d.HasChange("tags") { updateCluster = true t := d.Get("tags").(map[string]interface{}) - existing.Tags = tags.Expand(t) + existing.Model.Tags = tags.Expand(t) } if d.HasChange("windows_profile") { updateCluster = true windowsProfileRaw := d.Get("windows_profile").([]interface{}) windowsProfile := expandKubernetesClusterWindowsProfile(windowsProfileRaw) - existing.ManagedClusterProperties.WindowsProfile = windowsProfile + existing.Model.Properties.WindowsProfile = windowsProfile } if d.HasChange("identity") { @@ -1620,66 +1624,67 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} if err != nil { return fmt.Errorf("expanding `identity`: %+v", err) } - existing.Identity = expandedIdentity + existing.Model.Identity = expandedIdentity } if d.HasChange("sku_tier") { updateCluster = true - if existing.Sku == nil { - existing.Sku = &containerservice.ManagedClusterSKU{ - Name: containerservice.ManagedClusterSKUNameBasic, + if existing.Model.Sku == nil { + basic := managedclusters.ManagedClusterSKUNameBasic + existing.Model.Sku = &managedclusters.ManagedClusterSKU{ + Name: &basic, } } - skuTier := containerservice.ManagedClusterSKUTierFree + skuTier := managedclusters.ManagedClusterSKUTierFree if v := d.Get("sku_tier").(string); v != "" { - skuTier = containerservice.ManagedClusterSKUTier(v) + skuTier = managedclusters.ManagedClusterSKUTier(v) } - existing.Sku.Tier = skuTier + existing.Model.Sku.Tier = &skuTier } if d.HasChange("automatic_channel_upgrade") { updateCluster = true - if existing.ManagedClusterProperties.AutoUpgradeProfile == nil { - existing.ManagedClusterProperties.AutoUpgradeProfile = &containerservice.ManagedClusterAutoUpgradeProfile{} + if existing.Model.Properties.AutoUpgradeProfile == nil { + existing.Model.Properties.AutoUpgradeProfile = &managedclusters.ManagedClusterAutoUpgradeProfile{} } - channel := containerservice.UpgradeChannelNone + channel := managedclusters.UpgradeChannelNone if v := d.Get("automatic_channel_upgrade").(string); v != "" { - channel = containerservice.UpgradeChannel(v) + channel = managedclusters.UpgradeChannel(v) } - existing.ManagedClusterProperties.AutoUpgradeProfile.UpgradeChannel = channel + existing.Model.Properties.AutoUpgradeProfile.UpgradeChannel = &channel } if d.HasChange("http_proxy_config") { updateCluster = true httpProxyConfigRaw := d.Get("http_proxy_config").([]interface{}) httpProxyConfig := expandKubernetesClusterHttpProxyConfig(httpProxyConfigRaw) - existing.ManagedClusterProperties.HTTPProxyConfig = httpProxyConfig + existing.Model.Properties.HTTPProxyConfig = httpProxyConfig } if d.HasChange("oidc_issuer_enabled") { updateCluster = true oidcIssuerEnabled := d.Get("oidc_issuer_enabled").(bool) oidcIssuerProfile := expandKubernetesClusterOidcIssuerProfile(oidcIssuerEnabled) - existing.ManagedClusterProperties.OidcIssuerProfile = oidcIssuerProfile + existing.Model.Properties.OidcIssuerProfile = oidcIssuerProfile } if d.HasChanges("microsoft_defender") { updateCluster = true microsoftDefenderRaw := d.Get("microsoft_defender").([]interface{}) microsoftDefender := expandKubernetesClusterMicrosoftDefender(d, microsoftDefenderRaw) - 
existing.ManagedClusterProperties.SecurityProfile = microsoftDefender + existing.Model.Properties.SecurityProfile = microsoftDefender } if d.HasChanges("workload_identity_enabled") { updateCluster = true workloadIdentity := d.Get("workload_identity_enabled").(bool) - if existing.ManagedClusterProperties.SecurityProfile == nil { - existing.ManagedClusterProperties.SecurityProfile = &containerservice.ManagedClusterSecurityProfile{} + if existing.Model.Properties.SecurityProfile == nil { + existing.Model.Properties.SecurityProfile = &managedclusters.ManagedClusterSecurityProfile{} } - existing.ManagedClusterProperties.SecurityProfile.WorkloadIdentity = &containerservice.ManagedClusterSecurityProfileWorkloadIdentity{ + existing.Model.Properties.SecurityProfile.WorkloadIdentity = &managedclusters.ManagedClusterSecurityProfileWorkloadIdentity{ Enabled: &workloadIdentity, } } @@ -1688,19 +1693,19 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} // If Defender was explicitly disabled in a prior update then we should strip SecurityProfile.AzureDefender from the request // body to prevent errors in cases where Defender is disabled for the entire subscription if !d.HasChanges("microsoft_defender") && len(d.Get("microsoft_defender").([]interface{})) == 0 { - if existing.ManagedClusterProperties.SecurityProfile == nil { - existing.ManagedClusterProperties.SecurityProfile = &containerservice.ManagedClusterSecurityProfile{} + if existing.Model.Properties.SecurityProfile == nil { + existing.Model.Properties.SecurityProfile = &managedclusters.ManagedClusterSecurityProfile{} } - existing.ManagedClusterProperties.SecurityProfile.AzureDefender = nil + existing.Model.Properties.SecurityProfile.Defender = nil } log.Printf("[DEBUG] Updating %s..", *id) - future, err := clusterClient.CreateOrUpdate(ctx, id.ResourceGroup, id.ManagedClusterName, existing) + future, err := clusterClient.CreateOrUpdate(ctx, *id, *existing.Model) if err != nil { return fmt.Errorf("updating %s: %+v", *id, err) } - if err = future.WaitForCompletionRef(ctx, clusterClient.Client); err != nil { + if err = future.Poller.PollUntilDone(); err != nil { return fmt.Errorf("waiting for update of %s: %+v", *id, err) } log.Printf("[DEBUG] Updated %s..", *id) @@ -1708,24 +1713,24 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} // then roll the version of Kubernetes if necessary if d.HasChange("kubernetes_version") { - existing, err = clusterClient.Get(ctx, id.ResourceGroup, id.ManagedClusterName) + existing, err = clusterClient.Get(ctx, *id) if err != nil { return fmt.Errorf("retrieving existing %s: %+v", *id, err) } - if existing.ManagedClusterProperties == nil { + if existing.Model == nil || existing.Model.Properties == nil { return fmt.Errorf("retrieving existing %s: `properties` was nil", *id) } kubernetesVersion := d.Get("kubernetes_version").(string) log.Printf("[DEBUG] Upgrading the version of Kubernetes to %q..", kubernetesVersion) - existing.ManagedClusterProperties.KubernetesVersion = utils.String(kubernetesVersion) + existing.Model.Properties.KubernetesVersion = utils.String(kubernetesVersion) - future, err := clusterClient.CreateOrUpdate(ctx, id.ResourceGroup, id.ManagedClusterName, existing) + future, err := clusterClient.CreateOrUpdate(ctx, *id, *existing.Model) if err != nil { return fmt.Errorf("updating Kubernetes Version for %s: %+v", *id, err) } - if err = future.WaitForCompletionRef(ctx, clusterClient.Client); err != nil { + if err = 
future.Poller.PollUntilDone(); err != nil { return fmt.Errorf("waiting for update of %s: %+v", *id, err) } @@ -1742,16 +1747,16 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} } agentProfile := ConvertDefaultNodePoolToAgentPool(agentProfiles) - defaultNodePoolId := parse.NewNodePoolID(id.SubscriptionId, id.ResourceGroup, id.ManagedClusterName, *agentProfile.Name) + defaultNodePoolId := agentpools.NewAgentPoolID(id.SubscriptionId, id.ResourceGroupName, id.ResourceName, *agentProfile.Name) // if a users specified a version - confirm that version is supported on the cluster - if nodePoolVersion := agentProfile.ManagedClusterAgentPoolProfileProperties.OrchestratorVersion; nodePoolVersion != nil { - existingNodePool, err := nodePoolsClient.Get(ctx, defaultNodePoolId.ResourceGroup, defaultNodePoolId.ManagedClusterName, defaultNodePoolId.AgentPoolName) + if nodePoolVersion := agentProfile.Properties.CurrentOrchestratorVersion; nodePoolVersion != nil { + existingNodePool, err := nodePoolsClient.Get(ctx, defaultNodePoolId) if err != nil { return fmt.Errorf("retrieving Default Node Pool %s: %+v", defaultNodePoolId, err) } currentNodePoolVersion := "" - if v := existingNodePool.OrchestratorVersion; v != nil { + if v := existingNodePool.Model.Properties.OrchestratorVersion; v != nil { currentNodePoolVersion = *v } @@ -1760,12 +1765,12 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} } } - agentPool, err := nodePoolsClient.CreateOrUpdate(ctx, defaultNodePoolId.ResourceGroup, defaultNodePoolId.ManagedClusterName, defaultNodePoolId.AgentPoolName, agentProfile) + agentPool, err := nodePoolsClient.CreateOrUpdate(ctx, defaultNodePoolId, agentProfile) if err != nil { return fmt.Errorf("updating Default Node Pool %s %+v", defaultNodePoolId, err) } - if err := agentPool.WaitForCompletionRef(ctx, nodePoolsClient.Client); err != nil { + if err := agentPool.Poller.PollUntilDone(); err != nil { return fmt.Errorf("waiting for update of Default Node Pool %s: %+v", defaultNodePoolId, err) } log.Printf("[DEBUG] Updated Default Node Pool.") @@ -1773,10 +1778,11 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} if d.HasChange("maintenance_window") { client := meta.(*clients.Client).Containers.MaintenanceConfigurationsClient - parameters := containerservice.MaintenanceConfiguration{ - MaintenanceConfigurationProperties: expandKubernetesClusterMaintenanceConfiguration(d.Get("maintenance_window").([]interface{})), + parameters := maintenanceconfigurations.MaintenanceConfiguration{ + Properties: expandKubernetesClusterMaintenanceConfiguration(d.Get("maintenance_window").([]interface{})), } - if _, err := client.CreateOrUpdate(ctx, id.ResourceGroup, id.ManagedClusterName, "default", parameters); err != nil { + maintenanceId := maintenanceconfigurations.NewMaintenanceConfigurationID(id.SubscriptionId, id.ResourceGroupName, id.ResourceName, "default") + if _, err := client.CreateOrUpdate(ctx, maintenanceId, parameters); err != nil { return fmt.Errorf("creating/updating Maintenance Configuration for Managed Kubernetes Cluster (%q): %+v", id, err) } } @@ -1791,14 +1797,14 @@ func resourceKubernetesClusterRead(d *pluginsdk.ResourceData, meta interface{}) ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.ClusterID(d.Id()) + id, err := managedclusters.ParseManagedClusterID(d.Id()) if err != nil { return err } - resp, err := client.Get(ctx, id.ResourceGroup, 
id.ManagedClusterName) + resp, err := client.Get(ctx, *id) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { log.Printf("[DEBUG] %s was not found - removing from state!", *id) d.SetId("") return nil @@ -1807,26 +1813,32 @@ func resourceKubernetesClusterRead(d *pluginsdk.ResourceData, meta interface{}) return fmt.Errorf("retrieving %s: %+v", *id, err) } - profile, err := client.GetAccessProfile(ctx, id.ResourceGroup, id.ManagedClusterName, "clusterUser") + respModel := resp.Model + if respModel == nil { + return fmt.Errorf("retrieving %s: no payload delivered", *id) + } + + accessProfileId := managedclusters.NewAccessProfileID(id.SubscriptionId, id.ResourceGroupName, id.ResourceName, "clusterUser") + profile, err := client.GetAccessProfile(ctx, accessProfileId) if err != nil { return fmt.Errorf("retrieving Access Profile for %s: %+v", *id, err) } - d.Set("name", id.ManagedClusterName) - d.Set("resource_group_name", id.ResourceGroup) - d.Set("edge_zone", flattenEdgeZone(resp.ExtendedLocation)) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) + d.Set("name", id.ResourceName) + d.Set("resource_group_name", id.ResourceGroupName) + d.Set("edge_zone", flattenEdgeZone(respModel.ExtendedLocation)) + if location := respModel.Location; location != "" { + d.Set("location", azure.NormalizeLocation(location)) } - skuTier := string(containerservice.ManagedClusterSKUTierFree) - if resp.Sku != nil && resp.Sku.Tier != "" { - skuTier = string(resp.Sku.Tier) + skuTier := string(managedclusters.ManagedClusterSKUTierFree) + if respModel.Sku != nil && respModel.Sku.Tier != nil && *respModel.Sku.Tier != "" { + skuTier = string(*respModel.Sku.Tier) } d.Set("sku_tier", skuTier) - if props := resp.ManagedClusterProperties; props != nil { - d.Set("dns_prefix", props.DNSPrefix) + if props := respModel.Properties; props != nil { + d.Set("dns_prefix", props.DnsPrefix) d.Set("dns_prefix_private_cluster", props.FqdnSubdomain) d.Set("fqdn", props.Fqdn) d.Set("private_fqdn", props.PrivateFQDN) @@ -1836,27 +1848,31 @@ func resourceKubernetesClusterRead(d *pluginsdk.ResourceData, meta interface{}) d.Set("node_resource_group", props.NodeResourceGroup) d.Set("enable_pod_security_policy", props.EnablePodSecurityPolicy) d.Set("local_account_disabled", props.DisableLocalAccounts) - d.Set("public_network_access_enabled", props.PublicNetworkAccess != containerservice.PublicNetworkAccessDisabled) + d.Set("public_network_access_enabled", *props.PublicNetworkAccess != managedclusters.PublicNetworkAccessDisabled) upgradeChannel := "" - if profile := props.AutoUpgradeProfile; profile != nil && profile.UpgradeChannel != containerservice.UpgradeChannelNone { - upgradeChannel = string(profile.UpgradeChannel) + if profile := props.AutoUpgradeProfile; profile != nil && *profile.UpgradeChannel != managedclusters.UpgradeChannelNone { + upgradeChannel = string(*profile.UpgradeChannel) } d.Set("automatic_channel_upgrade", upgradeChannel) - if accessProfile := props.APIServerAccessProfile; accessProfile != nil { + enablePrivateCluster := false + enablePrivateClusterPublicFQDN := false + runCommandEnabled := true + if accessProfile := props.ApiServerAccessProfile; accessProfile != nil { apiServerAuthorizedIPRanges := utils.FlattenStringSlice(accessProfile.AuthorizedIPRanges) if err := d.Set("api_server_authorized_ip_ranges", apiServerAuthorizedIPRanges); err != nil { return fmt.Errorf("setting `api_server_authorized_ip_ranges`: 
%+v", err) } - - d.Set("private_cluster_enabled", accessProfile.EnablePrivateCluster) - d.Set("private_cluster_public_fqdn_enabled", accessProfile.EnablePrivateClusterPublicFQDN) - runCommandEnabled := true + if accessProfile.EnablePrivateCluster != nil { + enablePrivateCluster = *accessProfile.EnablePrivateCluster + } + if accessProfile.EnablePrivateClusterPublicFQDN != nil { + enablePrivateClusterPublicFQDN = *accessProfile.EnablePrivateClusterPublicFQDN + } if accessProfile.DisableRunCommand != nil { runCommandEnabled = !*accessProfile.DisableRunCommand } - d.Set("run_command_enabled", runCommandEnabled) switch { case accessProfile.PrivateDNSZone != nil && strings.EqualFold("System", *accessProfile.PrivateDNSZone): d.Set("private_dns_zone_id", "System") @@ -1867,16 +1883,21 @@ func resourceKubernetesClusterRead(d *pluginsdk.ResourceData, meta interface{}) } } - addOns := flattenKubernetesAddOns(props.AddonProfiles) - d.Set("aci_connector_linux", addOns["aci_connector_linux"]) - d.Set("azure_policy_enabled", addOns["azure_policy_enabled"].(bool)) - d.Set("http_application_routing_enabled", addOns["http_application_routing_enabled"].(bool)) - d.Set("http_application_routing_zone_name", addOns["http_application_routing_zone_name"]) - d.Set("oms_agent", addOns["oms_agent"]) - d.Set("ingress_application_gateway", addOns["ingress_application_gateway"]) - d.Set("open_service_mesh_enabled", addOns["open_service_mesh_enabled"].(bool)) - d.Set("key_vault_secrets_provider", addOns["key_vault_secrets_provider"]) + d.Set("private_cluster_enabled", enablePrivateCluster) + d.Set("private_cluster_public_fqdn_enabled", enablePrivateClusterPublicFQDN) + d.Set("run_command_enabled", runCommandEnabled) + if props.AddonProfiles != nil { + addOns := flattenKubernetesAddOns(*props.AddonProfiles) + d.Set("aci_connector_linux", addOns["aci_connector_linux"]) + d.Set("azure_policy_enabled", addOns["azure_policy_enabled"].(bool)) + d.Set("http_application_routing_enabled", addOns["http_application_routing_enabled"].(bool)) + d.Set("http_application_routing_zone_name", addOns["http_application_routing_zone_name"]) + d.Set("oms_agent", addOns["oms_agent"]) + d.Set("ingress_application_gateway", addOns["ingress_application_gateway"]) + d.Set("open_service_mesh_enabled", addOns["open_service_mesh_enabled"].(bool)) + d.Set("key_vault_secrets_provider", addOns["key_vault_secrets_provider"]) + } autoScalerProfile, err := flattenKubernetesClusterAutoScalerProfile(props.AutoScalerProfile) if err != nil { return err @@ -1893,10 +1914,14 @@ func resourceKubernetesClusterRead(d *pluginsdk.ResourceData, meta interface{}) return fmt.Errorf("setting `default_node_pool`: %+v", err) } - kubeletIdentity, err := flattenKubernetesClusterIdentityProfile(props.IdentityProfile) - if err != nil { - return err + kubeletIdentity := []interface{}{} + if identityProfile := props.IdentityProfile; identityProfile != nil { + kubeletIdentity, err = flattenKubernetesClusterIdentityProfile(*props.IdentityProfile) + if err != nil { + return err + } } + if err := d.Set("kubelet_identity", kubeletIdentity); err != nil { return fmt.Errorf("setting `kubelet_identity`: %+v", err) } @@ -1964,12 +1989,17 @@ func resourceKubernetesClusterRead(d *pluginsdk.ResourceData, meta interface{}) // adminProfile is only available for RBAC enabled clusters with AAD and local account is not disabled if props.AadProfile != nil && (props.DisableLocalAccounts == nil || !*props.DisableLocalAccounts) { - adminProfile, err := client.GetAccessProfile(ctx, id.ResourceGroup, 
id.ManagedClusterName, "clusterAdmin") + + accessProfileId := managedclusters.NewAccessProfileID(id.SubscriptionId, id.ResourceGroupName, id.ResourceName, "clusterAdmin") + adminProfile, err := client.GetAccessProfile(ctx, accessProfileId) if err != nil { - return fmt.Errorf("retrieving Admin Access Profile for Managed Kubernetes Cluster %q (Resource Group %q): %+v", id.ManagedClusterName, id.ResourceGroup, err) + return fmt.Errorf("retrieving Admin Access Profile for Managed Kubernetes Cluster %q (Resource Group %q): %+v", id.ResourceName, id.ResourceGroupName, err) } - adminKubeConfigRaw, adminKubeConfig := flattenKubernetesClusterAccessProfile(adminProfile) + if adminProfile.Model == nil { + return fmt.Errorf("retrieving Admin Access Profile for Managed Kubernetes Cluster %q (Resource Group %q): no payload found", id.ResourceName, id.ResourceGroupName) + } + adminKubeConfigRaw, adminKubeConfig := flattenKubernetesClusterAccessProfile(*adminProfile.Model) d.Set("kube_admin_config_raw", adminKubeConfigRaw) if err := d.Set("kube_admin_config", adminKubeConfig); err != nil { return fmt.Errorf("setting `kube_admin_config`: %+v", err) @@ -1980,7 +2010,7 @@ func resourceKubernetesClusterRead(d *pluginsdk.ResourceData, meta interface{}) } } - identity, err := flattenClusterIdentity(resp.Identity) + identity, err := identity.FlattenSystemOrUserAssignedMap(respModel.Identity) if err != nil { return fmt.Errorf("setting `identity`: %+v", err) } @@ -1989,19 +2019,20 @@ func resourceKubernetesClusterRead(d *pluginsdk.ResourceData, meta interface{}) return fmt.Errorf("setting `identity`: %+v", err) } - kubeConfigRaw, kubeConfig := flattenKubernetesClusterAccessProfile(profile) + kubeConfigRaw, kubeConfig := flattenKubernetesClusterAccessProfile(*profile.Model) d.Set("kube_config_raw", kubeConfigRaw) if err := d.Set("kube_config", kubeConfig); err != nil { return fmt.Errorf("setting `kube_config`: %+v", err) } maintenanceConfigurationsClient := meta.(*clients.Client).Containers.MaintenanceConfigurationsClient - configResp, _ := maintenanceConfigurationsClient.Get(ctx, id.ResourceGroup, id.ManagedClusterName, "default") - if props := configResp.MaintenanceConfigurationProperties; props != nil { - d.Set("maintenance_window", flattenKubernetesClusterMaintenanceConfiguration(props)) + maintenanceId := maintenanceconfigurations.NewMaintenanceConfigurationID(id.SubscriptionId, id.ResourceGroupName, id.ResourceName, "default") + configResp, _ := maintenanceConfigurationsClient.Get(ctx, maintenanceId) + if configurationBody := configResp.Model; configurationBody != nil && configurationBody.Properties != nil { + d.Set("maintenance_window", flattenKubernetesClusterMaintenanceConfiguration(configurationBody.Properties)) } - return tags.FlattenAndSet(d, resp.Tags) + return tags.FlattenAndSet(d, respModel.Tags) } func resourceKubernetesClusterDelete(d *pluginsdk.ResourceData, meta interface{}) error { @@ -2009,36 +2040,41 @@ func resourceKubernetesClusterDelete(d *pluginsdk.ResourceData, meta interface{} ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.ClusterID(d.Id()) + id, err := managedclusters.ParseManagedClusterID(d.Id()) if err != nil { return err } if _, ok := d.GetOk("maintenance_window"); ok { client := meta.(*clients.Client).Containers.MaintenanceConfigurationsClient - if _, err := client.Delete(ctx, id.ResourceGroup, id.ManagedClusterName, "default"); err != nil { + maintenanceId := 
maintenanceconfigurations.NewMaintenanceConfigurationID(id.SubscriptionId, id.ResourceGroupName, id.ResourceName, "default") + if _, err := client.Delete(ctx, maintenanceId); err != nil { return fmt.Errorf("deleting Maintenance Configuration for %s: %+v", *id, err) } } ignorePodDisruptionBudget := true - - future, err := client.Delete(ctx, id.ResourceGroup, id.ManagedClusterName, &ignorePodDisruptionBudget) + future, err := client.Delete(ctx, *id, managedclusters.DeleteOperationOptions{ + IgnorePodDisruptionBudget: &ignorePodDisruptionBudget, + }) if err != nil { return fmt.Errorf("deleting %s: %+v", *id, err) } - if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + if err := future.Poller.PollUntilDone(); err != nil { return fmt.Errorf("waiting for the deletion of %s: %+v", *id, err) } return nil } -func flattenKubernetesClusterAccessProfile(profile containerservice.ManagedClusterAccessProfile) (*string, []interface{}) { - if accessProfile := profile.AccessProfile; accessProfile != nil { +func flattenKubernetesClusterAccessProfile(profile managedclusters.ManagedClusterAccessProfile) (*string, []interface{}) { + if accessProfile := profile.Properties; accessProfile != nil { if kubeConfigRaw := accessProfile.KubeConfig; kubeConfigRaw != nil { - rawConfig := string(*kubeConfigRaw) + rawConfig := *kubeConfigRaw + if base64IsEncoded(*kubeConfigRaw) { + rawConfig, _ = base64Decode(*kubeConfigRaw) + } var flattenedKubeConfig []interface{} if strings.Contains(rawConfig, "apiserver-id:") || strings.Contains(rawConfig, "exec") { @@ -2063,7 +2099,7 @@ func flattenKubernetesClusterAccessProfile(profile containerservice.ManagedClust return nil, []interface{}{} } -func expandKubernetesClusterLinuxProfile(input []interface{}) *containerservice.LinuxProfile { +func expandKubernetesClusterLinuxProfile(input []interface{}) *managedclusters.ContainerServiceLinuxProfile { if len(input) == 0 { return nil } @@ -2078,56 +2114,56 @@ func expandKubernetesClusterLinuxProfile(input []interface{}) *containerservice. 
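Throughout the migrated read, update and delete paths above, the new SDK's responses carry their payload under `.Model`, and not-found detection moves from `utils.ResponseWasNotFound` to `response.WasNotFound`; a condensed sketch of that shape, assuming `client` and `id` as in the surrounding code:

	resp, err := client.Get(ctx, *id)
	if err != nil {
		if response.WasNotFound(resp.HttpResponse) {
			// the cluster no longer exists, so drop it from state
			d.SetId("")
			return nil
		}
		return fmt.Errorf("retrieving %s: %+v", *id, err)
	}
	// the payload is optional on the wrapper type, so guard before dereferencing
	if resp.Model == nil || resp.Model.Properties == nil {
		return fmt.Errorf("retrieving %s: `properties` was nil", *id)
	}
	props := resp.Model.Properties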
keyData = key["key_data"].(string) } - return &containerservice.LinuxProfile{ - AdminUsername: &adminUsername, - SSH: &containerservice.SSHConfiguration{ - PublicKeys: &[]containerservice.SSHPublicKey{ + return &managedclusters.ContainerServiceLinuxProfile{ + AdminUsername: adminUsername, + Ssh: managedclusters.ContainerServiceSshConfiguration{ + PublicKeys: []managedclusters.ContainerServiceSshPublicKey{ { - KeyData: &keyData, + KeyData: keyData, }, }, }, } } -func expandKubernetesClusterIdentityProfile(input []interface{}) map[string]*containerservice.UserAssignedIdentity { - identityProfile := make(map[string]*containerservice.UserAssignedIdentity) +func expandKubernetesClusterIdentityProfile(input []interface{}) *map[string]managedclusters.UserAssignedIdentity { + identityProfile := make(map[string]managedclusters.UserAssignedIdentity) if len(input) == 0 || input[0] == nil { - return identityProfile + return &identityProfile } values := input[0].(map[string]interface{}) - if containerservice.ResourceIdentityType(values["user_assigned_identity_id"].(string)) != "" { - identityProfile["kubeletidentity"] = &containerservice.UserAssignedIdentity{ - ResourceID: utils.String(values["user_assigned_identity_id"].(string)), - ClientID: utils.String(values["client_id"].(string)), - ObjectID: utils.String(values["object_id"].(string)), + if identity.Type(values["user_assigned_identity_id"].(string)) != "" { + identityProfile["kubeletidentity"] = managedclusters.UserAssignedIdentity{ + ResourceId: utils.String(values["user_assigned_identity_id"].(string)), + ClientId: utils.String(values["client_id"].(string)), + ObjectId: utils.String(values["object_id"].(string)), } } - return identityProfile + return &identityProfile } -func flattenKubernetesClusterIdentityProfile(profile map[string]*containerservice.UserAssignedIdentity) ([]interface{}, error) { +func flattenKubernetesClusterIdentityProfile(profile map[string]managedclusters.UserAssignedIdentity) ([]interface{}, error) { if profile == nil { return []interface{}{}, nil } kubeletIdentity := make([]interface{}, 0) - if kubeletidentity := profile["kubeletidentity"]; kubeletidentity != nil { + if kubeletidentity, ok := profile["kubeletidentity"]; ok { clientId := "" - if clientid := kubeletidentity.ClientID; clientid != nil { + if clientid := kubeletidentity.ClientId; clientid != nil { clientId = *clientid } objectId := "" - if objectid := kubeletidentity.ObjectID; objectid != nil { + if objectid := kubeletidentity.ObjectId; objectid != nil { objectId = *objectid } userAssignedIdentityId := "" - if resourceid := kubeletidentity.ResourceID; resourceid != nil { + if resourceid := kubeletidentity.ResourceId; resourceid != nil { parsedId, err := commonids.ParseUserAssignedIdentityIDInsensitively(*resourceid) if err != nil { return nil, err @@ -2146,29 +2182,26 @@ func flattenKubernetesClusterIdentityProfile(profile map[string]*containerservic return kubeletIdentity, nil } -func flattenKubernetesClusterLinuxProfile(profile *containerservice.LinuxProfile) []interface{} { +func flattenKubernetesClusterLinuxProfile(profile *managedclusters.ContainerServiceLinuxProfile) []interface{} { if profile == nil { return []interface{}{} } - adminUsername := "" - if username := profile.AdminUsername; username != nil { - adminUsername = *username - } + adminUsername := profile.AdminUsername sshKeys := make([]interface{}, 0) - if ssh := profile.SSH; ssh != nil { - if keys := ssh.PublicKeys; keys != nil { - for _, sshKey := range *keys { - keyData := "" - if kd := 
sshKey.KeyData; kd != nil { - keyData = *kd - } - sshKeys = append(sshKeys, map[string]interface{}{ - "key_data": keyData, - }) + ssh := profile.Ssh + if keys := ssh.PublicKeys; keys != nil { + for _, sshKey := range keys { + keyData := "" + if kd := sshKey.KeyData; kd != "" { + keyData = kd } + sshKeys = append(sshKeys, map[string]interface{}{ + "key_data": keyData, + }) } + } return []interface{}{ @@ -2179,51 +2212,48 @@ func flattenKubernetesClusterLinuxProfile(profile *containerservice.LinuxProfile } } -func expandKubernetesClusterWindowsProfile(input []interface{}) *containerservice.ManagedClusterWindowsProfile { +func expandKubernetesClusterWindowsProfile(input []interface{}) *managedclusters.ManagedClusterWindowsProfile { if len(input) == 0 { return nil } config := input[0].(map[string]interface{}) - license := containerservice.LicenseTypeNone + license := managedclusters.LicenseTypeNone if v := config["license"].(string); v != "" { - license = containerservice.LicenseType(v) + license = managedclusters.LicenseType(v) } gmsaProfile := expandGmsaProfile(config["gmsa"].([]interface{})) - return &containerservice.ManagedClusterWindowsProfile{ - AdminUsername: utils.String(config["admin_username"].(string)), + return &managedclusters.ManagedClusterWindowsProfile{ + AdminUsername: config["admin_username"].(string), AdminPassword: utils.String(config["admin_password"].(string)), - LicenseType: license, + LicenseType: &license, GmsaProfile: gmsaProfile, } } -func expandGmsaProfile(input []interface{}) *containerservice.WindowsGmsaProfile { +func expandGmsaProfile(input []interface{}) *managedclusters.WindowsGmsaProfile { if len(input) == 0 { return nil } config := input[0].(map[string]interface{}) - return &containerservice.WindowsGmsaProfile{ + return &managedclusters.WindowsGmsaProfile{ Enabled: utils.Bool(true), - DNSServer: utils.String(config["dns_server"].(string)), + DnsServer: utils.String(config["dns_server"].(string)), RootDomainName: utils.String(config["root_domain"].(string)), } } -func flattenKubernetesClusterWindowsProfile(profile *containerservice.ManagedClusterWindowsProfile, d *pluginsdk.ResourceData) []interface{} { +func flattenKubernetesClusterWindowsProfile(profile *managedclusters.ManagedClusterWindowsProfile, d *pluginsdk.ResourceData) []interface{} { if profile == nil { return []interface{}{} } - adminUsername := "" - if username := profile.AdminUsername; username != nil { - adminUsername = *username - } + adminUsername := profile.AdminUsername // admin password isn't returned, so let's look it up adminPassword := "" @@ -2232,8 +2262,8 @@ func flattenKubernetesClusterWindowsProfile(profile *containerservice.ManagedClu } license := "" - if profile.LicenseType != containerservice.LicenseTypeNone { - license = string(profile.LicenseType) + if profile.LicenseType != nil && *profile.LicenseType != managedclusters.LicenseTypeNone { + license = string(*profile.LicenseType) } gmsaProfile := flattenGmsaProfile(profile.GmsaProfile) @@ -2248,13 +2278,13 @@ func flattenKubernetesClusterWindowsProfile(profile *containerservice.ManagedClu } } -func flattenGmsaProfile(profile *containerservice.WindowsGmsaProfile) []interface{} { +func flattenGmsaProfile(profile *managedclusters.WindowsGmsaProfile) []interface{} { if profile == nil { return []interface{}{} } dnsServer := "" - if dns := profile.DNSServer; dns != nil { + if dns := profile.DnsServer; dns != nil { dnsServer = *dns } @@ -2271,7 +2301,7 @@ func flattenGmsaProfile(profile *containerservice.WindowsGmsaProfile) []interfac 
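As with `UpgradeChannel` and the SKU tier earlier in the diff, optional enum fields such as `LicenseType` are now pointers, so expand code takes the address of a local (or uses `utils.ToPtr`) and flatten code nil-checks before dereferencing; a short sketch, assuming `config` and `profile` as in the windows profile expand/flatten functions above:

	// expand: default to LicenseTypeNone, then point the field at the local
	license := managedclusters.LicenseTypeNone
	if v := config["license"].(string); v != "" {
		license = managedclusters.LicenseType(v)
	}
	profile.LicenseType = &license

	// flatten: only surface a value when the pointer is set and not the None sentinel
	licenseType := ""
	if profile.LicenseType != nil && *profile.LicenseType != managedclusters.LicenseTypeNone {
		licenseType = string(*profile.LicenseType)
	}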
} } -func expandKubernetesClusterNetworkProfile(input []interface{}) (*containerservice.NetworkProfile, error) { +func expandKubernetesClusterNetworkProfile(input []interface{}) (*managedclusters.ContainerServiceNetworkProfile, error) { if len(input) == 0 { return nil, nil } @@ -2293,12 +2323,12 @@ func expandKubernetesClusterNetworkProfile(input []interface{}) (*containerservi return nil, err } - networkProfile := containerservice.NetworkProfile{ - NetworkPlugin: containerservice.NetworkPlugin(networkPlugin), - NetworkMode: containerservice.NetworkMode(networkMode), - NetworkPolicy: containerservice.NetworkPolicy(networkPolicy), - LoadBalancerSku: containerservice.LoadBalancerSku(loadBalancerSku), - OutboundType: containerservice.OutboundType(outboundType), + networkProfile := managedclusters.ContainerServiceNetworkProfile{ + NetworkPlugin: utils.ToPtr(managedclusters.NetworkPlugin(networkPlugin)), + NetworkMode: utils.ToPtr(managedclusters.NetworkMode(networkMode)), + NetworkPolicy: utils.ToPtr(managedclusters.NetworkPolicy(networkPolicy)), + LoadBalancerSku: utils.ToPtr(managedclusters.LoadBalancerSku(loadBalancerSku)), + OutboundType: utils.ToPtr(managedclusters.OutboundType(outboundType)), IPFamilies: ipVersions, } @@ -2320,7 +2350,7 @@ func expandKubernetesClusterNetworkProfile(input []interface{}) (*containerservi if v, ok := config["dns_service_ip"]; ok && v.(string) != "" { dnsServiceIP := v.(string) - networkProfile.DNSServiceIP = utils.String(dnsServiceIP) + networkProfile.DnsServiceIP = utils.String(dnsServiceIP) } if v, ok := config["pod_cidr"]; ok && v.(string) != "" { @@ -2349,99 +2379,99 @@ func expandKubernetesClusterNetworkProfile(input []interface{}) (*containerservi return &networkProfile, nil } -func expandLoadBalancerProfile(d []interface{}) *containerservice.ManagedClusterLoadBalancerProfile { +func expandLoadBalancerProfile(d []interface{}) *managedclusters.ManagedClusterLoadBalancerProfile { if d[0] == nil { return nil } config := d[0].(map[string]interface{}) - profile := &containerservice.ManagedClusterLoadBalancerProfile{} + profile := &managedclusters.ManagedClusterLoadBalancerProfile{} if mins, ok := config["idle_timeout_in_minutes"]; ok && mins.(int) != 0 { - profile.IdleTimeoutInMinutes = utils.Int32(int32(mins.(int))) + profile.IdleTimeoutInMinutes = utils.Int64(int64(mins.(int))) } if port, ok := config["outbound_ports_allocated"].(int); ok { - profile.AllocatedOutboundPorts = utils.Int32(int32(port)) + profile.AllocatedOutboundPorts = utils.Int64(int64(port)) } if ipCount := config["managed_outbound_ip_count"]; ipCount != nil { - if c := int32(ipCount.(int)); c > 0 { - profile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{Count: &c} + if c := int64(ipCount.(int)); c > 0 { + profile.ManagedOutboundIPs = &managedclusters.ManagedClusterLoadBalancerProfileManagedOutboundIPs{Count: &c} } } if ipv6Count := config["managed_outbound_ipv6_count"]; ipv6Count != nil { - if c := int32(ipv6Count.(int)); c > 0 { + if c := int64(ipv6Count.(int)); c > 0 { if profile.ManagedOutboundIPs == nil { - profile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{} + profile.ManagedOutboundIPs = &managedclusters.ManagedClusterLoadBalancerProfileManagedOutboundIPs{} } profile.ManagedOutboundIPs.CountIPv6 = &c } } if ipPrefixes := idsToResourceReferences(config["outbound_ip_prefix_ids"]); ipPrefixes != nil { - profile.OutboundIPPrefixes = 
&containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{PublicIPPrefixes: ipPrefixes} + profile.OutboundIPPrefixes = &managedclusters.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{PublicIPPrefixes: ipPrefixes} } if outIps := idsToResourceReferences(config["outbound_ip_address_ids"]); outIps != nil { - profile.OutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPs{PublicIPs: outIps} + profile.OutboundIPs = &managedclusters.ManagedClusterLoadBalancerProfileOutboundIPs{PublicIPs: outIps} } return profile } -func expandIPVersions(input []interface{}) (*[]containerservice.IPFamily, error) { +func expandIPVersions(input []interface{}) (*[]managedclusters.IPFamily, error) { if len(input) == 0 { return nil, nil } - ipv := make([]containerservice.IPFamily, 0) + ipv := make([]managedclusters.IPFamily, 0) for _, data := range input { - ipv = append(ipv, containerservice.IPFamily(data.(string))) + ipv = append(ipv, managedclusters.IPFamily(data.(string))) } - if len(ipv) == 1 && ipv[0] == containerservice.IPFamilyIPv6 { + if len(ipv) == 1 && ipv[0] == managedclusters.IPFamilyIPvSix { return nil, fmt.Errorf("`ip_versions` must be `IPv4` or `IPv4` and `IPv6`. `IPv6` alone is not supported") } return &ipv, nil } -func expandNatGatewayProfile(d []interface{}) *containerservice.ManagedClusterNATGatewayProfile { +func expandNatGatewayProfile(d []interface{}) *managedclusters.ManagedClusterNATGatewayProfile { if d[0] == nil { return nil } config := d[0].(map[string]interface{}) - profile := &containerservice.ManagedClusterNATGatewayProfile{} + profile := &managedclusters.ManagedClusterNATGatewayProfile{} if mins, ok := config["idle_timeout_in_minutes"]; ok && mins.(int) != 0 { - profile.IdleTimeoutInMinutes = utils.Int32(int32(mins.(int))) + profile.IdleTimeoutInMinutes = utils.Int64(int64(mins.(int))) } if ipCount := config["managed_outbound_ip_count"]; ipCount != nil { - if c := int32(ipCount.(int)); c > 0 { - profile.ManagedOutboundIPProfile = &containerservice.ManagedClusterManagedOutboundIPProfile{Count: &c} + if c := int64(ipCount.(int)); c > 0 { + profile.ManagedOutboundIPProfile = &managedclusters.ManagedClusterManagedOutboundIPProfile{Count: &c} } } return profile } -func idsToResourceReferences(set interface{}) *[]containerservice.ResourceReference { +func idsToResourceReferences(set interface{}) *[]managedclusters.ResourceReference { if set == nil { return nil } s := set.(*pluginsdk.Set) - results := make([]containerservice.ResourceReference, 0) + results := make([]managedclusters.ResourceReference, 0) for _, element := range s.List() { id := element.(string) - results = append(results, containerservice.ResourceReference{ID: &id}) + results = append(results, managedclusters.ResourceReference{Id: &id}) } if len(results) > 0 { @@ -2451,7 +2481,7 @@ func idsToResourceReferences(set interface{}) *[]containerservice.ResourceRefere return nil } -func resourceReferencesToIds(refs *[]containerservice.ResourceReference) []string { +func resourceReferencesToIds(refs *[]managedclusters.ResourceReference) []string { if refs == nil { return nil } @@ -2459,8 +2489,8 @@ func resourceReferencesToIds(refs *[]containerservice.ResourceReference) []strin ids := make([]string, 0) for _, ref := range *refs { - if ref.ID != nil { - ids = append(ids, *ref.ID) + if ref.Id != nil { + ids = append(ids, *ref.Id) } } @@ -2471,14 +2501,14 @@ func resourceReferencesToIds(refs *[]containerservice.ResourceReference) []strin return nil } -func flattenKubernetesClusterNetworkProfile(profile 
*containerservice.NetworkProfile) []interface{} { +func flattenKubernetesClusterNetworkProfile(profile *managedclusters.ContainerServiceNetworkProfile) []interface{} { if profile == nil { return []interface{}{} } dnsServiceIP := "" - if profile.DNSServiceIP != nil { - dnsServiceIP = *profile.DNSServiceIP + if profile.DnsServiceIP != nil { + dnsServiceIP = *profile.DnsServiceIP } dockerBridgeCidr := "" @@ -2496,6 +2526,26 @@ func flattenKubernetesClusterNetworkProfile(profile *containerservice.NetworkPro podCidr = *profile.PodCidr } + networkPlugin := "" + if profile.NetworkPlugin != nil { + networkPlugin = string(*profile.NetworkPlugin) + } + + networkMode := "" + if profile.NetworkMode != nil { + networkMode = string(*profile.NetworkMode) + } + + networkPolicy := "" + if profile.NetworkPolicy != nil { + networkPolicy = string(*profile.NetworkPolicy) + } + + outboundType := "" + if profile.OutboundType != nil { + outboundType = string(*profile.OutboundType) + } + lbProfiles := make([]interface{}, 0) if lbp := profile.LoadBalancerProfile; lbp != nil { lb := make(map[string]interface{}) @@ -2561,9 +2611,10 @@ func flattenKubernetesClusterNetworkProfile(profile *containerservice.NetworkPro // TODO - Remove the workaround below once issue https://github.com/Azure/azure-rest-api-specs/issues/18056 is resolved sku := profile.LoadBalancerSku - for _, v := range containerservice.PossibleLoadBalancerSkuValues() { - if strings.EqualFold(string(v), string(sku)) { - sku = v + for _, v := range managedclusters.PossibleValuesForLoadBalancerSku() { + if strings.EqualFold(v, string(*sku)) { + lsSku := managedclusters.LoadBalancerSku(v) + sku = &lsSku } } @@ -2571,28 +2622,28 @@ func flattenKubernetesClusterNetworkProfile(profile *containerservice.NetworkPro map[string]interface{}{ "dns_service_ip": dnsServiceIP, "docker_bridge_cidr": dockerBridgeCidr, - "load_balancer_sku": string(sku), + "load_balancer_sku": string(*sku), "load_balancer_profile": lbProfiles, "nat_gateway_profile": ngwProfiles, "ip_versions": ipVersions, - "network_plugin": string(profile.NetworkPlugin), - "network_mode": string(profile.NetworkMode), - "network_policy": string(profile.NetworkPolicy), + "network_plugin": networkPlugin, + "network_mode": networkMode, + "network_policy": networkPolicy, "pod_cidr": podCidr, "pod_cidrs": utils.FlattenStringSlice(profile.PodCidrs), "service_cidr": serviceCidr, "service_cidrs": utils.FlattenStringSlice(profile.ServiceCidrs), - "outbound_type": string(profile.OutboundType), + "outbound_type": outboundType, }, } } -func expandKubernetesClusterAzureActiveDirectoryRoleBasedAccessControl(input []interface{}, providerTenantId string) (*containerservice.ManagedClusterAADProfile, error) { +func expandKubernetesClusterAzureActiveDirectoryRoleBasedAccessControl(input []interface{}, providerTenantId string) (*managedclusters.ManagedClusterAADProfile, error) { if len(input) == 0 { return nil, nil } - var aad *containerservice.ManagedClusterAADProfile + var aad *managedclusters.ManagedClusterAADProfile azureAdRaw := input[0].(map[string]interface{}) @@ -2610,7 +2661,7 @@ func expandKubernetesClusterAzureActiveDirectoryRoleBasedAccessControl(input []i } if managed { - aad = &containerservice.ManagedClusterAADProfile{ + aad = &managedclusters.ManagedClusterAADProfile{ TenantID: utils.String(tenantId), Managed: utils.Bool(managed), AdminGroupObjectIDs: adminGroupObjectIds, @@ -2621,7 +2672,7 @@ func expandKubernetesClusterAzureActiveDirectoryRoleBasedAccessControl(input []i return nil, fmt.Errorf("can't specify 
client_app_id or server_app_id or server_app_secret when using managed aad rbac (managed = true)") } } else { - aad = &containerservice.ManagedClusterAADProfile{ + aad = &managedclusters.ManagedClusterAADProfile{ ClientAppID: utils.String(clientAppId), ServerAppID: utils.String(serverAppId), ServerAppSecret: utils.String(serverAppSecret), @@ -2645,19 +2696,19 @@ func expandKubernetesClusterAzureActiveDirectoryRoleBasedAccessControl(input []i return aad, nil } -func expandKubernetesClusterManagedClusterIdentity(input []interface{}) (*containerservice.ManagedClusterIdentity, error) { +func expandKubernetesClusterManagedClusterIdentity(input []interface{}) (*identity.SystemOrUserAssignedMap, error) { expanded, err := identity.ExpandSystemOrUserAssignedMap(input) if err != nil { return nil, err } - out := containerservice.ManagedClusterIdentity{ - Type: containerservice.ResourceIdentityType(string(expanded.Type)), + out := identity.SystemOrUserAssignedMap{ + Type: identity.Type(string(expanded.Type)), } if expanded.Type == identity.TypeUserAssigned { - out.UserAssignedIdentities = make(map[string]*containerservice.ManagedClusterIdentityUserAssignedIdentitiesValue) + out.IdentityIds = make(map[string]identity.UserAssignedIdentityDetails) for k := range expanded.IdentityIds { - out.UserAssignedIdentities[k] = &containerservice.ManagedClusterIdentityUserAssignedIdentitiesValue{ + out.IdentityIds[k] = identity.UserAssignedIdentityDetails{ // intentionally empty } } @@ -2665,7 +2716,7 @@ func expandKubernetesClusterManagedClusterIdentity(input []interface{}) (*contai return &out, nil } -func flattenKubernetesClusterAzureActiveDirectoryRoleBasedAccessControl(input *containerservice.ManagedClusterProperties, d *pluginsdk.ResourceData) []interface{} { +func flattenKubernetesClusterAzureActiveDirectoryRoleBasedAccessControl(input *managedclusters.ManagedClusterProperties, d *pluginsdk.ResourceData) []interface{} { results := make([]interface{}, 0) if profile := input.AadProfile; profile != nil { adminGroupObjectIds := utils.FlattenStringSlice(profile.AdminGroupObjectIDs) @@ -2722,15 +2773,12 @@ func flattenKubernetesClusterAzureActiveDirectoryRoleBasedAccessControl(input *c return results } -func flattenAzureRmKubernetesClusterServicePrincipalProfile(profile *containerservice.ManagedClusterServicePrincipalProfile, d *pluginsdk.ResourceData) []interface{} { +func flattenAzureRmKubernetesClusterServicePrincipalProfile(profile *managedclusters.ManagedClusterServicePrincipalProfile, d *pluginsdk.ResourceData) []interface{} { if profile == nil { return []interface{}{} } - clientId := "" - if v := profile.ClientID; v != nil { - clientId = *v - } + clientId := profile.ClientId if strings.EqualFold(clientId, "msi") { return []interface{}{} @@ -2797,34 +2845,7 @@ func flattenKubernetesClusterKubeConfigAAD(config kubernetes.KubeConfigAAD) []in } } -func flattenClusterIdentity(input *containerservice.ManagedClusterIdentity) (*[]interface{}, error) { - var transform *identity.SystemOrUserAssignedMap - - if input != nil { - transform = &identity.SystemOrUserAssignedMap{ - Type: identity.Type(string(input.Type)), - PrincipalId: "", - TenantId: "", - IdentityIds: make(map[string]identity.UserAssignedIdentityDetails), - } - if input.PrincipalID != nil { - transform.PrincipalId = *input.PrincipalID - } - if input.TenantID != nil { - transform.TenantId = *input.TenantID - } - for k, v := range input.UserAssignedIdentities { - transform.IdentityIds[k] = identity.UserAssignedIdentityDetails{ - ClientId: v.ClientID, - 
PrincipalId: v.PrincipalID, - } - } - } - - return identity.FlattenSystemOrUserAssignedMap(transform) -} - -func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.ManagedClusterPropertiesAutoScalerProfile) ([]interface{}, error) { +func flattenKubernetesClusterAutoScalerProfile(profile *managedclusters.ManagedClusterPropertiesAutoScalerProfile) ([]interface{}, error) { if profile == nil { return []interface{}{}, nil } @@ -2836,6 +2857,11 @@ func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed balanceSimilarNodeGroups = strings.EqualFold(*profile.BalanceSimilarNodeGroups, "true") } + expander := "" + if profile.Expander != nil { + expander = string(*profile.Expander) + } + maxGracefulTerminationSec := "" if profile.MaxGracefulTerminationSec != nil { maxGracefulTerminationSec = *profile.MaxGracefulTerminationSec @@ -2922,7 +2948,7 @@ func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed return []interface{}{ map[string]interface{}{ "balance_similar_node_groups": balanceSimilarNodeGroups, - "expander": string(profile.Expander), + "expander": expander, "max_graceful_termination_sec": maxGracefulTerminationSec, "max_node_provisioning_time": maxNodeProvisionTime, "max_unready_nodes": maxUnreadyNodes, @@ -2942,7 +2968,7 @@ func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed }, nil } -func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerservice.ManagedClusterPropertiesAutoScalerProfile { +func expandKubernetesClusterAutoScalerProfile(input []interface{}) *managedclusters.ManagedClusterPropertiesAutoScalerProfile { if len(input) == 0 { return nil } @@ -2967,9 +2993,9 @@ func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerser skipNodesWithLocalStorage := config["skip_nodes_with_local_storage"].(bool) skipNodesWithSystemPods := config["skip_nodes_with_system_pods"].(bool) - return &containerservice.ManagedClusterPropertiesAutoScalerProfile{ + return &managedclusters.ManagedClusterPropertiesAutoScalerProfile{ BalanceSimilarNodeGroups: utils.String(strconv.FormatBool(balanceSimilarNodeGroups)), - Expander: containerservice.Expander(expander), + Expander: utils.ToPtr(managedclusters.Expander(expander)), MaxGracefulTerminationSec: utils.String(maxGracefulTerminationSec), MaxNodeProvisionTime: utils.String(maxNodeProvisionTime), MaxTotalUnreadyPercentage: utils.String(maxUnreadyPercentage), @@ -2988,44 +3014,44 @@ func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerser } } -func expandKubernetesClusterMaintenanceConfiguration(input []interface{}) *containerservice.MaintenanceConfigurationProperties { +func expandKubernetesClusterMaintenanceConfiguration(input []interface{}) *maintenanceconfigurations.MaintenanceConfigurationProperties { if len(input) == 0 { return nil } value := input[0].(map[string]interface{}) - return &containerservice.MaintenanceConfigurationProperties{ + return &maintenanceconfigurations.MaintenanceConfigurationProperties{ NotAllowedTime: expandKubernetesClusterMaintenanceConfigurationTimeSpans(value["not_allowed"].(*pluginsdk.Set).List()), TimeInWeek: expandKubernetesClusterMaintenanceConfigurationTimeInWeeks(value["allowed"].(*pluginsdk.Set).List()), } } -func expandKubernetesClusterMaintenanceConfigurationTimeSpans(input []interface{}) *[]containerservice.TimeSpan { - results := make([]containerservice.TimeSpan, 0) +func expandKubernetesClusterMaintenanceConfigurationTimeSpans(input []interface{}) 
*[]maintenanceconfigurations.TimeSpan { + results := make([]maintenanceconfigurations.TimeSpan, 0) for _, item := range input { v := item.(map[string]interface{}) start, _ := time.Parse(time.RFC3339, v["start"].(string)) end, _ := time.Parse(time.RFC3339, v["end"].(string)) - results = append(results, containerservice.TimeSpan{ - Start: &date.Time{Time: start}, - End: &date.Time{Time: end}, + results = append(results, maintenanceconfigurations.TimeSpan{ + Start: utils.ToPtr(start.Format("2006-01-02T15:04:05Z07:00")), + End: utils.ToPtr(end.Format("2006-01-02T15:04:05Z07:00")), }) } return &results } -func expandKubernetesClusterMaintenanceConfigurationTimeInWeeks(input []interface{}) *[]containerservice.TimeInWeek { - results := make([]containerservice.TimeInWeek, 0) +func expandKubernetesClusterMaintenanceConfigurationTimeInWeeks(input []interface{}) *[]maintenanceconfigurations.TimeInWeek { + results := make([]maintenanceconfigurations.TimeInWeek, 0) for _, item := range input { v := item.(map[string]interface{}) - results = append(results, containerservice.TimeInWeek{ - Day: containerservice.WeekDay(v["day"].(string)), - HourSlots: utils.ExpandInt32Slice(v["hours"].(*pluginsdk.Set).List()), + results = append(results, maintenanceconfigurations.TimeInWeek{ + Day: utils.ToPtr(maintenanceconfigurations.WeekDay(v["day"].(string))), + HourSlots: utils.ExpandInt64Slice(v["hours"].(*pluginsdk.Set).List()), }) } return &results } -func flattenKubernetesClusterMaintenanceConfiguration(input *containerservice.MaintenanceConfigurationProperties) interface{} { +func flattenKubernetesClusterMaintenanceConfiguration(input *maintenanceconfigurations.MaintenanceConfigurationProperties) interface{} { results := make([]interface{}, 0) if input == nil { return results @@ -3037,7 +3063,7 @@ func flattenKubernetesClusterMaintenanceConfiguration(input *containerservice.Ma return results } -func flattenKubernetesClusterMaintenanceConfigurationTimeSpans(input *[]containerservice.TimeSpan) []interface{} { +func flattenKubernetesClusterMaintenanceConfigurationTimeSpans(input *[]maintenanceconfigurations.TimeSpan) []interface{} { results := make([]interface{}, 0) if input == nil { return results @@ -3046,11 +3072,12 @@ func flattenKubernetesClusterMaintenanceConfigurationTimeSpans(input *[]containe for _, item := range *input { var end string if item.End != nil { - end = item.End.Format(time.RFC3339) + end = *item.End } var start string if item.Start != nil { - start = item.Start.Format(time.RFC3339) + start = *item.Start + } results = append(results, map[string]interface{}{ "end": end, @@ -3060,23 +3087,27 @@ func flattenKubernetesClusterMaintenanceConfigurationTimeSpans(input *[]containe return results } -func flattenKubernetesClusterMaintenanceConfigurationTimeInWeeks(input *[]containerservice.TimeInWeek) []interface{} { +func flattenKubernetesClusterMaintenanceConfigurationTimeInWeeks(input *[]maintenanceconfigurations.TimeInWeek) []interface{} { results := make([]interface{}, 0) if input == nil { return results } for _, item := range *input { + day := "" + if item.Day != nil { + day = string(*item.Day) + } results = append(results, map[string]interface{}{ - "day": string(item.Day), - "hours": utils.FlattenInt32Slice(item.HourSlots), + "day": day, + "hours": utils.FlattenInt64Slice(item.HourSlots), }) } return results } -func expandKubernetesClusterHttpProxyConfig(input []interface{}) *containerservice.ManagedClusterHTTPProxyConfig { - httpProxyConfig := containerservice.ManagedClusterHTTPProxyConfig{} +func 
expandKubernetesClusterHttpProxyConfig(input []interface{}) *managedclusters.ManagedClusterHTTPProxyConfig { + httpProxyConfig := managedclusters.ManagedClusterHTTPProxyConfig{} if len(input) == 0 || input[0] == nil { return nil } @@ -3095,14 +3126,14 @@ func expandKubernetesClusterHttpProxyConfig(input []interface{}) *containerservi return &httpProxyConfig } -func expandKubernetesClusterOidcIssuerProfile(input bool) *containerservice.ManagedClusterOIDCIssuerProfile { - oidcIssuerProfile := containerservice.ManagedClusterOIDCIssuerProfile{} +func expandKubernetesClusterOidcIssuerProfile(input bool) *managedclusters.ManagedClusterOIDCIssuerProfile { + oidcIssuerProfile := managedclusters.ManagedClusterOIDCIssuerProfile{} oidcIssuerProfile.Enabled = &input return &oidcIssuerProfile } -func flattenKubernetesClusterHttpProxyConfig(props *containerservice.ManagedClusterProperties) []interface{} { +func flattenKubernetesClusterHttpProxyConfig(props *managedclusters.ManagedClusterProperties) []interface{} { if props == nil || props.HTTPProxyConfig == nil { return []interface{}{} } @@ -3138,11 +3169,13 @@ func flattenKubernetesClusterHttpProxyConfig(props *containerservice.ManagedClus }) } -func expandKubernetesClusterMicrosoftDefender(d *pluginsdk.ResourceData, input []interface{}) *containerservice.ManagedClusterSecurityProfile { +func expandKubernetesClusterMicrosoftDefender(d *pluginsdk.ResourceData, input []interface{}) *managedclusters.ManagedClusterSecurityProfile { if (len(input) == 0 || input[0] == nil) && d.HasChange("microsoft_defender") { - return &containerservice.ManagedClusterSecurityProfile{ - AzureDefender: &containerservice.ManagedClusterSecurityProfileAzureDefender{ - Enabled: utils.Bool(false), + return &managedclusters.ManagedClusterSecurityProfile{ + Defender: &managedclusters.ManagedClusterSecurityProfileDefender{ + SecurityMonitoring: &managedclusters.ManagedClusterSecurityProfileDefenderSecurityMonitoring{ + Enabled: utils.Bool(false), + }, }, } } else if len(input) == 0 || input[0] == nil { @@ -3150,21 +3183,23 @@ func expandKubernetesClusterMicrosoftDefender(d *pluginsdk.ResourceData, input [ } config := input[0].(map[string]interface{}) - return &containerservice.ManagedClusterSecurityProfile{ - AzureDefender: &containerservice.ManagedClusterSecurityProfileAzureDefender{ - Enabled: utils.Bool(true), - LogAnalyticsWorkspaceResourceID: utils.String(config["log_analytics_workspace_id"].(string)), + return &managedclusters.ManagedClusterSecurityProfile{ + Defender: &managedclusters.ManagedClusterSecurityProfileDefender{ + SecurityMonitoring: &managedclusters.ManagedClusterSecurityProfileDefenderSecurityMonitoring{ + Enabled: utils.Bool(true), + }, + LogAnalyticsWorkspaceResourceId: utils.String(config["log_analytics_workspace_id"].(string)), }, } } -func flattenKubernetesClusterMicrosoftDefender(input *containerservice.ManagedClusterSecurityProfile) []interface{} { - if input == nil || input.AzureDefender == nil || (input.AzureDefender.Enabled != nil && !*input.AzureDefender.Enabled) { +func flattenKubernetesClusterMicrosoftDefender(input *managedclusters.ManagedClusterSecurityProfile) []interface{} { + if input == nil || input.Defender == nil || (input.Defender.SecurityMonitoring != nil && input.Defender.SecurityMonitoring.Enabled != nil && !*input.Defender.SecurityMonitoring.Enabled) { return []interface{}{} } logAnalyticsWorkspace := "" - if v := input.AzureDefender.LogAnalyticsWorkspaceResourceID; v != nil { + if v := input.Defender.LogAnalyticsWorkspaceResourceId; v != 
nil { logAnalyticsWorkspace = *v } @@ -3175,22 +3210,34 @@ func flattenKubernetesClusterMicrosoftDefender(input *containerservice.ManagedCl } } -func expandEdgeZone(input string) *containerservice.ExtendedLocation { +func expandEdgeZone(input string) *edgezones.Model { normalized := edgezones.Normalize(input) if normalized == "" { return nil } - return &containerservice.ExtendedLocation{ - Name: utils.String(normalized), - Type: containerservice.ExtendedLocationTypesEdgeZone, + return &edgezones.Model{ + Name: normalized, } } -func flattenEdgeZone(input *containerservice.ExtendedLocation) string { +func flattenEdgeZone(input *edgezones.Model) string { // As the `extendedLocation.type` returned by API is always lower case, so it has to use `Normalize` function while comparing them - if input == nil || edgezones.Normalize(string(input.Type)) != edgezones.Normalize(string(containerservice.ExtendedLocationTypesEdgeZone)) || input.Name == nil { + if input == nil || input.Name == "" { return "" } - return edgezones.NormalizeNilable(input.Name) + return edgezones.NormalizeNilable(&input.Name) +} + +func base64Decode(str string) (string, bool) { + data, err := base64.StdEncoding.DecodeString(str) + if err != nil { + return "", true + } + return string(data), false +} + +func base64IsEncoded(data string) bool { + _, err := base64.StdEncoding.DecodeString(data) + return err == nil } diff --git a/internal/services/containers/kubernetes_cluster_resource_test.go b/internal/services/containers/kubernetes_cluster_resource_test.go index 89cd00179457..110022a8fd1b 100644 --- a/internal/services/containers/kubernetes_cluster_resource_test.go +++ b/internal/services/containers/kubernetes_cluster_resource_test.go @@ -6,10 +6,11 @@ import ( "net/http" "testing" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -95,17 +96,17 @@ func TestAccKubernetesCluster_edgeZone(t *testing.T) { } func (t KubernetesClusterResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.ClusterID(state.ID) + id, err := managedclusters.ParseManagedClusterID(state.ID) if err != nil { return nil, err } - resp, err := clients.Containers.KubernetesClustersClient.Get(ctx, id.ResourceGroup, id.ManagedClusterName) + resp, err := clients.Containers.KubernetesClustersClient.Get(ctx, *id) if err != nil { return nil, fmt.Errorf("reading Kubernetes Cluster (%s): %+v", id.String(), err) } - return utils.Bool(resp.ID != nil), nil + return utils.Bool(resp.Model != nil && resp.Model.Id != nil), nil } func (KubernetesClusterResource) updateDefaultNodePoolAgentCount(nodeCount int) acceptance.ClientCheckFunc { @@ -114,27 +115,28 @@ func (KubernetesClusterResource) updateDefaultNodePoolAgentCount(nodeCount int) clusterName := state.Attributes["name"] resourceGroup := state.Attributes["resource_group_name"] - nodePool, err := clients.Containers.AgentPoolsClient.Get(ctx, resourceGroup, clusterName, 
nodePoolName) + agentPoolId := agentpools.NewAgentPoolID(clients.Account.SubscriptionId, resourceGroup, clusterName, nodePoolName) + nodePool, err := clients.Containers.AgentPoolsClient.Get(ctx, agentPoolId) if err != nil { return fmt.Errorf("Bad: Get on agentPoolsClient: %+v", err) } - if nodePool.StatusCode == http.StatusNotFound { + if nodePool.HttpResponse.StatusCode == http.StatusNotFound { return fmt.Errorf("Bad: Node Pool %q (Kubernetes Cluster %q / Resource Group: %q) does not exist", nodePoolName, clusterName, resourceGroup) } - if nodePool.ManagedClusterAgentPoolProfileProperties == nil { + if nodePool.Model == nil || nodePool.Model.Properties == nil { return fmt.Errorf("Bad: Node Pool %q (Kubernetes Cluster %q / Resource Group: %q): `properties` was nil", nodePoolName, clusterName, resourceGroup) } - nodePool.ManagedClusterAgentPoolProfileProperties.Count = utils.Int32(int32(nodeCount)) + nodePool.Model.Properties.Count = utils.Int64(int64(nodeCount)) - future, err := clients.Containers.AgentPoolsClient.CreateOrUpdate(ctx, resourceGroup, clusterName, nodePoolName, nodePool) + future, err := clients.Containers.AgentPoolsClient.CreateOrUpdate(ctx, agentPoolId, *nodePool.Model) if err != nil { return fmt.Errorf("Bad: updating node pool %q: %+v", nodePoolName, err) } - if err := future.WaitForCompletionRef(ctx, clients.Containers.AgentPoolsClient.Client); err != nil { + if err := future.Poller.PollUntilDone(); err != nil { return fmt.Errorf("Bad: waiting for update of node pool %q: %+v", nodePoolName, err) } diff --git a/internal/services/containers/kubernetes_cluster_validate.go b/internal/services/containers/kubernetes_cluster_validate.go index 6ab60e63ae7b..bdd28f327d1d 100644 --- a/internal/services/containers/kubernetes_cluster_validate.go +++ b/internal/services/containers/kubernetes_cluster_validate.go @@ -6,14 +6,16 @@ import ( "net/http" "strings" - "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice" + "github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/client" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" - "github.com/hashicorp/terraform-provider-azurerm/utils" ) -func validateKubernetesCluster(d *pluginsdk.ResourceData, cluster *containerservice.ManagedCluster, resourceGroup, name string) error { +func validateKubernetesCluster(d *pluginsdk.ResourceData, cluster *managedclusters.ManagedCluster, resourceGroup, name string) error { if v, exists := d.GetOk("network_profile"); exists { rawProfiles := v.([]interface{}) @@ -69,11 +71,11 @@ func validateKubernetesCluster(d *pluginsdk.ResourceData, cluster *containerserv // defined locally, if so, we need to error out if cluster != nil { servicePrincipalExists := false - if props := cluster.ManagedClusterProperties; props != nil { + if props := cluster.Properties; props != nil { if sp := props.ServicePrincipalProfile; sp != nil { - if cid := sp.ClientID; cid != nil { + if cid := sp.ClientId; cid != "" { // if it's MSI we ignore the block - servicePrincipalExists = !strings.EqualFold(*cid, "msi") + 
servicePrincipalExists = !strings.EqualFold(cid, "msi") } } } @@ -102,10 +104,10 @@ func validateKubernetesCluster(d *pluginsdk.ResourceData, cluster *containerserv } else { // for an existing cluster servicePrincipalIsMsi := false - if props := cluster.ManagedClusterProperties; props != nil { + if props := cluster.Properties; props != nil { if sp := props.ServicePrincipalProfile; sp != nil { - if cid := sp.ClientID; cid != nil { - servicePrincipalIsMsi = strings.EqualFold(*cid, "msi") + if cid := sp.ClientId; cid != "" { + servicePrincipalIsMsi = strings.EqualFold(cid, "msi") } } } @@ -116,8 +118,8 @@ func validateKubernetesCluster(d *pluginsdk.ResourceData, cluster *containerserv } hasIdentity := false - if identity := cluster.Identity; identity != nil { - hasIdentity = identity.Type != containerservice.ResourceIdentityTypeNone + if clusterIdentity := cluster.Identity; clusterIdentity != nil { + hasIdentity = clusterIdentity.Type != identity.TypeNone } if hasIdentity { @@ -262,9 +264,10 @@ details can be found at https://aka.ms/version-skew-policy. `, desiredNodePoolVersion, nodePoolName, clusterName, resourceGroup, clusterVersionDetails, versionsList) } -func validateNodePoolSupportsVersion(ctx context.Context, client *client.Client, currentNodePoolVersion string, defaultNodePoolId parse.NodePoolId, desiredNodePoolVersion string) error { +func validateNodePoolSupportsVersion(ctx context.Context, client *client.Client, currentNodePoolVersion string, defaultNodePoolId agentpools.AgentPoolId, desiredNodePoolVersion string) error { // confirm the version being used is >= the version of the control plane - versions, err := client.AgentPoolsClient.GetAvailableAgentPoolVersions(ctx, defaultNodePoolId.ResourceGroup, defaultNodePoolId.ManagedClusterName) + clusterId := agentpools.NewManagedClusterID(defaultNodePoolId.SubscriptionId, defaultNodePoolId.ResourceGroupName, defaultNodePoolId.ResourceName) + resp, err := client.AgentPoolsClient.GetAvailableAgentPoolVersions(ctx, clusterId) if err != nil { return fmt.Errorf("retrieving Available Agent Pool Versions for %s: %+v", defaultNodePoolId, err) } @@ -277,8 +280,8 @@ func validateNodePoolSupportsVersion(ctx context.Context, client *client.Client, } // when creating a new cluster or upgrading the desired version should be supported - if !versionExists && versions.AgentPoolAvailableVersionsProperties != nil && versions.AgentPoolAvailableVersionsProperties.AgentPoolVersions != nil { - for _, version := range *versions.AgentPoolAvailableVersionsProperties.AgentPoolVersions { + if versions := resp.Model; !versionExists && versions != nil && versions.Properties.AgentPoolVersions != nil { + for _, version := range *versions.Properties.AgentPoolVersions { if version.KubernetesVersion == nil { continue } @@ -293,21 +296,21 @@ func validateNodePoolSupportsVersion(ctx context.Context, client *client.Client, } if !versionExists { - clusterId := parse.NewClusterID(defaultNodePoolId.SubscriptionId, defaultNodePoolId.ResourceGroup, defaultNodePoolId.ManagedClusterName) - cluster, err := client.KubernetesClustersClient.Get(ctx, clusterId.ResourceGroup, clusterId.ManagedClusterName) + clusterId := managedclusters.NewManagedClusterID(defaultNodePoolId.SubscriptionId, defaultNodePoolId.ResourceGroupName, defaultNodePoolId.ResourceName) + cluster, err := client.KubernetesClustersClient.Get(ctx, clusterId) if err != nil { - if !utils.ResponseWasStatusCode(cluster.Response, http.StatusUnauthorized) { + if !response.WasStatusCode(cluster.HttpResponse, 
http.StatusUnauthorized) { return fmt.Errorf("retrieving %s: %+v", clusterId, err) } } // nilable since a user may not necessarily have access, and this is trying to be helpful var clusterVersion *string - if props := cluster.ManagedClusterProperties; props != nil { - clusterVersion = props.CurrentKubernetesVersion + if clusterModel := cluster.Model; clusterModel != nil && clusterModel.Properties != nil { + clusterVersion = clusterModel.Properties.CurrentKubernetesVersion } - return clusterControlPlaneMustBeUpgradedError(defaultNodePoolId.ResourceGroup, defaultNodePoolId.ManagedClusterName, defaultNodePoolId.AgentPoolName, clusterVersion, desiredNodePoolVersion, supportedVersions) + return clusterControlPlaneMustBeUpgradedError(defaultNodePoolId.ResourceGroupName, defaultNodePoolId.ResourceName, defaultNodePoolId.AgentPoolName, clusterVersion, desiredNodePoolVersion, supportedVersions) } return nil diff --git a/internal/services/containers/kubernetes_nodepool.go b/internal/services/containers/kubernetes_nodepool.go index 3709fe02de3b..d38da7f27163 100644 --- a/internal/services/containers/kubernetes_nodepool.go +++ b/internal/services/containers/kubernetes_nodepool.go @@ -7,16 +7,17 @@ import ( "strconv" "strings" - "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" + "github.com/hashicorp/go-azure-helpers/resourcemanager/tags" "github.com/hashicorp/go-azure-helpers/resourcemanager/zones" "github.com/hashicorp/go-azure-sdk/resource-manager/compute/2021-11-01/proximityplacementgroups" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-azurerm/helpers/azure" computeValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/services/containers/validate" networkValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/network/validate" - "github.com/hashicorp/terraform-provider-azurerm/internal/tags" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/utils" @@ -42,10 +43,10 @@ func SchemaDefaultNodePool() *pluginsdk.Schema { Type: pluginsdk.TypeString, Optional: true, ForceNew: true, - Default: string(containerservice.AgentPoolTypeVirtualMachineScaleSets), + Default: string(managedclusters.AgentPoolTypeVirtualMachineScaleSets), ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.AgentPoolTypeAvailabilitySet), - string(containerservice.AgentPoolTypeVirtualMachineScaleSets), + string(managedclusters.AgentPoolTypeAvailabilitySet), + string(managedclusters.AgentPoolTypeVirtualMachineScaleSets), }, false), }, @@ -98,8 +99,8 @@ func SchemaDefaultNodePool() *pluginsdk.Schema { Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.KubeletDiskTypeOS), - string(containerservice.KubeletDiskTypeTemporary), + string(managedclusters.KubeletDiskTypeOS), + string(managedclusters.KubeletDiskTypeTemporary), }, false), }, @@ -164,7 +165,7 @@ func SchemaDefaultNodePool() *pluginsdk.Schema { }, }, - "tags": 
tags.Schema(), + "tags": commonschema.Tags(), "os_disk_size_gb": { Type: pluginsdk.TypeInt, @@ -178,10 +179,10 @@ func SchemaDefaultNodePool() *pluginsdk.Schema { Type: pluginsdk.TypeString, Optional: true, ForceNew: true, - Default: containerservice.OSDiskTypeManaged, + Default: agentpools.OSDiskTypeManaged, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.OSDiskTypeEphemeral), - string(containerservice.OSDiskTypeManaged), + string(managedclusters.OSDiskTypeEphemeral), + string(managedclusters.OSDiskTypeManaged), }, false), }, @@ -191,8 +192,8 @@ func SchemaDefaultNodePool() *pluginsdk.Schema { ForceNew: true, Computed: true, // defaults to Ubuntu if using Linux ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.OSSKUUbuntu), - string(containerservice.OSSKUCBLMariner), + string(managedclusters.OSSKUUbuntu), + string(managedclusters.OSSKUCBLMariner), }, false), }, @@ -237,10 +238,10 @@ func SchemaDefaultNodePool() *pluginsdk.Schema { Type: pluginsdk.TypeString, Optional: true, ForceNew: true, - Default: string(containerservice.ScaleDownModeDelete), + Default: string(managedclusters.ScaleDownModeDelete), ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.ScaleDownModeDeallocate), - string(containerservice.ScaleDownModeDelete), + string(managedclusters.ScaleDownModeDeallocate), + string(managedclusters.ScaleDownModeDelete), }, false), }, @@ -258,7 +259,7 @@ func SchemaDefaultNodePool() *pluginsdk.Schema { Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{ - string(containerservice.WorkloadRuntimeOCIContainer), + string(managedclusters.WorkloadRuntimeOCIContainer), }, false), }, } @@ -617,54 +618,104 @@ func schemaNodePoolSysctlConfig() *pluginsdk.Schema { } } -func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterAgentPoolProfile) containerservice.AgentPool { +func ConvertDefaultNodePoolToAgentPool(input *[]managedclusters.ManagedClusterAgentPoolProfile) agentpools.AgentPool { defaultCluster := (*input)[0] - return containerservice.AgentPool{ - Name: defaultCluster.Name, - ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + + agentpool := agentpools.AgentPool{ + Name: &defaultCluster.Name, + Properties: &agentpools.ManagedClusterAgentPoolProfileProperties{ Count: defaultCluster.Count, - VMSize: defaultCluster.VMSize, + VmSize: defaultCluster.VmSize, OsDiskSizeGB: defaultCluster.OsDiskSizeGB, - OsDiskType: defaultCluster.OsDiskType, VnetSubnetID: defaultCluster.VnetSubnetID, - KubeletConfig: defaultCluster.KubeletConfig, - LinuxOSConfig: defaultCluster.LinuxOSConfig, MaxPods: defaultCluster.MaxPods, - OsType: defaultCluster.OsType, MaxCount: defaultCluster.MaxCount, MessageOfTheDay: defaultCluster.MessageOfTheDay, MinCount: defaultCluster.MinCount, EnableAutoScaling: defaultCluster.EnableAutoScaling, EnableFIPS: defaultCluster.EnableFIPS, - KubeletDiskType: defaultCluster.KubeletDiskType, - Type: defaultCluster.Type, OrchestratorVersion: defaultCluster.OrchestratorVersion, ProximityPlacementGroupID: defaultCluster.ProximityPlacementGroupID, AvailabilityZones: defaultCluster.AvailabilityZones, EnableNodePublicIP: defaultCluster.EnableNodePublicIP, NodePublicIPPrefixID: defaultCluster.NodePublicIPPrefixID, - ScaleSetPriority: defaultCluster.ScaleSetPriority, - ScaleSetEvictionPolicy: defaultCluster.ScaleSetEvictionPolicy, SpotMaxPrice: defaultCluster.SpotMaxPrice, - Mode: defaultCluster.Mode, NodeLabels: 
defaultCluster.NodeLabels, NodeTaints: defaultCluster.NodeTaints, PodSubnetID: defaultCluster.PodSubnetID, - ScaleDownMode: defaultCluster.ScaleDownMode, Tags: defaultCluster.Tags, - UpgradeSettings: defaultCluster.UpgradeSettings, - WorkloadRuntime: defaultCluster.WorkloadRuntime, }, } + if osDisktypeNodePool := defaultCluster.OsDiskType; osDisktypeNodePool != nil { + osDisktype := agentpools.OSDiskType(string(*osDisktypeNodePool)) + agentpool.Properties.OsDiskType = &osDisktype + } + if kubeletConfigNodePool := defaultCluster.KubeletConfig; kubeletConfigNodePool != nil { + kubeletConfig := agentpools.KubeletConfig{ + AllowedUnsafeSysctls: kubeletConfigNodePool.AllowedUnsafeSysctls, + ContainerLogMaxFiles: kubeletConfigNodePool.ContainerLogMaxFiles, + ContainerLogMaxSizeMB: kubeletConfigNodePool.ContainerLogMaxSizeMB, + CpuCfsQuota: kubeletConfigNodePool.CpuCfsQuota, + CpuCfsQuotaPeriod: kubeletConfigNodePool.CpuCfsQuotaPeriod, + CpuManagerPolicy: kubeletConfigNodePool.CpuManagerPolicy, + FailSwapOn: kubeletConfigNodePool.FailSwapOn, + ImageGcHighThreshold: kubeletConfigNodePool.ImageGcHighThreshold, + ImageGcLowThreshold: kubeletConfigNodePool.ImageGcLowThreshold, + PodMaxPids: kubeletConfigNodePool.PodMaxPids, + TopologyManagerPolicy: kubeletConfigNodePool.TopologyManagerPolicy, + } + agentpool.Properties.KubeletConfig = &kubeletConfig + } + if linuxOsConfigRaw := defaultCluster.LinuxOSConfig; linuxOsConfigRaw != nil { + linuxOsConfig := agentpools.LinuxOSConfig{ + SwapFileSizeMB: linuxOsConfigRaw.SwapFileSizeMB, + TransparentHugePageDefrag: linuxOsConfigRaw.TransparentHugePageDefrag, + TransparentHugePageEnabled: linuxOsConfigRaw.TransparentHugePageEnabled, + } + if sysctlsRaw := linuxOsConfigRaw.Sysctls; sysctlsRaw != nil { + linuxOsConfig.Sysctls = utils.ToPtr(agentpools.SysctlConfig(*sysctlsRaw)) + } + agentpool.Properties.LinuxOSConfig = &linuxOsConfig + } + if osTypeNodePool := defaultCluster.OsType; osTypeNodePool != nil { + agentpool.Properties.OsType = utils.ToPtr(agentpools.OSType(string(*osTypeNodePool))) + } + if kubeletDiskTypeNodePool := defaultCluster.KubeletDiskType; kubeletDiskTypeNodePool != nil { + agentpool.Properties.KubeletDiskType = utils.ToPtr(agentpools.KubeletDiskType(string(*kubeletDiskTypeNodePool))) + } + if agentPoolTypeNodePool := defaultCluster.Type; agentPoolTypeNodePool != nil { + agentpool.Properties.Type = utils.ToPtr(agentpools.AgentPoolType(string(*agentPoolTypeNodePool))) + } + if scaleSetPriorityNodePool := defaultCluster.ScaleSetPriority; scaleSetPriorityNodePool != nil { + agentpool.Properties.ScaleSetPriority = utils.ToPtr(agentpools.ScaleSetPriority(string(*scaleSetPriorityNodePool))) + } + if scaleSetEvictionPolicyNodePool := defaultCluster.ScaleSetEvictionPolicy; scaleSetEvictionPolicyNodePool != nil { + agentpool.Properties.ScaleSetEvictionPolicy = utils.ToPtr(agentpools.ScaleSetEvictionPolicy(string(*scaleSetEvictionPolicyNodePool))) + } + if modeNodePool := defaultCluster.Mode; modeNodePool != nil { + agentpool.Properties.Mode = utils.ToPtr(agentpools.AgentPoolMode(string(*modeNodePool))) + } + if scaleDownModeNodePool := defaultCluster.ScaleDownMode; scaleDownModeNodePool != nil { + agentpool.Properties.ScaleDownMode = utils.ToPtr(agentpools.ScaleDownMode(string(*scaleDownModeNodePool))) + } + agentpool.Properties.UpgradeSettings = &agentpools.AgentPoolUpgradeSettings{} + if upgradeSettingsNodePool := defaultCluster.UpgradeSettings; upgradeSettingsNodePool != nil && upgradeSettingsNodePool.MaxSurge != nil && 
*upgradeSettingsNodePool.MaxSurge != "" { + agentpool.Properties.UpgradeSettings.MaxSurge = upgradeSettingsNodePool.MaxSurge + } + if workloadRuntimeNodePool := defaultCluster.WorkloadRuntime; workloadRuntimeNodePool != nil { + agentpool.Properties.WorkloadRuntime = utils.ToPtr(agentpools.WorkloadRuntime(string(*workloadRuntimeNodePool))) + } + + return agentpool } -func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.ManagedClusterAgentPoolProfile, error) { +func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]managedclusters.ManagedClusterAgentPoolProfile, error) { input := d.Get("default_node_pool").([]interface{}) raw := input[0].(map[string]interface{}) enableAutoScaling := raw["enable_auto_scaling"].(bool) nodeLabelsRaw := raw["node_labels"].(map[string]interface{}) - nodeLabels := utils.ExpandMapStringPtrString(nodeLabelsRaw) + nodeLabels := expandNodeLabels(nodeLabelsRaw) nodeTaintsRaw := raw["node_taints"].([]interface{}) nodeTaints := utils.ExpandStringSlice(nodeTaintsRaw) @@ -679,30 +730,30 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag t := raw["tags"].(map[string]interface{}) - profile := containerservice.ManagedClusterAgentPoolProfile{ + profile := managedclusters.ManagedClusterAgentPoolProfile{ EnableAutoScaling: utils.Bool(enableAutoScaling), EnableFIPS: utils.Bool(raw["fips_enabled"].(bool)), EnableNodePublicIP: utils.Bool(raw["enable_node_public_ip"].(bool)), EnableEncryptionAtHost: utils.Bool(raw["enable_host_encryption"].(bool)), - KubeletDiskType: containerservice.KubeletDiskType(raw["kubelet_disk_type"].(string)), - Name: utils.String(raw["name"].(string)), + KubeletDiskType: utils.ToPtr(managedclusters.KubeletDiskType(raw["kubelet_disk_type"].(string))), + Name: raw["name"].(string), NodeLabels: nodeLabels, NodeTaints: nodeTaints, Tags: tags.Expand(t), - Type: containerservice.AgentPoolType(raw["type"].(string)), - VMSize: utils.String(raw["vm_size"].(string)), + Type: utils.ToPtr(managedclusters.AgentPoolType(raw["type"].(string))), + VmSize: utils.String(raw["vm_size"].(string)), // at this time the default node pool has to be Linux or the AKS cluster fails to provision with: // Pods not in Running status: coredns-7fc597cc45-v5z7x,coredns-autoscaler-7ccc76bfbd-djl7j,metrics-server-cbd95f966-5rl97,tunnelfront-7d9884977b-wpbvn // Windows agents can be configured via the separate node pool resource - OsType: containerservice.OSTypeLinux, + OsType: utils.ToPtr(managedclusters.OSTypeLinux), // without this set the API returns: // Code="MustDefineAtLeastOneSystemPool" Message="Must define at least one system pool." 
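// (an AKS cluster must always contain at least one node pool with `Mode: System`, since system pools host critical system pods such as CoreDNS and metrics-server)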
// since this is the "default" node pool we can assume this is a system node pool - Mode: containerservice.AgentPoolModeSystem, + Mode: utils.ToPtr(managedclusters.AgentPoolModeSystem), - UpgradeSettings: expandUpgradeSettings(raw["upgrade_settings"].([]interface{})), + UpgradeSettings: expandClusterNodePoolUpgradeSettings(raw["upgrade_settings"].([]interface{})), // // TODO: support these in time // ScaleSetEvictionPolicy: "", @@ -714,8 +765,8 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag profile.AvailabilityZones = &zones } - if maxPods := int32(raw["max_pods"].(int)); maxPods > 0 { - profile.MaxPods = utils.Int32(maxPods) + if maxPods := int64(raw["max_pods"].(int)); maxPods > 0 { + profile.MaxPods = utils.Int64(maxPods) } if v := raw["message_of_the_day"].(string); v != "" { @@ -727,26 +778,27 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag profile.NodePublicIPPrefixID = utils.String(prefixID) } - if osDiskSizeGB := int32(raw["os_disk_size_gb"].(int)); osDiskSizeGB > 0 { - profile.OsDiskSizeGB = utils.Int32(osDiskSizeGB) + if osDiskSizeGB := int64(raw["os_disk_size_gb"].(int)); osDiskSizeGB > 0 { + profile.OsDiskSizeGB = utils.Int64(osDiskSizeGB) } - profile.OsDiskType = containerservice.OSDiskTypeManaged + profile.OsDiskType = utils.ToPtr(managedclusters.OSDiskTypeManaged) if osDiskType := raw["os_disk_type"].(string); osDiskType != "" { - profile.OsDiskType = containerservice.OSDiskType(raw["os_disk_type"].(string)) + profile.OsDiskType = utils.ToPtr(managedclusters.OSDiskType(osDiskType)) } if osSku := raw["os_sku"].(string); osSku != "" { - profile.OsSKU = containerservice.OSSKU(osSku) + profile.OsSKU = utils.ToPtr(managedclusters.OSSKU(osSku)) } if podSubnetID := raw["pod_subnet_id"].(string); podSubnetID != "" { profile.PodSubnetID = utils.String(podSubnetID) } - profile.ScaleDownMode = containerservice.ScaleDownModeDelete + scaleDownModeDelete := managedclusters.ScaleDownModeDelete + profile.ScaleDownMode = &scaleDownModeDelete if scaleDownMode := raw["scale_down_mode"].(string); scaleDownMode != "" { - profile.ScaleDownMode = containerservice.ScaleDownMode(scaleDownMode) + profile.ScaleDownMode = utils.ToPtr(managedclusters.ScaleDownMode(scaleDownMode)) } if ultraSSDEnabled, ok := raw["ultra_ssd_enabled"]; ok { @@ -770,7 +822,7 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag } if workloadRunTime := raw["workload_runtime"].(string); workloadRunTime != "" { - profile.WorkloadRuntime = containerservice.WorkloadRuntime(workloadRunTime) + profile.WorkloadRuntime = utils.ToPtr(managedclusters.WorkloadRuntime(workloadRunTime)) } if capacityReservationGroupId := raw["capacity_reservation_group_id"].(string); capacityReservationGroupId != "" { @@ -784,13 +836,13 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag // Count must always be set (see #6094), RP behaviour has changed // since the API version upgrade in v2.1.0 making Count required // for all create/update requests - profile.Count = utils.Int32(int32(count)) + profile.Count = utils.Int64(int64(count)) if enableAutoScaling { // if Count has not been set use min count if count == 0 { count = minCount - profile.Count = utils.Int32(int32(count)) + profile.Count = utils.Int64(int64(count)) } // Count must be set for the initial creation when using AutoScaling but cannot be updated @@ -799,7 +851,7 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag } if 
maxCount > 0 { - profile.MaxCount = utils.Int32(int32(maxCount)) + profile.MaxCount = utils.Int64(int64(maxCount)) if maxCount < count { return nil, fmt.Errorf("`node_count`(%d) must be equal to or less than `max_count`(%d) when `enable_auto_scaling` is set to `true`", count, maxCount) } @@ -808,7 +860,7 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag } if minCount > 0 { - profile.MinCount = utils.Int32(int32(minCount)) + profile.MinCount = utils.Int64(int64(minCount)) if minCount > count && d.IsNewResource() { return nil, fmt.Errorf("`node_count`(%d) must be equal to or greater than `min_count`(%d) when `enable_auto_scaling` is set to `true`", count, minCount) @@ -825,74 +877,74 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag } if kubeletConfig := raw["kubelet_config"].([]interface{}); len(kubeletConfig) > 0 { - profile.KubeletConfig = expandAgentPoolKubeletConfig(kubeletConfig) + profile.KubeletConfig = expandClusterNodePoolKubeletConfig(kubeletConfig) } if linuxOSConfig := raw["linux_os_config"].([]interface{}); len(linuxOSConfig) > 0 { - linuxOSConfig, err := expandAgentPoolLinuxOSConfig(linuxOSConfig) + linuxOSConfig, err := expandClusterNodePoolLinuxOSConfig(linuxOSConfig) if err != nil { return nil, err } profile.LinuxOSConfig = linuxOSConfig } - return &[]containerservice.ManagedClusterAgentPoolProfile{ + return &[]managedclusters.ManagedClusterAgentPoolProfile{ profile, }, nil } -func expandAgentPoolKubeletConfig(input []interface{}) *containerservice.KubeletConfig { +func expandClusterNodePoolKubeletConfig(input []interface{}) *managedclusters.KubeletConfig { if len(input) == 0 || input[0] == nil { return nil } raw := input[0].(map[string]interface{}) - result := &containerservice.KubeletConfig{ - CPUCfsQuota: utils.Bool(raw["cpu_cfs_quota_enabled"].(bool)), + result := &managedclusters.KubeletConfig{ + CpuCfsQuota: utils.Bool(raw["cpu_cfs_quota_enabled"].(bool)), // must be false, otherwise the backend will report error: CustomKubeletConfig.FailSwapOn must be set to false to enable swap file on nodes. 
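// (node swap itself is configured via `swap_file_size_mb` in the `linux_os_config` block, which maps to SwapFileSizeMB further down)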
FailSwapOn: utils.Bool(false), AllowedUnsafeSysctls: utils.ExpandStringSlice(raw["allowed_unsafe_sysctls"].(*pluginsdk.Set).List()), } if v := raw["cpu_manager_policy"].(string); v != "" { - result.CPUManagerPolicy = utils.String(v) + result.CpuManagerPolicy = utils.String(v) } if v := raw["cpu_cfs_quota_period"].(string); v != "" { - result.CPUCfsQuotaPeriod = utils.String(v) + result.CpuCfsQuotaPeriod = utils.String(v) } if v := raw["image_gc_high_threshold"].(int); v != 0 { - result.ImageGcHighThreshold = utils.Int32(int32(v)) + result.ImageGcHighThreshold = utils.Int64(int64(v)) } if v := raw["image_gc_low_threshold"].(int); v != 0 { - result.ImageGcLowThreshold = utils.Int32(int32(v)) + result.ImageGcLowThreshold = utils.Int64(int64(v)) } if v := raw["topology_manager_policy"].(string); v != "" { result.TopologyManagerPolicy = utils.String(v) } if v := raw["container_log_max_size_mb"].(int); v != 0 { - result.ContainerLogMaxSizeMB = utils.Int32(int32(v)) + result.ContainerLogMaxSizeMB = utils.Int64(int64(v)) } if v := raw["container_log_max_line"].(int); v != 0 { - result.ContainerLogMaxFiles = utils.Int32(int32(v)) + result.ContainerLogMaxFiles = utils.Int64(int64(v)) } if v := raw["pod_max_pid"].(int); v != 0 { - result.PodMaxPids = utils.Int32(int32(v)) + result.PodMaxPids = utils.Int64(int64(v)) } return result } -func expandAgentPoolLinuxOSConfig(input []interface{}) (*containerservice.LinuxOSConfig, error) { +func expandClusterNodePoolLinuxOSConfig(input []interface{}) (*managedclusters.LinuxOSConfig, error) { if len(input) == 0 || input[0] == nil { return nil, nil } raw := input[0].(map[string]interface{}) - sysctlConfig, err := expandAgentPoolSysctlConfig(raw["sysctl_config"].([]interface{})) + sysctlConfig, err := expandClusterNodePoolSysctlConfig(raw["sysctl_config"].([]interface{})) if err != nil { return nil, err } - result := &containerservice.LinuxOSConfig{ + result := &managedclusters.LinuxOSConfig{ Sysctls: sysctlConfig, } if v := raw["transparent_huge_page_enabled"].(string); v != "" { @@ -902,57 +954,57 @@ func expandAgentPoolLinuxOSConfig(input []interface{}) (*containerservice.LinuxO result.TransparentHugePageDefrag = utils.String(v) } if v := raw["swap_file_size_mb"].(int); v != 0 { - result.SwapFileSizeMB = utils.Int32(int32(v)) + result.SwapFileSizeMB = utils.Int64(int64(v)) } return result, nil } -func expandAgentPoolSysctlConfig(input []interface{}) (*containerservice.SysctlConfig, error) { +func expandClusterNodePoolSysctlConfig(input []interface{}) (*managedclusters.SysctlConfig, error) { if len(input) == 0 || input[0] == nil { return nil, nil } raw := input[0].(map[string]interface{}) - result := &containerservice.SysctlConfig{ - NetIpv4TCPTwReuse: utils.Bool(raw["net_ipv4_tcp_tw_reuse"].(bool)), + result := &managedclusters.SysctlConfig{ + NetIPv4TcpTwReuse: utils.Bool(raw["net_ipv4_tcp_tw_reuse"].(bool)), } if v := raw["net_core_somaxconn"].(int); v != 0 { - result.NetCoreSomaxconn = utils.Int32(int32(v)) + result.NetCoreSomaxconn = utils.Int64(int64(v)) } if v := raw["net_core_netdev_max_backlog"].(int); v != 0 { - result.NetCoreNetdevMaxBacklog = utils.Int32(int32(v)) + result.NetCoreNetdevMaxBacklog = utils.Int64(int64(v)) } if v := raw["net_core_rmem_default"].(int); v != 0 { - result.NetCoreRmemDefault = utils.Int32(int32(v)) + result.NetCoreRmemDefault = utils.Int64(int64(v)) } if v := raw["net_core_rmem_max"].(int); v != 0 { - result.NetCoreRmemMax = utils.Int32(int32(v)) + result.NetCoreRmemMax = utils.Int64(int64(v)) } if v := 
raw["net_core_wmem_default"].(int); v != 0 { - result.NetCoreWmemDefault = utils.Int32(int32(v)) + result.NetCoreWmemDefault = utils.Int64(int64(v)) } if v := raw["net_core_wmem_max"].(int); v != 0 { - result.NetCoreWmemMax = utils.Int32(int32(v)) + result.NetCoreWmemMax = utils.Int64(int64(v)) } if v := raw["net_core_optmem_max"].(int); v != 0 { - result.NetCoreOptmemMax = utils.Int32(int32(v)) + result.NetCoreOptmemMax = utils.Int64(int64(v)) } if v := raw["net_ipv4_tcp_max_syn_backlog"].(int); v != 0 { - result.NetIpv4TCPMaxSynBacklog = utils.Int32(int32(v)) + result.NetIPv4TcpMaxSynBacklog = utils.Int64(int64(v)) } if v := raw["net_ipv4_tcp_max_tw_buckets"].(int); v != 0 { - result.NetIpv4TCPMaxTwBuckets = utils.Int32(int32(v)) + result.NetIPv4TcpMaxTwBuckets = utils.Int64(int64(v)) } if v := raw["net_ipv4_tcp_fin_timeout"].(int); v != 0 { - result.NetIpv4TCPFinTimeout = utils.Int32(int32(v)) + result.NetIPv4TcpFinTimeout = utils.Int64(int64(v)) } if v := raw["net_ipv4_tcp_keepalive_time"].(int); v != 0 { - result.NetIpv4TCPKeepaliveTime = utils.Int32(int32(v)) + result.NetIPv4TcpKeepaliveTime = utils.Int64(int64(v)) } if v := raw["net_ipv4_tcp_keepalive_probes"].(int); v != 0 { - result.NetIpv4TCPKeepaliveProbes = utils.Int32(int32(v)) + result.NetIPv4TcpKeepaliveProbes = utils.Int64(int64(v)) } if v := raw["net_ipv4_tcp_keepalive_intvl"].(int); v != 0 { - result.NetIpv4TcpkeepaliveIntvl = utils.Int32(int32(v)) + result.NetIPv4TcpkeepaliveIntvl = utils.Int64(int64(v)) } netIpv4IPLocalPortRangeMin := raw["net_ipv4_ip_local_port_range_min"].(int) netIpv4IPLocalPortRangeMax := raw["net_ipv4_ip_local_port_range_max"].(int) @@ -963,51 +1015,51 @@ func expandAgentPoolSysctlConfig(input []interface{}) (*containerservice.SysctlC return nil, fmt.Errorf("`net_ipv4_ip_local_port_range_min` should be no larger than `net_ipv4_ip_local_port_range_max`") } if netIpv4IPLocalPortRangeMin != 0 && netIpv4IPLocalPortRangeMax != 0 { - result.NetIpv4IPLocalPortRange = utils.String(fmt.Sprintf("%d %d", netIpv4IPLocalPortRangeMin, netIpv4IPLocalPortRangeMax)) + result.NetIPv4IPLocalPortRange = utils.String(fmt.Sprintf("%d %d", netIpv4IPLocalPortRangeMin, netIpv4IPLocalPortRangeMax)) } if v := raw["net_ipv4_neigh_default_gc_thresh1"].(int); v != 0 { - result.NetIpv4NeighDefaultGcThresh1 = utils.Int32(int32(v)) + result.NetIPv4NeighDefaultGcThresh1 = utils.Int64(int64(v)) } if v := raw["net_ipv4_neigh_default_gc_thresh2"].(int); v != 0 { - result.NetIpv4NeighDefaultGcThresh2 = utils.Int32(int32(v)) + result.NetIPv4NeighDefaultGcThresh2 = utils.Int64(int64(v)) } if v := raw["net_ipv4_neigh_default_gc_thresh3"].(int); v != 0 { - result.NetIpv4NeighDefaultGcThresh3 = utils.Int32(int32(v)) + result.NetIPv4NeighDefaultGcThresh3 = utils.Int64(int64(v)) } if v := raw["net_netfilter_nf_conntrack_max"].(int); v != 0 { - result.NetNetfilterNfConntrackMax = utils.Int32(int32(v)) + result.NetNetfilterNfConntrackMax = utils.Int64(int64(v)) } if v := raw["net_netfilter_nf_conntrack_buckets"].(int); v != 0 { - result.NetNetfilterNfConntrackBuckets = utils.Int32(int32(v)) + result.NetNetfilterNfConntrackBuckets = utils.Int64(int64(v)) } if v := raw["fs_aio_max_nr"].(int); v != 0 { - result.FsAioMaxNr = utils.Int32(int32(v)) + result.FsAioMaxNr = utils.Int64(int64(v)) } if v := raw["fs_inotify_max_user_watches"].(int); v != 0 { - result.FsInotifyMaxUserWatches = utils.Int32(int32(v)) + result.FsInotifyMaxUserWatches = utils.Int64(int64(v)) } if v := raw["fs_file_max"].(int); v != 0 { - result.FsFileMax = utils.Int32(int32(v)) 
+ result.FsFileMax = utils.Int64(int64(v)) } if v := raw["fs_nr_open"].(int); v != 0 { - result.FsNrOpen = utils.Int32(int32(v)) + result.FsNrOpen = utils.Int64(int64(v)) } if v := raw["kernel_threads_max"].(int); v != 0 { - result.KernelThreadsMax = utils.Int32(int32(v)) + result.KernelThreadsMax = utils.Int64(int64(v)) } if v := raw["vm_max_map_count"].(int); v != 0 { - result.VMMaxMapCount = utils.Int32(int32(v)) + result.VmMaxMapCount = utils.Int64(int64(v)) } if v := raw["vm_swappiness"].(int); v != 0 { - result.VMSwappiness = utils.Int32(int32(v)) + result.VmSwappiness = utils.Int64(int64(v)) } if v := raw["vm_vfs_cache_pressure"].(int); v != 0 { - result.VMVfsCachePressure = utils.Int32(int32(v)) + result.VmVfsCachePressure = utils.Int64(int64(v)) } return result, nil } -func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolProfile, d *pluginsdk.ResourceData) (*[]interface{}, error) { +func FlattenDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProfile, d *pluginsdk.ResourceData) (*[]interface{}, error) { if input == nil { return &[]interface{}{}, nil } @@ -1071,16 +1123,13 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro minCount = int(*agentPool.MinCount) } - name := "" - if agentPool.Name != nil { - name = *agentPool.Name - } + name := agentPool.Name var nodeLabels map[string]string if agentPool.NodeLabels != nil { nodeLabels = make(map[string]string) - for k, v := range agentPool.NodeLabels { - nodeLabels[k] = *v + for k, v := range *agentPool.NodeLabels { + nodeLabels[k] = v } } @@ -1103,9 +1152,9 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro osDiskSizeGB = int(*agentPool.OsDiskSizeGB) } - osDiskType := containerservice.OSDiskTypeManaged - if agentPool.OsDiskType != "" { - osDiskType = agentPool.OsDiskType + osDiskType := managedclusters.OSDiskTypeManaged + if agentPool.OsDiskType != nil { + osDiskType = *agentPool.OsDiskType } podSubnetId := "" @@ -1136,14 +1185,14 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro proximityPlacementGroupId = *agentPool.ProximityPlacementGroupID } - scaleDownMode := containerservice.ScaleDownModeDelete - if agentPool.ScaleDownMode != "" { - scaleDownMode = agentPool.ScaleDownMode + scaleDownMode := managedclusters.ScaleDownModeDelete + if agentPool.ScaleDownMode != nil { + scaleDownMode = *agentPool.ScaleDownMode } vmSize := "" - if agentPool.VMSize != nil { - vmSize = *agentPool.VMSize + if agentPool.VmSize != nil { + vmSize = *agentPool.VmSize } capacityReservationGroupId := "" if agentPool.CapacityReservationGroupID != nil { @@ -1151,12 +1200,27 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro } workloadRunTime := "" - if agentPool.WorkloadRuntime != "" { - workloadRunTime = string(agentPool.WorkloadRuntime) + if agentPool.WorkloadRuntime != nil { + workloadRunTime = string(*agentPool.WorkloadRuntime) } - upgradeSettings := flattenUpgradeSettings(agentPool.UpgradeSettings) - linuxOSConfig, err := flattenAgentPoolLinuxOSConfig(agentPool.LinuxOSConfig) + kubeletDiskType := "" + if agentPool.KubeletDiskType != nil { + kubeletDiskType = string(*agentPool.KubeletDiskType) + } + + osSKU := "" + if agentPool.OsSKU != nil { + osSKU = string(*agentPool.OsSKU) + } + + agentPoolType := "" + if agentPool.Type != nil { + agentPoolType = string(*agentPool.Type) + } + + upgradeSettings := flattenClusterNodePoolUpgradeSettings(agentPool.UpgradeSettings) + linuxOSConfig, err := 
flattenClusterNodePoolLinuxOSConfig(agentPool.LinuxOSConfig) if err != nil { return nil, err } @@ -1167,7 +1231,7 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro "enable_host_encryption": enableHostEncryption, "fips_enabled": enableFIPS, "host_group_id": hostGroupID, - "kubelet_disk_type": string(agentPool.KubeletDiskType), + "kubelet_disk_type": kubeletDiskType, "max_count": maxCount, "max_pods": maxPods, "message_of_the_day": messageOfTheDay, @@ -1179,10 +1243,10 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro "node_taints": []string{}, "os_disk_size_gb": osDiskSizeGB, "os_disk_type": string(osDiskType), - "os_sku": string(agentPool.OsSKU), + "os_sku": osSKU, "scale_down_mode": string(scaleDownMode), "tags": tags.Flatten(agentPool.Tags), - "type": string(agentPool.Type), + "type": agentPoolType, "ultra_ssd_enabled": enableUltraSSD, "vm_size": vmSize, "workload_runtime": workloadRunTime, @@ -1192,7 +1256,7 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro "upgrade_settings": upgradeSettings, "vnet_subnet_id": vnetSubnetId, "only_critical_addons_enabled": criticalAddonsEnabled, - "kubelet_config": flattenAgentPoolKubeletConfig(agentPool.KubeletConfig), + "kubelet_config": flattenClusterNodePoolKubeletConfig(agentPool.KubeletConfig), "linux_os_config": linuxOSConfig, "zones": zones.Flatten(agentPool.AvailabilityZones), "capacity_reservation_group_id": capacityReservationGroupId, @@ -1203,7 +1267,77 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro }, nil } -func flattenAgentPoolKubeletConfig(input *containerservice.KubeletConfig) []interface{} { +func flattenClusterNodePoolUpgradeSettings(input *managedclusters.AgentPoolUpgradeSettings) []interface{} { + maxSurge := "" + if input != nil && input.MaxSurge != nil { + maxSurge = *input.MaxSurge + } + + if maxSurge == "" { + return []interface{}{} + } + + return []interface{}{ + map[string]interface{}{ + "max_surge": maxSurge, + }, + } +} + +func flattenClusterNodePoolKubeletConfig(input *managedclusters.KubeletConfig) []interface{} { + if input == nil { + return []interface{}{} + } + + var cpuManagerPolicy, cpuCfsQuotaPeriod, topologyManagerPolicy string + var cpuCfsQuotaEnabled bool + var imageGcHighThreshold, imageGcLowThreshold, containerLogMaxSizeMB, containerLogMaxLines, podMaxPids int + + if input.CpuManagerPolicy != nil { + cpuManagerPolicy = *input.CpuManagerPolicy + } + if input.CpuCfsQuota != nil { + cpuCfsQuotaEnabled = *input.CpuCfsQuota + } + if input.CpuCfsQuotaPeriod != nil { + cpuCfsQuotaPeriod = *input.CpuCfsQuotaPeriod + } + if input.ImageGcHighThreshold != nil { + imageGcHighThreshold = int(*input.ImageGcHighThreshold) + } + if input.ImageGcLowThreshold != nil { + imageGcLowThreshold = int(*input.ImageGcLowThreshold) + } + if input.TopologyManagerPolicy != nil { + topologyManagerPolicy = *input.TopologyManagerPolicy + } + if input.ContainerLogMaxSizeMB != nil { + containerLogMaxSizeMB = int(*input.ContainerLogMaxSizeMB) + } + if input.ContainerLogMaxFiles != nil { + containerLogMaxLines = int(*input.ContainerLogMaxFiles) + } + if input.PodMaxPids != nil { + podMaxPids = int(*input.PodMaxPids) + } + + return []interface{}{ + map[string]interface{}{ + "cpu_manager_policy": cpuManagerPolicy, + "cpu_cfs_quota_enabled": cpuCfsQuotaEnabled, + "cpu_cfs_quota_period": cpuCfsQuotaPeriod, + "image_gc_high_threshold": imageGcHighThreshold, + "image_gc_low_threshold": imageGcLowThreshold, + 
"topology_manager_policy": topologyManagerPolicy, + "allowed_unsafe_sysctls": utils.FlattenStringSlice(input.AllowedUnsafeSysctls), + "container_log_max_size_mb": containerLogMaxSizeMB, + "container_log_max_line": containerLogMaxLines, + "pod_max_pid": podMaxPids, + }, + } +} + +func flattenAgentPoolKubeletConfig(input *agentpools.KubeletConfig) []interface{} { if input == nil { return []interface{}{} } @@ -1212,14 +1346,14 @@ func flattenAgentPoolKubeletConfig(input *containerservice.KubeletConfig) []inte var cpuCfsQuotaEnabled bool var imageGcHighThreshold, imageGcLowThreshold, containerLogMaxSizeMB, containerLogMaxLines, podMaxPids int - if input.CPUManagerPolicy != nil { - cpuManagerPolicy = *input.CPUManagerPolicy + if input.CpuManagerPolicy != nil { + cpuManagerPolicy = *input.CpuManagerPolicy } - if input.CPUCfsQuota != nil { - cpuCfsQuotaEnabled = *input.CPUCfsQuota + if input.CpuCfsQuota != nil { + cpuCfsQuotaEnabled = *input.CpuCfsQuota } - if input.CPUCfsQuotaPeriod != nil { - cpuCfsQuotaPeriod = *input.CPUCfsQuotaPeriod + if input.CpuCfsQuotaPeriod != nil { + cpuCfsQuotaPeriod = *input.CpuCfsQuotaPeriod } if input.ImageGcHighThreshold != nil { imageGcHighThreshold = int(*input.ImageGcHighThreshold) @@ -1256,7 +1390,7 @@ func flattenAgentPoolKubeletConfig(input *containerservice.KubeletConfig) []inte } } -func flattenAgentPoolLinuxOSConfig(input *containerservice.LinuxOSConfig) ([]interface{}, error) { +func flattenClusterNodePoolLinuxOSConfig(input *managedclusters.LinuxOSConfig) ([]interface{}, error) { if input == nil { return make([]interface{}, 0), nil } @@ -1273,7 +1407,7 @@ func flattenAgentPoolLinuxOSConfig(input *containerservice.LinuxOSConfig) ([]int if input.TransparentHugePageEnabled != nil { transparentHugePageEnabled = *input.TransparentHugePageEnabled } - sysctlConfig, err := flattenAgentPoolSysctlConfig(input.Sysctls) + sysctlConfig, err := flattenClusterNodePoolSysctlConfig(input.Sysctls) if err != nil { return nil, err } @@ -1287,7 +1421,7 @@ func flattenAgentPoolLinuxOSConfig(input *containerservice.LinuxOSConfig) ([]int }, nil } -func flattenAgentPoolSysctlConfig(input *containerservice.SysctlConfig) ([]interface{}, error) { +func flattenClusterNodePoolSysctlConfig(input *managedclusters.SysctlConfig) ([]interface{}, error) { if input == nil { return make([]interface{}, 0), nil } @@ -1341,10 +1475,10 @@ func flattenAgentPoolSysctlConfig(input *containerservice.SysctlConfig) ([]inter netCoreWmemMax = int(*input.NetCoreWmemMax) } var netIpv4IpLocalPortRangeMin, netIpv4IpLocalPortRangeMax int - if input.NetIpv4IPLocalPortRange != nil { - arr := regexp.MustCompile("[ \t]+").Split(*input.NetIpv4IPLocalPortRange, -1) + if input.NetIPv4IPLocalPortRange != nil { + arr := regexp.MustCompile("[ \t]+").Split(*input.NetIPv4IPLocalPortRange, -1) if len(arr) != 2 { - return nil, fmt.Errorf("parsing `NetIpv4IPLocalPortRange` %s", *input.NetIpv4IPLocalPortRange) + return nil, fmt.Errorf("parsing `NetIPv4IPLocalPortRange` %s", *input.NetIPv4IPLocalPortRange) } var err error netIpv4IpLocalPortRangeMin, err = strconv.Atoi(arr[0]) @@ -1357,44 +1491,44 @@ func flattenAgentPoolSysctlConfig(input *containerservice.SysctlConfig) ([]inter } } var netIpv4NeighDefaultGcThresh1 int - if input.NetIpv4NeighDefaultGcThresh1 != nil { - netIpv4NeighDefaultGcThresh1 = int(*input.NetIpv4NeighDefaultGcThresh1) + if input.NetIPv4NeighDefaultGcThresh1 != nil { + netIpv4NeighDefaultGcThresh1 = int(*input.NetIPv4NeighDefaultGcThresh1) } var netIpv4NeighDefaultGcThresh2 int - if 
-func flattenAgentPoolLinuxOSConfig(input *containerservice.LinuxOSConfig) ([]interface{}, error) {
+func flattenClusterNodePoolLinuxOSConfig(input *managedclusters.LinuxOSConfig) ([]interface{}, error) {
 	if input == nil {
 		return make([]interface{}, 0), nil
 	}
@@ -1273,7 +1407,7 @@ func flattenAgentPoolLinuxOSConfig(input *containerservice.LinuxOSConfig) ([]int
 	if input.TransparentHugePageEnabled != nil {
 		transparentHugePageEnabled = *input.TransparentHugePageEnabled
 	}
-	sysctlConfig, err := flattenAgentPoolSysctlConfig(input.Sysctls)
+	sysctlConfig, err := flattenClusterNodePoolSysctlConfig(input.Sysctls)
 	if err != nil {
 		return nil, err
 	}
@@ -1287,7 +1421,7 @@ func flattenAgentPoolLinuxOSConfig(input *containerservice.LinuxOSConfig) ([]int
 	}, nil
 }
 
-func flattenAgentPoolSysctlConfig(input *containerservice.SysctlConfig) ([]interface{}, error) {
+func flattenClusterNodePoolSysctlConfig(input *managedclusters.SysctlConfig) ([]interface{}, error) {
 	if input == nil {
 		return make([]interface{}, 0), nil
 	}
@@ -1341,10 +1475,10 @@ func flattenAgentPoolSysctlConfig(input *containerservice.SysctlConfig) ([]inter
 		netCoreWmemMax = int(*input.NetCoreWmemMax)
 	}
 	var netIpv4IpLocalPortRangeMin, netIpv4IpLocalPortRangeMax int
-	if input.NetIpv4IPLocalPortRange != nil {
-		arr := regexp.MustCompile("[ \t]+").Split(*input.NetIpv4IPLocalPortRange, -1)
+	if input.NetIPv4IPLocalPortRange != nil {
+		arr := regexp.MustCompile("[ \t]+").Split(*input.NetIPv4IPLocalPortRange, -1)
 		if len(arr) != 2 {
-			return nil, fmt.Errorf("parsing `NetIpv4IPLocalPortRange` %s", *input.NetIpv4IPLocalPortRange)
+			return nil, fmt.Errorf("parsing `NetIPv4IPLocalPortRange` %s", *input.NetIPv4IPLocalPortRange)
 		}
 		var err error
 		netIpv4IpLocalPortRangeMin, err = strconv.Atoi(arr[0])
@@ -1357,44 +1491,44 @@ func flattenAgentPoolSysctlConfig(input *containerservice.SysctlConfig) ([]inter
 		}
 	}
 	var netIpv4NeighDefaultGcThresh1 int
-	if input.NetIpv4NeighDefaultGcThresh1 != nil {
-		netIpv4NeighDefaultGcThresh1 = int(*input.NetIpv4NeighDefaultGcThresh1)
+	if input.NetIPv4NeighDefaultGcThresh1 != nil {
+		netIpv4NeighDefaultGcThresh1 = int(*input.NetIPv4NeighDefaultGcThresh1)
 	}
 	var netIpv4NeighDefaultGcThresh2 int
-	if input.NetIpv4NeighDefaultGcThresh2 != nil {
-		netIpv4NeighDefaultGcThresh2 = int(*input.NetIpv4NeighDefaultGcThresh2)
+	if input.NetIPv4NeighDefaultGcThresh2 != nil {
+		netIpv4NeighDefaultGcThresh2 = int(*input.NetIPv4NeighDefaultGcThresh2)
 	}
 	var netIpv4NeighDefaultGcThresh3 int
-	if input.NetIpv4NeighDefaultGcThresh3 != nil {
-		netIpv4NeighDefaultGcThresh3 = int(*input.NetIpv4NeighDefaultGcThresh3)
+	if input.NetIPv4NeighDefaultGcThresh3 != nil {
+		netIpv4NeighDefaultGcThresh3 = int(*input.NetIPv4NeighDefaultGcThresh3)
 	}
 	var netIpv4TcpFinTimeout int
-	if input.NetIpv4TCPFinTimeout != nil {
-		netIpv4TcpFinTimeout = int(*input.NetIpv4TCPFinTimeout)
+	if input.NetIPv4TcpFinTimeout != nil {
+		netIpv4TcpFinTimeout = int(*input.NetIPv4TcpFinTimeout)
 	}
 	var netIpv4TcpkeepaliveIntvl int
-	if input.NetIpv4TcpkeepaliveIntvl != nil {
-		netIpv4TcpkeepaliveIntvl = int(*input.NetIpv4TcpkeepaliveIntvl)
+	if input.NetIPv4TcpkeepaliveIntvl != nil {
+		netIpv4TcpkeepaliveIntvl = int(*input.NetIPv4TcpkeepaliveIntvl)
 	}
 	var netIpv4TcpKeepaliveProbes int
-	if input.NetIpv4TCPKeepaliveProbes != nil {
-		netIpv4TcpKeepaliveProbes = int(*input.NetIpv4TCPKeepaliveProbes)
+	if input.NetIPv4TcpKeepaliveProbes != nil {
+		netIpv4TcpKeepaliveProbes = int(*input.NetIPv4TcpKeepaliveProbes)
 	}
 	var netIpv4TcpKeepaliveTime int
-	if input.NetIpv4TCPKeepaliveTime != nil {
-		netIpv4TcpKeepaliveTime = int(*input.NetIpv4TCPKeepaliveTime)
+	if input.NetIPv4TcpKeepaliveTime != nil {
+		netIpv4TcpKeepaliveTime = int(*input.NetIPv4TcpKeepaliveTime)
 	}
 	var netIpv4TcpMaxSynBacklog int
-	if input.NetIpv4TCPMaxSynBacklog != nil {
-		netIpv4TcpMaxSynBacklog = int(*input.NetIpv4TCPMaxSynBacklog)
+	if input.NetIPv4TcpMaxSynBacklog != nil {
+		netIpv4TcpMaxSynBacklog = int(*input.NetIPv4TcpMaxSynBacklog)
 	}
 	var netIpv4TcpMaxTwBuckets int
-	if input.NetIpv4TCPMaxTwBuckets != nil {
-		netIpv4TcpMaxTwBuckets = int(*input.NetIpv4TCPMaxTwBuckets)
+	if input.NetIPv4TcpMaxTwBuckets != nil {
+		netIpv4TcpMaxTwBuckets = int(*input.NetIPv4TcpMaxTwBuckets)
 	}
 	var netIpv4TcpTwReuse bool
-	if input.NetIpv4TCPTwReuse != nil {
-		netIpv4TcpTwReuse = *input.NetIpv4TCPTwReuse
+	if input.NetIPv4TcpTwReuse != nil {
+		netIpv4TcpTwReuse = *input.NetIPv4TcpTwReuse
 	}
 	var netNetfilterNfConntrackBuckets int
 	if input.NetNetfilterNfConntrackBuckets != nil {
@@ -1405,16 +1539,16 @@ func flattenAgentPoolSysctlConfig(input *containerservice.SysctlConfig) ([]inter
 		netNetfilterNfConntrackMax = int(*input.NetNetfilterNfConntrackMax)
 	}
 	var vmMaxMapCount int
-	if input.VMMaxMapCount != nil {
-		vmMaxMapCount = int(*input.VMMaxMapCount)
+	if input.VmMaxMapCount != nil {
+		vmMaxMapCount = int(*input.VmMaxMapCount)
 	}
 	var vmSwappiness int
-	if input.VMSwappiness != nil {
-		vmSwappiness = int(*input.VMSwappiness)
+	if input.VmSwappiness != nil {
+		vmSwappiness = int(*input.VmSwappiness)
 	}
 	var vmVfsCachePressure int
-	if input.VMVfsCachePressure != nil {
-		vmVfsCachePressure = int(*input.VMVfsCachePressure)
+	if input.VmVfsCachePressure != nil {
+		vmVfsCachePressure = int(*input.VmVfsCachePressure)
 	}
 	return []interface{}{
 		map[string]interface{}{
@@ -1451,15 +1585,15 @@ func flattenAgentPoolSysctlConfig(input *containerservice.SysctlConfig) ([]inter
 	}, nil
 }
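Note on the sysctl changes above: the deprecated SDK modelled these settings as *int32, while the replacement package uses *int64, so the expand path now wraps values with utils.Int64(int64(v)) and the flatten path converts back with int(*v) behind a nil check. The round trip is lossless because the schema validation keeps these values well within int32 range. A standalone sketch of the pattern follows; the helper names are illustrative and not part of the change.

package main

import "fmt"

// int64IfSet mirrors the expand side: a schema int is only written to the
// model when it is non-zero, and the new SDK wants *int64 rather than *int32.
func int64IfSet(v int) *int64 {
	if v == 0 {
		return nil
	}
	out := int64(v)
	return &out
}

// intFromPtr mirrors the flatten side: nil becomes the zero value.
func intFromPtr(v *int64) int {
	if v == nil {
		return 0
	}
	return int(*v)
}

func main() {
	// Round-trip a sysctl-style value, e.g. a TCP backlog setting.
	p := int64IfSet(16384)
	fmt.Println(intFromPtr(p), intFromPtr(nil)) // 16384 0
}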
-func findDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolProfile, d *pluginsdk.ResourceData) (*containerservice.ManagedClusterAgentPoolProfile, error) {
+func findDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProfile, d *pluginsdk.ResourceData) (*managedclusters.ManagedClusterAgentPoolProfile, error) {
 	// first try loading this from the Resource Data if possible (e.g. when Created)
 	defaultNodePoolName := d.Get("default_node_pool.0.name")
-	var agentPool *containerservice.ManagedClusterAgentPoolProfile
+	var agentPool *managedclusters.ManagedClusterAgentPoolProfile
 	if defaultNodePoolName != "" {
 		// find it
 		for _, v := range *input {
-			if v.Name != nil && *v.Name == defaultNodePoolName {
+			if v.Name == defaultNodePoolName {
 				agentPool = &v
 				break
 			}
@@ -1469,26 +1603,39 @@ func findDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolProfil
 	if agentPool == nil {
 		// otherwise we need to fall back to the name of the first agent pool
 		for _, v := range *input {
-			if v.Name == nil {
+			if v.Name == "" {
 				continue
 			}
-			if v.Mode != containerservice.AgentPoolModeSystem {
+			if v.Mode == nil || *v.Mode != managedclusters.AgentPoolModeSystem {
 				continue
 			}
-			defaultNodePoolName = *v.Name
+			defaultNodePoolName = v.Name
 			agentPool = &v
 			break
 		}
 		if defaultNodePoolName == nil {
-			return nil, fmt.Errorf("Unable to Determine Default Agent Pool")
+			return nil, fmt.Errorf("unable to Determine Default Agent Pool")
 		}
 	}
 
 	if agentPool == nil {
-		return nil, fmt.Errorf("The Default Agent Pool %q was not found", defaultNodePoolName)
+		return nil, fmt.Errorf("the Default Agent Pool %q was not found", defaultNodePoolName)
 	}
 
 	return agentPool, nil
 }
+
+func expandClusterNodePoolUpgradeSettings(input []interface{}) *managedclusters.AgentPoolUpgradeSettings {
+	setting := &managedclusters.AgentPoolUpgradeSettings{}
+	if len(input) == 0 || input[0] == nil {
+		return setting
+	}
+
+	v := input[0].(map[string]interface{})
+	if maxSurgeRaw := v["max_surge"].(string); maxSurgeRaw != "" {
+		setting.MaxSurge = utils.String(maxSurgeRaw)
+	}
+	return setting
+}
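Note on expandClusterNodePoolUpgradeSettings and its counterpart flattenClusterNodePoolUpgradeSettings above: an empty `upgrade_settings` block still expands to a non-nil (but empty) settings struct, and an unset MaxSurge flattens back to an empty block rather than an empty string. The sketch below mirrors that round trip with a local stand-in type so it runs without the SDK; the type and function names are illustrative only.

package main

import "fmt"

// agentPoolUpgradeSettings stands in for the SDK's upgrade-settings model,
// which exposes MaxSurge as *string in the new package.
type agentPoolUpgradeSettings struct {
	MaxSurge *string
}

// expandUpgradeSettings mirrors the expand helper above: an empty block
// still yields a non-nil struct so the API payload field is always present.
func expandUpgradeSettings(input []interface{}) *agentPoolUpgradeSettings {
	setting := &agentPoolUpgradeSettings{}
	if len(input) == 0 || input[0] == nil {
		return setting
	}

	raw := input[0].(map[string]interface{})
	if maxSurge := raw["max_surge"].(string); maxSurge != "" {
		setting.MaxSurge = &maxSurge
	}
	return setting
}

// flattenUpgradeSettings mirrors the flatten helper above: an unset MaxSurge
// collapses to an empty block in state.
func flattenUpgradeSettings(input *agentPoolUpgradeSettings) []interface{} {
	if input == nil || input.MaxSurge == nil || *input.MaxSurge == "" {
		return []interface{}{}
	}
	return []interface{}{
		map[string]interface{}{
			"max_surge": *input.MaxSurge,
		},
	}
}

func main() {
	expanded := expandUpgradeSettings([]interface{}{map[string]interface{}{"max_surge": "33%"}})
	fmt.Println(flattenUpgradeSettings(expanded)) // [map[max_surge:33%]]
}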
%+v", err) } inferenceClusterParameters := machinelearningcomputes.ComputeResource{ - Properties: expandAksComputeProperties(&aks, d), + Properties: expandAksComputeProperties(aksID.ID(), aksModel, d), Identity: identity, Location: utils.String(azure.NormalizeLocation(d.Get("location").(string))), Tags: tags.Expand(d.Get("tags").(map[string]interface{})), @@ -230,7 +235,7 @@ func resourceAksInferenceClusterRead(d *pluginsdk.ResourceData, meta interface{} aksComputeProperties := computeResource.Model.Properties.(machinelearningcomputes.AKS) // Retrieve AKS Cluster ID - aksId, err := parse.KubernetesClusterID(*aksComputeProperties.ResourceId) + aksId, err := managedclusters.ParseManagedClusterID(*aksComputeProperties.ResourceId) if err != nil { return err } @@ -277,10 +282,10 @@ func resourceAksInferenceClusterDelete(d *pluginsdk.ResourceData, meta interface return nil } -func expandAksComputeProperties(aks *containerservice.ManagedCluster, d *pluginsdk.ResourceData) machinelearningcomputes.AKS { - fqdn := aks.PrivateFQDN +func expandAksComputeProperties(aksId string, aks *managedclusters.ManagedCluster, d *pluginsdk.ResourceData) machinelearningcomputes.AKS { + fqdn := aks.Properties.PrivateFQDN if fqdn == nil { - fqdn = aks.Fqdn + fqdn = aks.Properties.Fqdn } return machinelearningcomputes.AKS{ @@ -289,9 +294,9 @@ func expandAksComputeProperties(aks *containerservice.ManagedCluster, d *plugins SslConfiguration: expandSSLConfig(d.Get("ssl").([]interface{})), ClusterPurpose: utils.ToPtr(machinelearningcomputes.ClusterPurpose(d.Get("cluster_purpose").(string))), }, - ComputeLocation: aks.Location, + ComputeLocation: utils.String(aks.Location), Description: utils.String(d.Get("description").(string)), - ResourceId: aks.ID, + ResourceId: utils.String(aksId), } } diff --git a/utils/common_marshal.go b/utils/common_marshal.go index 4aa1e6eaf4e7..425000edf59a 100644 --- a/utils/common_marshal.go +++ b/utils/common_marshal.go @@ -54,6 +54,15 @@ func ExpandInt32Slice(input []interface{}) *[]int32 { return &result } +func ExpandInt64Slice(input []interface{}) *[]int64 { + result := make([]int64, len(input)) + for i, item := range input { + result[i] = int64(item.(int)) + } + + return &result +} + func FlattenStringSlice(input *[]string) []interface{} { result := make([]interface{}, 0) if input != nil { @@ -106,6 +115,16 @@ func FlattenInt32Slice(input *[]int32) []interface{} { return result } +func FlattenInt64Slice(input *[]int64) []interface{} { + result := make([]interface{}, 0) + if input != nil { + for _, item := range *input { + result = append(result, item) + } + } + return result +} + func ExpandStringSliceWithDelimiter(input []interface{}, delimiter string) *string { result := make([]string, 0) for _, item := range input { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/CHANGELOG.md deleted file mode 100644 index 52911e4cc5e4..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/CHANGELOG.md +++ /dev/null @@ -1,2 +0,0 @@ -# Change History - diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/_meta.json 
b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/_meta.json deleted file mode 100644 index 8b1625a1ceee..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/_meta.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "commit": "fc9d4d2798f755bea848ac1c29b2730d31002cb8", - "readme": "/_/azure-rest-api-specs/specification/containerservice/resource-manager/readme.md", - "tag": "package-preview-2022-03", - "use": "@microsoft.azure/autorest.go@2.1.187", - "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-preview-2022-03 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/containerservice/resource-manager/readme.md", - "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix" - } -} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/agentpools.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/agentpools.go deleted file mode 100644 index 90495e507bf9..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/agentpools.go +++ /dev/null @@ -1,734 +0,0 @@ -package containerservice - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// AgentPoolsClient is the the Container Service Client. -type AgentPoolsClient struct { - BaseClient -} - -// NewAgentPoolsClient creates an instance of the AgentPoolsClient client. -func NewAgentPoolsClient(subscriptionID string) AgentPoolsClient { - return NewAgentPoolsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewAgentPoolsClientWithBaseURI creates an instance of the AgentPoolsClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewAgentPoolsClientWithBaseURI(baseURI string, subscriptionID string) AgentPoolsClient { - return AgentPoolsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate sends the create or update request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// agentPoolName - the name of the agent pool. -// parameters - the agent pool to create or update. 
-func (client AgentPoolsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, parameters AgentPool) (result AgentPoolsCreateOrUpdateFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.ManagedClusterAgentPoolProfileProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterAgentPoolProfileProperties.KubeletConfig", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterAgentPoolProfileProperties.KubeletConfig.ContainerLogMaxFiles", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterAgentPoolProfileProperties.KubeletConfig.ContainerLogMaxFiles", Name: validation.InclusiveMinimum, Rule: int64(2), Chain: nil}}}, - }}, - }}}}}); err != nil { - return result, validation.NewError("containerservice.AgentPoolsClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceName, agentPoolName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - result, err = client.CreateOrUpdateSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "CreateOrUpdate", result.Response(), "Failure sending request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client AgentPoolsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, parameters AgentPool) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "agentPoolName": autorest.Encode("path", agentPoolName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client AgentPoolsClient) CreateOrUpdateSender(req *http.Request) (future AgentPoolsCreateOrUpdateFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client AgentPoolsClient) CreateOrUpdateResponder(resp *http.Response) (result AgentPool, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete sends the delete request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// agentPoolName - the name of the agent pool. 
-// ignorePodDisruptionBudget - ignore-pod-disruption-budget=true to delete those pods on a node without -// considering Pod Disruption Budget -func (client AgentPoolsClient) Delete(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, ignorePodDisruptionBudget *bool) (result AgentPoolsDeleteFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.Delete") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.AgentPoolsClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, resourceName, agentPoolName, ignorePodDisruptionBudget) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Delete", nil, "Failure preparing request") - return - } - - result, err = client.DeleteSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Delete", result.Response(), "Failure sending request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client AgentPoolsClient) DeletePreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string, ignorePodDisruptionBudget *bool) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "agentPoolName": autorest.Encode("path", agentPoolName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if ignorePodDisruptionBudget != nil { - queryParameters["ignore-pod-disruption-budget"] = autorest.Encode("query", *ignorePodDisruptionBudget) - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. 
-func (client AgentPoolsClient) DeleteSender(req *http.Request) (future AgentPoolsDeleteFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client AgentPoolsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get sends the get request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// agentPoolName - the name of the agent pool. -func (client AgentPoolsClient) Get(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result AgentPool, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.AgentPoolsClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, resourceName, agentPoolName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client AgentPoolsClient) GetPreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "agentPoolName": autorest.Encode("path", agentPoolName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client AgentPoolsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client AgentPoolsClient) GetResponder(resp *http.Response) (result AgentPool, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetAvailableAgentPoolVersions see [supported Kubernetes -// versions](https://docs.microsoft.com/azure/aks/supported-kubernetes-versions) for more details about the version -// lifecycle. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. 
-func (client AgentPoolsClient) GetAvailableAgentPoolVersions(ctx context.Context, resourceGroupName string, resourceName string) (result AgentPoolAvailableVersions, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.GetAvailableAgentPoolVersions") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.AgentPoolsClient", "GetAvailableAgentPoolVersions", err.Error()) - } - - req, err := client.GetAvailableAgentPoolVersionsPreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetAvailableAgentPoolVersions", nil, "Failure preparing request") - return - } - - resp, err := client.GetAvailableAgentPoolVersionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetAvailableAgentPoolVersions", resp, "Failure sending request") - return - } - - result, err = client.GetAvailableAgentPoolVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetAvailableAgentPoolVersions", resp, "Failure responding to request") - return - } - - return -} - -// GetAvailableAgentPoolVersionsPreparer prepares the GetAvailableAgentPoolVersions request. -func (client AgentPoolsClient) GetAvailableAgentPoolVersionsPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetAvailableAgentPoolVersionsSender sends the GetAvailableAgentPoolVersions request. The method will close the -// http.Response Body if it receives an error. 
-func (client AgentPoolsClient) GetAvailableAgentPoolVersionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetAvailableAgentPoolVersionsResponder handles the response to the GetAvailableAgentPoolVersions request. The method always -// closes the http.Response Body. -func (client AgentPoolsClient) GetAvailableAgentPoolVersionsResponder(resp *http.Response) (result AgentPoolAvailableVersions, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetUpgradeProfile sends the get upgrade profile request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// agentPoolName - the name of the agent pool. -func (client AgentPoolsClient) GetUpgradeProfile(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result AgentPoolUpgradeProfile, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.GetUpgradeProfile") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.AgentPoolsClient", "GetUpgradeProfile", err.Error()) - } - - req, err := client.GetUpgradeProfilePreparer(ctx, resourceGroupName, resourceName, agentPoolName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetUpgradeProfile", nil, "Failure preparing request") - return - } - - resp, err := client.GetUpgradeProfileSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetUpgradeProfile", resp, "Failure sending request") - return - } - - result, err = client.GetUpgradeProfileResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "GetUpgradeProfile", resp, "Failure responding to request") - return - } - - return -} - -// GetUpgradeProfilePreparer prepares the GetUpgradeProfile request. 
-func (client AgentPoolsClient) GetUpgradeProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "agentPoolName": autorest.Encode("path", agentPoolName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetUpgradeProfileSender sends the GetUpgradeProfile request. The method will close the -// http.Response Body if it receives an error. -func (client AgentPoolsClient) GetUpgradeProfileSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetUpgradeProfileResponder handles the response to the GetUpgradeProfile request. The method always -// closes the http.Response Body. -func (client AgentPoolsClient) GetUpgradeProfileResponder(resp *http.Response) (result AgentPoolUpgradeProfile, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List sends the list request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. 
-func (client AgentPoolsClient) List(ctx context.Context, resourceGroupName string, resourceName string) (result AgentPoolListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.List") - defer func() { - sc := -1 - if result.aplr.Response.Response != nil { - sc = result.aplr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.AgentPoolsClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.aplr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "List", resp, "Failure sending request") - return - } - - result.aplr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "List", resp, "Failure responding to request") - return - } - if result.aplr.hasNextLink() && result.aplr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client AgentPoolsClient) ListPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client AgentPoolsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client AgentPoolsClient) ListResponder(resp *http.Response) (result AgentPoolListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client AgentPoolsClient) listNextResults(ctx context.Context, lastResults AgentPoolListResult) (result AgentPoolListResult, err error) { - req, err := lastResults.agentPoolListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client AgentPoolsClient) ListComplete(ctx context.Context, resourceGroupName string, resourceName string) (result AgentPoolListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, resourceGroupName, resourceName) - return -} - -// UpgradeNodeImageVersion upgrading the node image version of an agent pool applies the newest OS and runtime updates -// to the nodes. AKS provides one new image per week with the latest updates. For more details on node image versions, -// see: https://docs.microsoft.com/azure/aks/node-image-upgrade -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// agentPoolName - the name of the agent pool. 
-func (client AgentPoolsClient) UpgradeNodeImageVersion(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (result AgentPoolsUpgradeNodeImageVersionFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolsClient.UpgradeNodeImageVersion") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.AgentPoolsClient", "UpgradeNodeImageVersion", err.Error()) - } - - req, err := client.UpgradeNodeImageVersionPreparer(ctx, resourceGroupName, resourceName, agentPoolName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "UpgradeNodeImageVersion", nil, "Failure preparing request") - return - } - - result, err = client.UpgradeNodeImageVersionSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsClient", "UpgradeNodeImageVersion", result.Response(), "Failure sending request") - return - } - - return -} - -// UpgradeNodeImageVersionPreparer prepares the UpgradeNodeImageVersion request. -func (client AgentPoolsClient) UpgradeNodeImageVersionPreparer(ctx context.Context, resourceGroupName string, resourceName string, agentPoolName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "agentPoolName": autorest.Encode("path", agentPoolName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpgradeNodeImageVersionSender sends the UpgradeNodeImageVersion request. The method will close the -// http.Response Body if it receives an error. 
-func (client AgentPoolsClient) UpgradeNodeImageVersionSender(req *http.Request) (future AgentPoolsUpgradeNodeImageVersionFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// UpgradeNodeImageVersionResponder handles the response to the UpgradeNodeImageVersion request. The method always -// closes the http.Response Body. -func (client AgentPoolsClient) UpgradeNodeImageVersionResponder(resp *http.Response) (result AgentPool, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/client.go deleted file mode 100644 index b73f14a3605b..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/client.go +++ /dev/null @@ -1,43 +0,0 @@ -// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details. -// -// Package containerservice implements the Azure ARM Containerservice service API version 2022-03-02-preview. -// -// The Container Service Client. -package containerservice - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/go-autorest/autorest" -) - -const ( - // DefaultBaseURI is the default URI used for the service Containerservice - DefaultBaseURI = "https://management.azure.com" -) - -// BaseClient is the base client for Containerservice. -type BaseClient struct { - autorest.Client - BaseURI string - SubscriptionID string -} - -// New creates an instance of the BaseClient client. -func New(subscriptionID string) BaseClient { - return NewWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with -// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
-func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { - return BaseClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: baseURI, - SubscriptionID: subscriptionID, - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/enums.go deleted file mode 100644 index 60781e75500b..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/enums.go +++ /dev/null @@ -1,984 +0,0 @@ -package containerservice - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// AgentPoolMode enumerates the values for agent pool mode. -type AgentPoolMode string - -const ( - // AgentPoolModeSystem System agent pools are primarily for hosting critical system pods such as CoreDNS - // and metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at - // least 2vCPUs and 4GB of memory. - AgentPoolModeSystem AgentPoolMode = "System" - // AgentPoolModeUser User agent pools are primarily for hosting your application pods. - AgentPoolModeUser AgentPoolMode = "User" -) - -// PossibleAgentPoolModeValues returns an array of possible values for the AgentPoolMode const type. -func PossibleAgentPoolModeValues() []AgentPoolMode { - return []AgentPoolMode{AgentPoolModeSystem, AgentPoolModeUser} -} - -// AgentPoolType enumerates the values for agent pool type. -type AgentPoolType string - -const ( - // AgentPoolTypeAvailabilitySet Use of this is strongly discouraged. - AgentPoolTypeAvailabilitySet AgentPoolType = "AvailabilitySet" - // AgentPoolTypeVirtualMachineScaleSets Create an Agent Pool backed by a Virtual Machine Scale Set. - AgentPoolTypeVirtualMachineScaleSets AgentPoolType = "VirtualMachineScaleSets" -) - -// PossibleAgentPoolTypeValues returns an array of possible values for the AgentPoolType const type. -func PossibleAgentPoolTypeValues() []AgentPoolType { - return []AgentPoolType{AgentPoolTypeAvailabilitySet, AgentPoolTypeVirtualMachineScaleSets} -} - -// Code enumerates the values for code. -type Code string - -const ( - // CodeRunning The cluster is running. - CodeRunning Code = "Running" - // CodeStopped The cluster is stopped. - CodeStopped Code = "Stopped" -) - -// PossibleCodeValues returns an array of possible values for the Code const type. -func PossibleCodeValues() []Code { - return []Code{CodeRunning, CodeStopped} -} - -// ConnectionStatus enumerates the values for connection status. -type ConnectionStatus string - -const ( - // ConnectionStatusApproved ... - ConnectionStatusApproved ConnectionStatus = "Approved" - // ConnectionStatusDisconnected ... - ConnectionStatusDisconnected ConnectionStatus = "Disconnected" - // ConnectionStatusPending ... - ConnectionStatusPending ConnectionStatus = "Pending" - // ConnectionStatusRejected ... - ConnectionStatusRejected ConnectionStatus = "Rejected" -) - -// PossibleConnectionStatusValues returns an array of possible values for the ConnectionStatus const type. 
-func PossibleConnectionStatusValues() []ConnectionStatus { - return []ConnectionStatus{ConnectionStatusApproved, ConnectionStatusDisconnected, ConnectionStatusPending, ConnectionStatusRejected} -} - -// CreatedByType enumerates the values for created by type. -type CreatedByType string - -const ( - // CreatedByTypeApplication ... - CreatedByTypeApplication CreatedByType = "Application" - // CreatedByTypeKey ... - CreatedByTypeKey CreatedByType = "Key" - // CreatedByTypeManagedIdentity ... - CreatedByTypeManagedIdentity CreatedByType = "ManagedIdentity" - // CreatedByTypeUser ... - CreatedByTypeUser CreatedByType = "User" -) - -// PossibleCreatedByTypeValues returns an array of possible values for the CreatedByType const type. -func PossibleCreatedByTypeValues() []CreatedByType { - return []CreatedByType{CreatedByTypeApplication, CreatedByTypeKey, CreatedByTypeManagedIdentity, CreatedByTypeUser} -} - -// Expander enumerates the values for expander. -type Expander string - -const ( - // ExpanderLeastWaste Selects the node group that will have the least idle CPU (if tied, unused memory) - // after scale-up. This is useful when you have different classes of nodes, for example, high CPU or high - // memory nodes, and only want to expand those when there are pending pods that need a lot of those - // resources. - ExpanderLeastWaste Expander = "least-waste" - // ExpanderMostPods Selects the node group that would be able to schedule the most pods when scaling up. - // This is useful when you are using nodeSelector to make sure certain pods land on certain nodes. Note - // that this won't cause the autoscaler to select bigger nodes vs. smaller, as it can add multiple smaller - // nodes at once. - ExpanderMostPods Expander = "most-pods" - // ExpanderPriority Selects the node group that has the highest priority assigned by the user. It's - // configuration is described in more details - // [here](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/expander/priority/readme.md). - ExpanderPriority Expander = "priority" - // ExpanderRandom Used when you don't have a particular need for the node groups to scale differently. - ExpanderRandom Expander = "random" -) - -// PossibleExpanderValues returns an array of possible values for the Expander const type. -func PossibleExpanderValues() []Expander { - return []Expander{ExpanderLeastWaste, ExpanderMostPods, ExpanderPriority, ExpanderRandom} -} - -// ExtendedLocationTypes enumerates the values for extended location types. -type ExtendedLocationTypes string - -const ( - // ExtendedLocationTypesEdgeZone ... - ExtendedLocationTypesEdgeZone ExtendedLocationTypes = "EdgeZone" -) - -// PossibleExtendedLocationTypesValues returns an array of possible values for the ExtendedLocationTypes const type. -func PossibleExtendedLocationTypesValues() []ExtendedLocationTypes { - return []ExtendedLocationTypes{ExtendedLocationTypesEdgeZone} -} - -// Format enumerates the values for format. -type Format string - -const ( - // FormatAzure Return azure auth-provider kubeconfig. This format is deprecated in 1.22 and will be fully - // removed in 1.25. - FormatAzure Format = "azure" - // FormatExec Return exec format kubeconfig. This format requires kubelogin binary in the path. - FormatExec Format = "exec" -) - -// PossibleFormatValues returns an array of possible values for the Format const type. -func PossibleFormatValues() []Format { - return []Format{FormatAzure, FormatExec} -} - -// GPUInstanceProfile enumerates the values for gpu instance profile. 
-type GPUInstanceProfile string - -const ( - // GPUInstanceProfileMIG1g ... - GPUInstanceProfileMIG1g GPUInstanceProfile = "MIG1g" - // GPUInstanceProfileMIG2g ... - GPUInstanceProfileMIG2g GPUInstanceProfile = "MIG2g" - // GPUInstanceProfileMIG3g ... - GPUInstanceProfileMIG3g GPUInstanceProfile = "MIG3g" - // GPUInstanceProfileMIG4g ... - GPUInstanceProfileMIG4g GPUInstanceProfile = "MIG4g" - // GPUInstanceProfileMIG7g ... - GPUInstanceProfileMIG7g GPUInstanceProfile = "MIG7g" -) - -// PossibleGPUInstanceProfileValues returns an array of possible values for the GPUInstanceProfile const type. -func PossibleGPUInstanceProfileValues() []GPUInstanceProfile { - return []GPUInstanceProfile{GPUInstanceProfileMIG1g, GPUInstanceProfileMIG2g, GPUInstanceProfileMIG3g, GPUInstanceProfileMIG4g, GPUInstanceProfileMIG7g} -} - -// IPFamily enumerates the values for ip family. -type IPFamily string - -const ( - // IPFamilyIPv4 ... - IPFamilyIPv4 IPFamily = "IPv4" - // IPFamilyIPv6 ... - IPFamilyIPv6 IPFamily = "IPv6" -) - -// PossibleIPFamilyValues returns an array of possible values for the IPFamily const type. -func PossibleIPFamilyValues() []IPFamily { - return []IPFamily{IPFamilyIPv4, IPFamilyIPv6} -} - -// KubeletDiskType enumerates the values for kubelet disk type. -type KubeletDiskType string - -const ( - // KubeletDiskTypeOS Kubelet will use the OS disk for its data. - KubeletDiskTypeOS KubeletDiskType = "OS" - // KubeletDiskTypeTemporary Kubelet will use the temporary disk for its data. - KubeletDiskTypeTemporary KubeletDiskType = "Temporary" -) - -// PossibleKubeletDiskTypeValues returns an array of possible values for the KubeletDiskType const type. -func PossibleKubeletDiskTypeValues() []KubeletDiskType { - return []KubeletDiskType{KubeletDiskTypeOS, KubeletDiskTypeTemporary} -} - -// LicenseType enumerates the values for license type. -type LicenseType string - -const ( - // LicenseTypeNone No additional licensing is applied. - LicenseTypeNone LicenseType = "None" - // LicenseTypeWindowsServer Enables Azure Hybrid User Benefits for Windows VMs. - LicenseTypeWindowsServer LicenseType = "Windows_Server" -) - -// PossibleLicenseTypeValues returns an array of possible values for the LicenseType const type. -func PossibleLicenseTypeValues() []LicenseType { - return []LicenseType{LicenseTypeNone, LicenseTypeWindowsServer} -} - -// LoadBalancerSku enumerates the values for load balancer sku. -type LoadBalancerSku string - -const ( - // LoadBalancerSkuBasic Use a basic Load Balancer with limited functionality. - LoadBalancerSkuBasic LoadBalancerSku = "basic" - // LoadBalancerSkuStandard Use a a standard Load Balancer. This is the recommended Load Balancer SKU. For - // more information about on working with the load balancer in the managed cluster, see the [standard Load - // Balancer](https://docs.microsoft.com/azure/aks/load-balancer-standard) article. - LoadBalancerSkuStandard LoadBalancerSku = "standard" -) - -// PossibleLoadBalancerSkuValues returns an array of possible values for the LoadBalancerSku const type. -func PossibleLoadBalancerSkuValues() []LoadBalancerSku { - return []LoadBalancerSku{LoadBalancerSkuBasic, LoadBalancerSkuStandard} -} - -// ManagedClusterPodIdentityProvisioningState enumerates the values for managed cluster pod identity -// provisioning state. -type ManagedClusterPodIdentityProvisioningState string - -const ( - // ManagedClusterPodIdentityProvisioningStateAssigned ... 
- ManagedClusterPodIdentityProvisioningStateAssigned ManagedClusterPodIdentityProvisioningState = "Assigned" - // ManagedClusterPodIdentityProvisioningStateDeleting ... - ManagedClusterPodIdentityProvisioningStateDeleting ManagedClusterPodIdentityProvisioningState = "Deleting" - // ManagedClusterPodIdentityProvisioningStateFailed ... - ManagedClusterPodIdentityProvisioningStateFailed ManagedClusterPodIdentityProvisioningState = "Failed" - // ManagedClusterPodIdentityProvisioningStateUpdating ... - ManagedClusterPodIdentityProvisioningStateUpdating ManagedClusterPodIdentityProvisioningState = "Updating" -) - -// PossibleManagedClusterPodIdentityProvisioningStateValues returns an array of possible values for the ManagedClusterPodIdentityProvisioningState const type. -func PossibleManagedClusterPodIdentityProvisioningStateValues() []ManagedClusterPodIdentityProvisioningState { - return []ManagedClusterPodIdentityProvisioningState{ManagedClusterPodIdentityProvisioningStateAssigned, ManagedClusterPodIdentityProvisioningStateDeleting, ManagedClusterPodIdentityProvisioningStateFailed, ManagedClusterPodIdentityProvisioningStateUpdating} -} - -// ManagedClusterSKUName enumerates the values for managed cluster sku name. -type ManagedClusterSKUName string - -const ( - // ManagedClusterSKUNameBasic ... - ManagedClusterSKUNameBasic ManagedClusterSKUName = "Basic" -) - -// PossibleManagedClusterSKUNameValues returns an array of possible values for the ManagedClusterSKUName const type. -func PossibleManagedClusterSKUNameValues() []ManagedClusterSKUName { - return []ManagedClusterSKUName{ManagedClusterSKUNameBasic} -} - -// ManagedClusterSKUTier enumerates the values for managed cluster sku tier. -type ManagedClusterSKUTier string - -const ( - // ManagedClusterSKUTierFree No guaranteed SLA, no additional charges. Free tier clusters have an SLO of - // 99.5%. - ManagedClusterSKUTierFree ManagedClusterSKUTier = "Free" - // ManagedClusterSKUTierPaid Guarantees 99.95% availability of the Kubernetes API server endpoint for - // clusters that use Availability Zones and 99.9% of availability for clusters that don't use Availability - // Zones. - ManagedClusterSKUTierPaid ManagedClusterSKUTier = "Paid" -) - -// PossibleManagedClusterSKUTierValues returns an array of possible values for the ManagedClusterSKUTier const type. -func PossibleManagedClusterSKUTierValues() []ManagedClusterSKUTier { - return []ManagedClusterSKUTier{ManagedClusterSKUTierFree, ManagedClusterSKUTierPaid} -} - -// NetworkMode enumerates the values for network mode. -type NetworkMode string - -const ( - // NetworkModeBridge This is no longer supported - NetworkModeBridge NetworkMode = "bridge" - // NetworkModeTransparent No bridge is created. Intra-VM Pod to Pod communication is through IP routes - // created by Azure CNI. See [Transparent Mode](https://docs.microsoft.com/azure/aks/faq#transparent-mode) - // for more information. - NetworkModeTransparent NetworkMode = "transparent" -) - -// PossibleNetworkModeValues returns an array of possible values for the NetworkMode const type. -func PossibleNetworkModeValues() []NetworkMode { - return []NetworkMode{NetworkModeBridge, NetworkModeTransparent} -} - -// NetworkPlugin enumerates the values for network plugin. -type NetworkPlugin string - -const ( - // NetworkPluginAzure Use the Azure CNI network plugin. See [Azure CNI (advanced) - // networking](https://docs.microsoft.com/azure/aks/concepts-network#azure-cni-advanced-networking) for - // more information. 
- NetworkPluginAzure NetworkPlugin = "azure" - // NetworkPluginKubenet Use the Kubenet network plugin. See [Kubenet (basic) - // networking](https://docs.microsoft.com/azure/aks/concepts-network#kubenet-basic-networking) for more - // information. - NetworkPluginKubenet NetworkPlugin = "kubenet" - // NetworkPluginNone Do not use a network plugin. A custom CNI will need to be installed after cluster - // creation for networking functionality. - NetworkPluginNone NetworkPlugin = "none" -) - -// PossibleNetworkPluginValues returns an array of possible values for the NetworkPlugin const type. -func PossibleNetworkPluginValues() []NetworkPlugin { - return []NetworkPlugin{NetworkPluginAzure, NetworkPluginKubenet, NetworkPluginNone} -} - -// NetworkPolicy enumerates the values for network policy. -type NetworkPolicy string - -const ( - // NetworkPolicyAzure Use Azure network policies. See [differences between Azure and Calico - // policies](https://docs.microsoft.com/azure/aks/use-network-policies#differences-between-azure-and-calico-policies-and-their-capabilities) - // for more information. - NetworkPolicyAzure NetworkPolicy = "azure" - // NetworkPolicyCalico Use Calico network policies. See [differences between Azure and Calico - // policies](https://docs.microsoft.com/azure/aks/use-network-policies#differences-between-azure-and-calico-policies-and-their-capabilities) - // for more information. - NetworkPolicyCalico NetworkPolicy = "calico" -) - -// PossibleNetworkPolicyValues returns an array of possible values for the NetworkPolicy const type. -func PossibleNetworkPolicyValues() []NetworkPolicy { - return []NetworkPolicy{NetworkPolicyAzure, NetworkPolicyCalico} -} - -// OSDiskType enumerates the values for os disk type. -type OSDiskType string - -const ( - // OSDiskTypeEphemeral Ephemeral OS disks are stored only on the host machine, just like a temporary disk. - // This provides lower read/write latency, along with faster node scaling and cluster upgrades. - OSDiskTypeEphemeral OSDiskType = "Ephemeral" - // OSDiskTypeManaged Azure replicates the operating system disk for a virtual machine to Azure storage to - // avoid data loss should the VM need to be relocated to another host. Since containers aren't designed to - // have local state persisted, this behavior offers limited value while providing some drawbacks, including - // slower node provisioning and higher read/write latency. - OSDiskTypeManaged OSDiskType = "Managed" -) - -// PossibleOSDiskTypeValues returns an array of possible values for the OSDiskType const type. -func PossibleOSDiskTypeValues() []OSDiskType { - return []OSDiskType{OSDiskTypeEphemeral, OSDiskTypeManaged} -} - -// OSSKU enumerates the values for ossku. -type OSSKU string - -const ( - // OSSKUCBLMariner ... - OSSKUCBLMariner OSSKU = "CBLMariner" - // OSSKUUbuntu ... - OSSKUUbuntu OSSKU = "Ubuntu" -) - -// PossibleOSSKUValues returns an array of possible values for the OSSKU const type. -func PossibleOSSKUValues() []OSSKU { - return []OSSKU{OSSKUCBLMariner, OSSKUUbuntu} -} - -// OSType enumerates the values for os type. -type OSType string - -const ( - // OSTypeLinux Use Linux. - OSTypeLinux OSType = "Linux" - // OSTypeWindows Use Windows. - OSTypeWindows OSType = "Windows" -) - -// PossibleOSTypeValues returns an array of possible values for the OSType const type. -func PossibleOSTypeValues() []OSType { - return []OSType{OSTypeLinux, OSTypeWindows} -} - -// OutboundType enumerates the values for outbound type. 
-type OutboundType string - -const ( - // OutboundTypeLoadBalancer The load balancer is used for egress through an AKS assigned public IP. This - // supports Kubernetes services of type 'loadBalancer'. For more information see [outbound type - // loadbalancer](https://docs.microsoft.com/azure/aks/egress-outboundtype#outbound-type-of-loadbalancer). - OutboundTypeLoadBalancer OutboundType = "loadBalancer" - // OutboundTypeManagedNATGateway The AKS-managed NAT gateway is used for egress. - OutboundTypeManagedNATGateway OutboundType = "managedNATGateway" - // OutboundTypeUserAssignedNATGateway The user-assigned NAT gateway associated to the cluster subnet is - // used for egress. This is an advanced scenario and requires proper network configuration. - OutboundTypeUserAssignedNATGateway OutboundType = "userAssignedNATGateway" - // OutboundTypeUserDefinedRouting Egress paths must be defined by the user. This is an advanced scenario - // and requires proper network configuration. For more information see [outbound type - // userDefinedRouting](https://docs.microsoft.com/azure/aks/egress-outboundtype#outbound-type-of-userdefinedrouting). - OutboundTypeUserDefinedRouting OutboundType = "userDefinedRouting" -) - -// PossibleOutboundTypeValues returns an array of possible values for the OutboundType const type. -func PossibleOutboundTypeValues() []OutboundType { - return []OutboundType{OutboundTypeLoadBalancer, OutboundTypeManagedNATGateway, OutboundTypeUserAssignedNATGateway, OutboundTypeUserDefinedRouting} -} - -// PrivateEndpointConnectionProvisioningState enumerates the values for private endpoint connection -// provisioning state. -type PrivateEndpointConnectionProvisioningState string - -const ( - // PrivateEndpointConnectionProvisioningStateCreating ... - PrivateEndpointConnectionProvisioningStateCreating PrivateEndpointConnectionProvisioningState = "Creating" - // PrivateEndpointConnectionProvisioningStateDeleting ... - PrivateEndpointConnectionProvisioningStateDeleting PrivateEndpointConnectionProvisioningState = "Deleting" - // PrivateEndpointConnectionProvisioningStateFailed ... - PrivateEndpointConnectionProvisioningStateFailed PrivateEndpointConnectionProvisioningState = "Failed" - // PrivateEndpointConnectionProvisioningStateSucceeded ... - PrivateEndpointConnectionProvisioningStateSucceeded PrivateEndpointConnectionProvisioningState = "Succeeded" -) - -// PossiblePrivateEndpointConnectionProvisioningStateValues returns an array of possible values for the PrivateEndpointConnectionProvisioningState const type. -func PossiblePrivateEndpointConnectionProvisioningStateValues() []PrivateEndpointConnectionProvisioningState { - return []PrivateEndpointConnectionProvisioningState{PrivateEndpointConnectionProvisioningStateCreating, PrivateEndpointConnectionProvisioningStateDeleting, PrivateEndpointConnectionProvisioningStateFailed, PrivateEndpointConnectionProvisioningStateSucceeded} -} - -// PublicNetworkAccess enumerates the values for public network access. -type PublicNetworkAccess string - -const ( - // PublicNetworkAccessDisabled ... - PublicNetworkAccessDisabled PublicNetworkAccess = "Disabled" - // PublicNetworkAccessEnabled ... - PublicNetworkAccessEnabled PublicNetworkAccess = "Enabled" -) - -// PossiblePublicNetworkAccessValues returns an array of possible values for the PublicNetworkAccess const type. 
-func PossiblePublicNetworkAccessValues() []PublicNetworkAccess { - return []PublicNetworkAccess{PublicNetworkAccessDisabled, PublicNetworkAccessEnabled} -} - -// ResourceIdentityType enumerates the values for resource identity type. -type ResourceIdentityType string - -const ( - // ResourceIdentityTypeNone Do not use a managed identity for the Managed Cluster, service principal will - // be used instead. - ResourceIdentityTypeNone ResourceIdentityType = "None" - // ResourceIdentityTypeSystemAssigned Use an implicitly created system assigned managed identity to manage - // cluster resources. Master components in the control plane such as kube-controller-manager will use the - // system assigned managed identity to manipulate Azure resources. - ResourceIdentityTypeSystemAssigned ResourceIdentityType = "SystemAssigned" - // ResourceIdentityTypeUserAssigned Use a user-specified identity to manage cluster resources. Master - // components in the control plane such as kube-controller-manager will use the specified user assigned - // managed identity to manipulate Azure resources. - ResourceIdentityTypeUserAssigned ResourceIdentityType = "UserAssigned" -) - -// PossibleResourceIdentityTypeValues returns an array of possible values for the ResourceIdentityType const type. -func PossibleResourceIdentityTypeValues() []ResourceIdentityType { - return []ResourceIdentityType{ResourceIdentityTypeNone, ResourceIdentityTypeSystemAssigned, ResourceIdentityTypeUserAssigned} -} - -// ScaleDownMode enumerates the values for scale down mode. -type ScaleDownMode string - -const ( - // ScaleDownModeDeallocate Attempt to start deallocated instances (if they exist) during scale up and - // deallocate instances during scale down. - ScaleDownModeDeallocate ScaleDownMode = "Deallocate" - // ScaleDownModeDelete Create new instances during scale up and remove instances during scale down. - ScaleDownModeDelete ScaleDownMode = "Delete" -) - -// PossibleScaleDownModeValues returns an array of possible values for the ScaleDownMode const type. -func PossibleScaleDownModeValues() []ScaleDownMode { - return []ScaleDownMode{ScaleDownModeDeallocate, ScaleDownModeDelete} -} - -// ScaleSetEvictionPolicy enumerates the values for scale set eviction policy. -type ScaleSetEvictionPolicy string - -const ( - // ScaleSetEvictionPolicyDeallocate Nodes in the underlying Scale Set of the node pool are set to the - // stopped-deallocated state upon eviction. Nodes in the stopped-deallocated state count against your - // compute quota and can cause issues with cluster scaling or upgrading. - ScaleSetEvictionPolicyDeallocate ScaleSetEvictionPolicy = "Deallocate" - // ScaleSetEvictionPolicyDelete Nodes in the underlying Scale Set of the node pool are deleted when they're - // evicted. - ScaleSetEvictionPolicyDelete ScaleSetEvictionPolicy = "Delete" -) - -// PossibleScaleSetEvictionPolicyValues returns an array of possible values for the ScaleSetEvictionPolicy const type. -func PossibleScaleSetEvictionPolicyValues() []ScaleSetEvictionPolicy { - return []ScaleSetEvictionPolicy{ScaleSetEvictionPolicyDeallocate, ScaleSetEvictionPolicyDelete} -} - -// ScaleSetPriority enumerates the values for scale set priority. -type ScaleSetPriority string - -const ( - // ScaleSetPriorityRegular Regular VMs will be used. - ScaleSetPriorityRegular ScaleSetPriority = "Regular" - // ScaleSetPrioritySpot Spot priority VMs will be used. There is no SLA for spot nodes. See [spot on - // AKS](https://docs.microsoft.com/azure/aks/spot-node-pool) for more information. 
- ScaleSetPrioritySpot ScaleSetPriority = "Spot" -) - -// PossibleScaleSetPriorityValues returns an array of possible values for the ScaleSetPriority const type. -func PossibleScaleSetPriorityValues() []ScaleSetPriority { - return []ScaleSetPriority{ScaleSetPriorityRegular, ScaleSetPrioritySpot} -} - -// SnapshotType enumerates the values for snapshot type. -type SnapshotType string - -const ( - // SnapshotTypeManagedCluster The snapshot is a snapshot of a managed cluster. - SnapshotTypeManagedCluster SnapshotType = "ManagedCluster" - // SnapshotTypeNodePool The snapshot is a snapshot of a node pool. - SnapshotTypeNodePool SnapshotType = "NodePool" -) - -// PossibleSnapshotTypeValues returns an array of possible values for the SnapshotType const type. -func PossibleSnapshotTypeValues() []SnapshotType { - return []SnapshotType{SnapshotTypeManagedCluster, SnapshotTypeNodePool} -} - -// StorageProfileTypes enumerates the values for storage profile types. -type StorageProfileTypes string - -const ( - // StorageProfileTypesManagedDisks ... - StorageProfileTypesManagedDisks StorageProfileTypes = "ManagedDisks" - // StorageProfileTypesStorageAccount ... - StorageProfileTypesStorageAccount StorageProfileTypes = "StorageAccount" -) - -// PossibleStorageProfileTypesValues returns an array of possible values for the StorageProfileTypes const type. -func PossibleStorageProfileTypesValues() []StorageProfileTypes { - return []StorageProfileTypes{StorageProfileTypesManagedDisks, StorageProfileTypesStorageAccount} -} - -// UpgradeChannel enumerates the values for upgrade channel. -type UpgradeChannel string - -const ( - // UpgradeChannelNodeImage Automatically upgrade the node image to the latest version available. Microsoft - // provides patches and new images for image nodes frequently (usually weekly), but your running nodes - // won't get the new images unless you do a node image upgrade. Turning on the node-image channel will - // automatically update your node images whenever a new version is available. - UpgradeChannelNodeImage UpgradeChannel = "node-image" - // UpgradeChannelNone Disables auto-upgrades and keeps the cluster at its current version of Kubernetes. - UpgradeChannelNone UpgradeChannel = "none" - // UpgradeChannelPatch Automatically upgrade the cluster to the latest supported patch version when it - // becomes available while keeping the minor version the same. For example, if a cluster is running version - // 1.17.7 and versions 1.17.9, 1.18.4, 1.18.6, and 1.19.1 are available, your cluster is upgraded to - // 1.17.9. - UpgradeChannelPatch UpgradeChannel = "patch" - // UpgradeChannelRapid Automatically upgrade the cluster to the latest supported patch release on the - // latest supported minor version. In cases where the cluster is at a version of Kubernetes that is at an - // N-2 minor version where N is the latest supported minor version, the cluster first upgrades to the - // latest supported patch version on N-1 minor version. For example, if a cluster is running version 1.17.7 - // and versions 1.17.9, 1.18.4, 1.18.6, and 1.19.1 are available, your cluster first is upgraded to 1.18.6, - // then is upgraded to 1.19.1. - UpgradeChannelRapid UpgradeChannel = "rapid" - // UpgradeChannelStable Automatically upgrade the cluster to the latest supported patch release on minor - // version N-1, where N is the latest supported minor version. 
For example, if a cluster is running version - // 1.17.7 and versions 1.17.9, 1.18.4, 1.18.6, and 1.19.1 are available, your cluster is upgraded to - // 1.18.6. - UpgradeChannelStable UpgradeChannel = "stable" -) - -// PossibleUpgradeChannelValues returns an array of possible values for the UpgradeChannel const type. -func PossibleUpgradeChannelValues() []UpgradeChannel { - return []UpgradeChannel{UpgradeChannelNodeImage, UpgradeChannelNone, UpgradeChannelPatch, UpgradeChannelRapid, UpgradeChannelStable} -} - -// VMSizeTypes enumerates the values for vm size types. -type VMSizeTypes string - -const ( - // VMSizeTypesStandardA1 ... - VMSizeTypesStandardA1 VMSizeTypes = "Standard_A1" - // VMSizeTypesStandardA10 ... - VMSizeTypesStandardA10 VMSizeTypes = "Standard_A10" - // VMSizeTypesStandardA11 ... - VMSizeTypesStandardA11 VMSizeTypes = "Standard_A11" - // VMSizeTypesStandardA1V2 ... - VMSizeTypesStandardA1V2 VMSizeTypes = "Standard_A1_v2" - // VMSizeTypesStandardA2 ... - VMSizeTypesStandardA2 VMSizeTypes = "Standard_A2" - // VMSizeTypesStandardA2mV2 ... - VMSizeTypesStandardA2mV2 VMSizeTypes = "Standard_A2m_v2" - // VMSizeTypesStandardA2V2 ... - VMSizeTypesStandardA2V2 VMSizeTypes = "Standard_A2_v2" - // VMSizeTypesStandardA3 ... - VMSizeTypesStandardA3 VMSizeTypes = "Standard_A3" - // VMSizeTypesStandardA4 ... - VMSizeTypesStandardA4 VMSizeTypes = "Standard_A4" - // VMSizeTypesStandardA4mV2 ... - VMSizeTypesStandardA4mV2 VMSizeTypes = "Standard_A4m_v2" - // VMSizeTypesStandardA4V2 ... - VMSizeTypesStandardA4V2 VMSizeTypes = "Standard_A4_v2" - // VMSizeTypesStandardA5 ... - VMSizeTypesStandardA5 VMSizeTypes = "Standard_A5" - // VMSizeTypesStandardA6 ... - VMSizeTypesStandardA6 VMSizeTypes = "Standard_A6" - // VMSizeTypesStandardA7 ... - VMSizeTypesStandardA7 VMSizeTypes = "Standard_A7" - // VMSizeTypesStandardA8 ... - VMSizeTypesStandardA8 VMSizeTypes = "Standard_A8" - // VMSizeTypesStandardA8mV2 ... - VMSizeTypesStandardA8mV2 VMSizeTypes = "Standard_A8m_v2" - // VMSizeTypesStandardA8V2 ... - VMSizeTypesStandardA8V2 VMSizeTypes = "Standard_A8_v2" - // VMSizeTypesStandardA9 ... - VMSizeTypesStandardA9 VMSizeTypes = "Standard_A9" - // VMSizeTypesStandardB2ms ... - VMSizeTypesStandardB2ms VMSizeTypes = "Standard_B2ms" - // VMSizeTypesStandardB2s ... - VMSizeTypesStandardB2s VMSizeTypes = "Standard_B2s" - // VMSizeTypesStandardB4ms ... - VMSizeTypesStandardB4ms VMSizeTypes = "Standard_B4ms" - // VMSizeTypesStandardB8ms ... - VMSizeTypesStandardB8ms VMSizeTypes = "Standard_B8ms" - // VMSizeTypesStandardD1 ... - VMSizeTypesStandardD1 VMSizeTypes = "Standard_D1" - // VMSizeTypesStandardD11 ... - VMSizeTypesStandardD11 VMSizeTypes = "Standard_D11" - // VMSizeTypesStandardD11V2 ... - VMSizeTypesStandardD11V2 VMSizeTypes = "Standard_D11_v2" - // VMSizeTypesStandardD11V2Promo ... - VMSizeTypesStandardD11V2Promo VMSizeTypes = "Standard_D11_v2_Promo" - // VMSizeTypesStandardD12 ... - VMSizeTypesStandardD12 VMSizeTypes = "Standard_D12" - // VMSizeTypesStandardD12V2 ... - VMSizeTypesStandardD12V2 VMSizeTypes = "Standard_D12_v2" - // VMSizeTypesStandardD12V2Promo ... - VMSizeTypesStandardD12V2Promo VMSizeTypes = "Standard_D12_v2_Promo" - // VMSizeTypesStandardD13 ... - VMSizeTypesStandardD13 VMSizeTypes = "Standard_D13" - // VMSizeTypesStandardD13V2 ... - VMSizeTypesStandardD13V2 VMSizeTypes = "Standard_D13_v2" - // VMSizeTypesStandardD13V2Promo ... - VMSizeTypesStandardD13V2Promo VMSizeTypes = "Standard_D13_v2_Promo" - // VMSizeTypesStandardD14 ... 
- VMSizeTypesStandardD14 VMSizeTypes = "Standard_D14" - // VMSizeTypesStandardD14V2 ... - VMSizeTypesStandardD14V2 VMSizeTypes = "Standard_D14_v2" - // VMSizeTypesStandardD14V2Promo ... - VMSizeTypesStandardD14V2Promo VMSizeTypes = "Standard_D14_v2_Promo" - // VMSizeTypesStandardD15V2 ... - VMSizeTypesStandardD15V2 VMSizeTypes = "Standard_D15_v2" - // VMSizeTypesStandardD16sV3 ... - VMSizeTypesStandardD16sV3 VMSizeTypes = "Standard_D16s_v3" - // VMSizeTypesStandardD16V3 ... - VMSizeTypesStandardD16V3 VMSizeTypes = "Standard_D16_v3" - // VMSizeTypesStandardD1V2 ... - VMSizeTypesStandardD1V2 VMSizeTypes = "Standard_D1_v2" - // VMSizeTypesStandardD2 ... - VMSizeTypesStandardD2 VMSizeTypes = "Standard_D2" - // VMSizeTypesStandardD2sV3 ... - VMSizeTypesStandardD2sV3 VMSizeTypes = "Standard_D2s_v3" - // VMSizeTypesStandardD2V2 ... - VMSizeTypesStandardD2V2 VMSizeTypes = "Standard_D2_v2" - // VMSizeTypesStandardD2V2Promo ... - VMSizeTypesStandardD2V2Promo VMSizeTypes = "Standard_D2_v2_Promo" - // VMSizeTypesStandardD2V3 ... - VMSizeTypesStandardD2V3 VMSizeTypes = "Standard_D2_v3" - // VMSizeTypesStandardD3 ... - VMSizeTypesStandardD3 VMSizeTypes = "Standard_D3" - // VMSizeTypesStandardD32sV3 ... - VMSizeTypesStandardD32sV3 VMSizeTypes = "Standard_D32s_v3" - // VMSizeTypesStandardD32V3 ... - VMSizeTypesStandardD32V3 VMSizeTypes = "Standard_D32_v3" - // VMSizeTypesStandardD3V2 ... - VMSizeTypesStandardD3V2 VMSizeTypes = "Standard_D3_v2" - // VMSizeTypesStandardD3V2Promo ... - VMSizeTypesStandardD3V2Promo VMSizeTypes = "Standard_D3_v2_Promo" - // VMSizeTypesStandardD4 ... - VMSizeTypesStandardD4 VMSizeTypes = "Standard_D4" - // VMSizeTypesStandardD4sV3 ... - VMSizeTypesStandardD4sV3 VMSizeTypes = "Standard_D4s_v3" - // VMSizeTypesStandardD4V2 ... - VMSizeTypesStandardD4V2 VMSizeTypes = "Standard_D4_v2" - // VMSizeTypesStandardD4V2Promo ... - VMSizeTypesStandardD4V2Promo VMSizeTypes = "Standard_D4_v2_Promo" - // VMSizeTypesStandardD4V3 ... - VMSizeTypesStandardD4V3 VMSizeTypes = "Standard_D4_v3" - // VMSizeTypesStandardD5V2 ... - VMSizeTypesStandardD5V2 VMSizeTypes = "Standard_D5_v2" - // VMSizeTypesStandardD5V2Promo ... - VMSizeTypesStandardD5V2Promo VMSizeTypes = "Standard_D5_v2_Promo" - // VMSizeTypesStandardD64sV3 ... - VMSizeTypesStandardD64sV3 VMSizeTypes = "Standard_D64s_v3" - // VMSizeTypesStandardD64V3 ... - VMSizeTypesStandardD64V3 VMSizeTypes = "Standard_D64_v3" - // VMSizeTypesStandardD8sV3 ... - VMSizeTypesStandardD8sV3 VMSizeTypes = "Standard_D8s_v3" - // VMSizeTypesStandardD8V3 ... - VMSizeTypesStandardD8V3 VMSizeTypes = "Standard_D8_v3" - // VMSizeTypesStandardDS1 ... - VMSizeTypesStandardDS1 VMSizeTypes = "Standard_DS1" - // VMSizeTypesStandardDS11 ... - VMSizeTypesStandardDS11 VMSizeTypes = "Standard_DS11" - // VMSizeTypesStandardDS11V2 ... - VMSizeTypesStandardDS11V2 VMSizeTypes = "Standard_DS11_v2" - // VMSizeTypesStandardDS11V2Promo ... - VMSizeTypesStandardDS11V2Promo VMSizeTypes = "Standard_DS11_v2_Promo" - // VMSizeTypesStandardDS12 ... - VMSizeTypesStandardDS12 VMSizeTypes = "Standard_DS12" - // VMSizeTypesStandardDS12V2 ... - VMSizeTypesStandardDS12V2 VMSizeTypes = "Standard_DS12_v2" - // VMSizeTypesStandardDS12V2Promo ... - VMSizeTypesStandardDS12V2Promo VMSizeTypes = "Standard_DS12_v2_Promo" - // VMSizeTypesStandardDS13 ... - VMSizeTypesStandardDS13 VMSizeTypes = "Standard_DS13" - // VMSizeTypesStandardDS132V2 ... - VMSizeTypesStandardDS132V2 VMSizeTypes = "Standard_DS13-2_v2" - // VMSizeTypesStandardDS134V2 ... 
- VMSizeTypesStandardDS134V2 VMSizeTypes = "Standard_DS13-4_v2" - // VMSizeTypesStandardDS13V2 ... - VMSizeTypesStandardDS13V2 VMSizeTypes = "Standard_DS13_v2" - // VMSizeTypesStandardDS13V2Promo ... - VMSizeTypesStandardDS13V2Promo VMSizeTypes = "Standard_DS13_v2_Promo" - // VMSizeTypesStandardDS14 ... - VMSizeTypesStandardDS14 VMSizeTypes = "Standard_DS14" - // VMSizeTypesStandardDS144V2 ... - VMSizeTypesStandardDS144V2 VMSizeTypes = "Standard_DS14-4_v2" - // VMSizeTypesStandardDS148V2 ... - VMSizeTypesStandardDS148V2 VMSizeTypes = "Standard_DS14-8_v2" - // VMSizeTypesStandardDS14V2 ... - VMSizeTypesStandardDS14V2 VMSizeTypes = "Standard_DS14_v2" - // VMSizeTypesStandardDS14V2Promo ... - VMSizeTypesStandardDS14V2Promo VMSizeTypes = "Standard_DS14_v2_Promo" - // VMSizeTypesStandardDS15V2 ... - VMSizeTypesStandardDS15V2 VMSizeTypes = "Standard_DS15_v2" - // VMSizeTypesStandardDS1V2 ... - VMSizeTypesStandardDS1V2 VMSizeTypes = "Standard_DS1_v2" - // VMSizeTypesStandardDS2 ... - VMSizeTypesStandardDS2 VMSizeTypes = "Standard_DS2" - // VMSizeTypesStandardDS2V2 ... - VMSizeTypesStandardDS2V2 VMSizeTypes = "Standard_DS2_v2" - // VMSizeTypesStandardDS2V2Promo ... - VMSizeTypesStandardDS2V2Promo VMSizeTypes = "Standard_DS2_v2_Promo" - // VMSizeTypesStandardDS3 ... - VMSizeTypesStandardDS3 VMSizeTypes = "Standard_DS3" - // VMSizeTypesStandardDS3V2 ... - VMSizeTypesStandardDS3V2 VMSizeTypes = "Standard_DS3_v2" - // VMSizeTypesStandardDS3V2Promo ... - VMSizeTypesStandardDS3V2Promo VMSizeTypes = "Standard_DS3_v2_Promo" - // VMSizeTypesStandardDS4 ... - VMSizeTypesStandardDS4 VMSizeTypes = "Standard_DS4" - // VMSizeTypesStandardDS4V2 ... - VMSizeTypesStandardDS4V2 VMSizeTypes = "Standard_DS4_v2" - // VMSizeTypesStandardDS4V2Promo ... - VMSizeTypesStandardDS4V2Promo VMSizeTypes = "Standard_DS4_v2_Promo" - // VMSizeTypesStandardDS5V2 ... - VMSizeTypesStandardDS5V2 VMSizeTypes = "Standard_DS5_v2" - // VMSizeTypesStandardDS5V2Promo ... - VMSizeTypesStandardDS5V2Promo VMSizeTypes = "Standard_DS5_v2_Promo" - // VMSizeTypesStandardE16sV3 ... - VMSizeTypesStandardE16sV3 VMSizeTypes = "Standard_E16s_v3" - // VMSizeTypesStandardE16V3 ... - VMSizeTypesStandardE16V3 VMSizeTypes = "Standard_E16_v3" - // VMSizeTypesStandardE2sV3 ... - VMSizeTypesStandardE2sV3 VMSizeTypes = "Standard_E2s_v3" - // VMSizeTypesStandardE2V3 ... - VMSizeTypesStandardE2V3 VMSizeTypes = "Standard_E2_v3" - // VMSizeTypesStandardE3216sV3 ... - VMSizeTypesStandardE3216sV3 VMSizeTypes = "Standard_E32-16s_v3" - // VMSizeTypesStandardE328sV3 ... - VMSizeTypesStandardE328sV3 VMSizeTypes = "Standard_E32-8s_v3" - // VMSizeTypesStandardE32sV3 ... - VMSizeTypesStandardE32sV3 VMSizeTypes = "Standard_E32s_v3" - // VMSizeTypesStandardE32V3 ... - VMSizeTypesStandardE32V3 VMSizeTypes = "Standard_E32_v3" - // VMSizeTypesStandardE4sV3 ... - VMSizeTypesStandardE4sV3 VMSizeTypes = "Standard_E4s_v3" - // VMSizeTypesStandardE4V3 ... - VMSizeTypesStandardE4V3 VMSizeTypes = "Standard_E4_v3" - // VMSizeTypesStandardE6416sV3 ... - VMSizeTypesStandardE6416sV3 VMSizeTypes = "Standard_E64-16s_v3" - // VMSizeTypesStandardE6432sV3 ... - VMSizeTypesStandardE6432sV3 VMSizeTypes = "Standard_E64-32s_v3" - // VMSizeTypesStandardE64sV3 ... - VMSizeTypesStandardE64sV3 VMSizeTypes = "Standard_E64s_v3" - // VMSizeTypesStandardE64V3 ... - VMSizeTypesStandardE64V3 VMSizeTypes = "Standard_E64_v3" - // VMSizeTypesStandardE8sV3 ... - VMSizeTypesStandardE8sV3 VMSizeTypes = "Standard_E8s_v3" - // VMSizeTypesStandardE8V3 ... 
- VMSizeTypesStandardE8V3 VMSizeTypes = "Standard_E8_v3" - // VMSizeTypesStandardF1 ... - VMSizeTypesStandardF1 VMSizeTypes = "Standard_F1" - // VMSizeTypesStandardF16 ... - VMSizeTypesStandardF16 VMSizeTypes = "Standard_F16" - // VMSizeTypesStandardF16s ... - VMSizeTypesStandardF16s VMSizeTypes = "Standard_F16s" - // VMSizeTypesStandardF16sV2 ... - VMSizeTypesStandardF16sV2 VMSizeTypes = "Standard_F16s_v2" - // VMSizeTypesStandardF1s ... - VMSizeTypesStandardF1s VMSizeTypes = "Standard_F1s" - // VMSizeTypesStandardF2 ... - VMSizeTypesStandardF2 VMSizeTypes = "Standard_F2" - // VMSizeTypesStandardF2s ... - VMSizeTypesStandardF2s VMSizeTypes = "Standard_F2s" - // VMSizeTypesStandardF2sV2 ... - VMSizeTypesStandardF2sV2 VMSizeTypes = "Standard_F2s_v2" - // VMSizeTypesStandardF32sV2 ... - VMSizeTypesStandardF32sV2 VMSizeTypes = "Standard_F32s_v2" - // VMSizeTypesStandardF4 ... - VMSizeTypesStandardF4 VMSizeTypes = "Standard_F4" - // VMSizeTypesStandardF4s ... - VMSizeTypesStandardF4s VMSizeTypes = "Standard_F4s" - // VMSizeTypesStandardF4sV2 ... - VMSizeTypesStandardF4sV2 VMSizeTypes = "Standard_F4s_v2" - // VMSizeTypesStandardF64sV2 ... - VMSizeTypesStandardF64sV2 VMSizeTypes = "Standard_F64s_v2" - // VMSizeTypesStandardF72sV2 ... - VMSizeTypesStandardF72sV2 VMSizeTypes = "Standard_F72s_v2" - // VMSizeTypesStandardF8 ... - VMSizeTypesStandardF8 VMSizeTypes = "Standard_F8" - // VMSizeTypesStandardF8s ... - VMSizeTypesStandardF8s VMSizeTypes = "Standard_F8s" - // VMSizeTypesStandardF8sV2 ... - VMSizeTypesStandardF8sV2 VMSizeTypes = "Standard_F8s_v2" - // VMSizeTypesStandardG1 ... - VMSizeTypesStandardG1 VMSizeTypes = "Standard_G1" - // VMSizeTypesStandardG2 ... - VMSizeTypesStandardG2 VMSizeTypes = "Standard_G2" - // VMSizeTypesStandardG3 ... - VMSizeTypesStandardG3 VMSizeTypes = "Standard_G3" - // VMSizeTypesStandardG4 ... - VMSizeTypesStandardG4 VMSizeTypes = "Standard_G4" - // VMSizeTypesStandardG5 ... - VMSizeTypesStandardG5 VMSizeTypes = "Standard_G5" - // VMSizeTypesStandardGS1 ... - VMSizeTypesStandardGS1 VMSizeTypes = "Standard_GS1" - // VMSizeTypesStandardGS2 ... - VMSizeTypesStandardGS2 VMSizeTypes = "Standard_GS2" - // VMSizeTypesStandardGS3 ... - VMSizeTypesStandardGS3 VMSizeTypes = "Standard_GS3" - // VMSizeTypesStandardGS4 ... - VMSizeTypesStandardGS4 VMSizeTypes = "Standard_GS4" - // VMSizeTypesStandardGS44 ... - VMSizeTypesStandardGS44 VMSizeTypes = "Standard_GS4-4" - // VMSizeTypesStandardGS48 ... - VMSizeTypesStandardGS48 VMSizeTypes = "Standard_GS4-8" - // VMSizeTypesStandardGS5 ... - VMSizeTypesStandardGS5 VMSizeTypes = "Standard_GS5" - // VMSizeTypesStandardGS516 ... - VMSizeTypesStandardGS516 VMSizeTypes = "Standard_GS5-16" - // VMSizeTypesStandardGS58 ... - VMSizeTypesStandardGS58 VMSizeTypes = "Standard_GS5-8" - // VMSizeTypesStandardH16 ... - VMSizeTypesStandardH16 VMSizeTypes = "Standard_H16" - // VMSizeTypesStandardH16m ... - VMSizeTypesStandardH16m VMSizeTypes = "Standard_H16m" - // VMSizeTypesStandardH16mr ... - VMSizeTypesStandardH16mr VMSizeTypes = "Standard_H16mr" - // VMSizeTypesStandardH16r ... - VMSizeTypesStandardH16r VMSizeTypes = "Standard_H16r" - // VMSizeTypesStandardH8 ... - VMSizeTypesStandardH8 VMSizeTypes = "Standard_H8" - // VMSizeTypesStandardH8m ... - VMSizeTypesStandardH8m VMSizeTypes = "Standard_H8m" - // VMSizeTypesStandardL16s ... - VMSizeTypesStandardL16s VMSizeTypes = "Standard_L16s" - // VMSizeTypesStandardL32s ... - VMSizeTypesStandardL32s VMSizeTypes = "Standard_L32s" - // VMSizeTypesStandardL4s ... 
- VMSizeTypesStandardL4s VMSizeTypes = "Standard_L4s" - // VMSizeTypesStandardL8s ... - VMSizeTypesStandardL8s VMSizeTypes = "Standard_L8s" - // VMSizeTypesStandardM12832ms ... - VMSizeTypesStandardM12832ms VMSizeTypes = "Standard_M128-32ms" - // VMSizeTypesStandardM12864ms ... - VMSizeTypesStandardM12864ms VMSizeTypes = "Standard_M128-64ms" - // VMSizeTypesStandardM128ms ... - VMSizeTypesStandardM128ms VMSizeTypes = "Standard_M128ms" - // VMSizeTypesStandardM128s ... - VMSizeTypesStandardM128s VMSizeTypes = "Standard_M128s" - // VMSizeTypesStandardM6416ms ... - VMSizeTypesStandardM6416ms VMSizeTypes = "Standard_M64-16ms" - // VMSizeTypesStandardM6432ms ... - VMSizeTypesStandardM6432ms VMSizeTypes = "Standard_M64-32ms" - // VMSizeTypesStandardM64ms ... - VMSizeTypesStandardM64ms VMSizeTypes = "Standard_M64ms" - // VMSizeTypesStandardM64s ... - VMSizeTypesStandardM64s VMSizeTypes = "Standard_M64s" - // VMSizeTypesStandardNC12 ... - VMSizeTypesStandardNC12 VMSizeTypes = "Standard_NC12" - // VMSizeTypesStandardNC12sV2 ... - VMSizeTypesStandardNC12sV2 VMSizeTypes = "Standard_NC12s_v2" - // VMSizeTypesStandardNC12sV3 ... - VMSizeTypesStandardNC12sV3 VMSizeTypes = "Standard_NC12s_v3" - // VMSizeTypesStandardNC24 ... - VMSizeTypesStandardNC24 VMSizeTypes = "Standard_NC24" - // VMSizeTypesStandardNC24r ... - VMSizeTypesStandardNC24r VMSizeTypes = "Standard_NC24r" - // VMSizeTypesStandardNC24rsV2 ... - VMSizeTypesStandardNC24rsV2 VMSizeTypes = "Standard_NC24rs_v2" - // VMSizeTypesStandardNC24rsV3 ... - VMSizeTypesStandardNC24rsV3 VMSizeTypes = "Standard_NC24rs_v3" - // VMSizeTypesStandardNC24sV2 ... - VMSizeTypesStandardNC24sV2 VMSizeTypes = "Standard_NC24s_v2" - // VMSizeTypesStandardNC24sV3 ... - VMSizeTypesStandardNC24sV3 VMSizeTypes = "Standard_NC24s_v3" - // VMSizeTypesStandardNC6 ... - VMSizeTypesStandardNC6 VMSizeTypes = "Standard_NC6" - // VMSizeTypesStandardNC6sV2 ... - VMSizeTypesStandardNC6sV2 VMSizeTypes = "Standard_NC6s_v2" - // VMSizeTypesStandardNC6sV3 ... - VMSizeTypesStandardNC6sV3 VMSizeTypes = "Standard_NC6s_v3" - // VMSizeTypesStandardND12s ... - VMSizeTypesStandardND12s VMSizeTypes = "Standard_ND12s" - // VMSizeTypesStandardND24rs ... - VMSizeTypesStandardND24rs VMSizeTypes = "Standard_ND24rs" - // VMSizeTypesStandardND24s ... - VMSizeTypesStandardND24s VMSizeTypes = "Standard_ND24s" - // VMSizeTypesStandardND6s ... - VMSizeTypesStandardND6s VMSizeTypes = "Standard_ND6s" - // VMSizeTypesStandardNV12 ... - VMSizeTypesStandardNV12 VMSizeTypes = "Standard_NV12" - // VMSizeTypesStandardNV24 ... - VMSizeTypesStandardNV24 VMSizeTypes = "Standard_NV24" - // VMSizeTypesStandardNV6 ... - VMSizeTypesStandardNV6 VMSizeTypes = "Standard_NV6" -) - -// PossibleVMSizeTypesValues returns an array of possible values for the VMSizeTypes const type. 
-func PossibleVMSizeTypesValues() []VMSizeTypes { - return []VMSizeTypes{VMSizeTypesStandardA1, VMSizeTypesStandardA10, VMSizeTypesStandardA11, VMSizeTypesStandardA1V2, VMSizeTypesStandardA2, VMSizeTypesStandardA2mV2, VMSizeTypesStandardA2V2, VMSizeTypesStandardA3, VMSizeTypesStandardA4, VMSizeTypesStandardA4mV2, VMSizeTypesStandardA4V2, VMSizeTypesStandardA5, VMSizeTypesStandardA6, VMSizeTypesStandardA7, VMSizeTypesStandardA8, VMSizeTypesStandardA8mV2, VMSizeTypesStandardA8V2, VMSizeTypesStandardA9, VMSizeTypesStandardB2ms, VMSizeTypesStandardB2s, VMSizeTypesStandardB4ms, VMSizeTypesStandardB8ms, VMSizeTypesStandardD1, VMSizeTypesStandardD11, VMSizeTypesStandardD11V2, VMSizeTypesStandardD11V2Promo, VMSizeTypesStandardD12, VMSizeTypesStandardD12V2, VMSizeTypesStandardD12V2Promo, VMSizeTypesStandardD13, VMSizeTypesStandardD13V2, VMSizeTypesStandardD13V2Promo, VMSizeTypesStandardD14, VMSizeTypesStandardD14V2, VMSizeTypesStandardD14V2Promo, VMSizeTypesStandardD15V2, VMSizeTypesStandardD16sV3, VMSizeTypesStandardD16V3, VMSizeTypesStandardD1V2, VMSizeTypesStandardD2, VMSizeTypesStandardD2sV3, VMSizeTypesStandardD2V2, VMSizeTypesStandardD2V2Promo, VMSizeTypesStandardD2V3, VMSizeTypesStandardD3, VMSizeTypesStandardD32sV3, VMSizeTypesStandardD32V3, VMSizeTypesStandardD3V2, VMSizeTypesStandardD3V2Promo, VMSizeTypesStandardD4, VMSizeTypesStandardD4sV3, VMSizeTypesStandardD4V2, VMSizeTypesStandardD4V2Promo, VMSizeTypesStandardD4V3, VMSizeTypesStandardD5V2, VMSizeTypesStandardD5V2Promo, VMSizeTypesStandardD64sV3, VMSizeTypesStandardD64V3, VMSizeTypesStandardD8sV3, VMSizeTypesStandardD8V3, VMSizeTypesStandardDS1, VMSizeTypesStandardDS11, VMSizeTypesStandardDS11V2, VMSizeTypesStandardDS11V2Promo, VMSizeTypesStandardDS12, VMSizeTypesStandardDS12V2, VMSizeTypesStandardDS12V2Promo, VMSizeTypesStandardDS13, VMSizeTypesStandardDS132V2, VMSizeTypesStandardDS134V2, VMSizeTypesStandardDS13V2, VMSizeTypesStandardDS13V2Promo, VMSizeTypesStandardDS14, VMSizeTypesStandardDS144V2, VMSizeTypesStandardDS148V2, VMSizeTypesStandardDS14V2, VMSizeTypesStandardDS14V2Promo, VMSizeTypesStandardDS15V2, VMSizeTypesStandardDS1V2, VMSizeTypesStandardDS2, VMSizeTypesStandardDS2V2, VMSizeTypesStandardDS2V2Promo, VMSizeTypesStandardDS3, VMSizeTypesStandardDS3V2, VMSizeTypesStandardDS3V2Promo, VMSizeTypesStandardDS4, VMSizeTypesStandardDS4V2, VMSizeTypesStandardDS4V2Promo, VMSizeTypesStandardDS5V2, VMSizeTypesStandardDS5V2Promo, VMSizeTypesStandardE16sV3, VMSizeTypesStandardE16V3, VMSizeTypesStandardE2sV3, VMSizeTypesStandardE2V3, VMSizeTypesStandardE3216sV3, VMSizeTypesStandardE328sV3, VMSizeTypesStandardE32sV3, VMSizeTypesStandardE32V3, VMSizeTypesStandardE4sV3, VMSizeTypesStandardE4V3, VMSizeTypesStandardE6416sV3, VMSizeTypesStandardE6432sV3, VMSizeTypesStandardE64sV3, VMSizeTypesStandardE64V3, VMSizeTypesStandardE8sV3, VMSizeTypesStandardE8V3, VMSizeTypesStandardF1, VMSizeTypesStandardF16, VMSizeTypesStandardF16s, VMSizeTypesStandardF16sV2, VMSizeTypesStandardF1s, VMSizeTypesStandardF2, VMSizeTypesStandardF2s, VMSizeTypesStandardF2sV2, VMSizeTypesStandardF32sV2, VMSizeTypesStandardF4, VMSizeTypesStandardF4s, VMSizeTypesStandardF4sV2, VMSizeTypesStandardF64sV2, VMSizeTypesStandardF72sV2, VMSizeTypesStandardF8, VMSizeTypesStandardF8s, VMSizeTypesStandardF8sV2, VMSizeTypesStandardG1, VMSizeTypesStandardG2, VMSizeTypesStandardG3, VMSizeTypesStandardG4, VMSizeTypesStandardG5, VMSizeTypesStandardGS1, VMSizeTypesStandardGS2, VMSizeTypesStandardGS3, VMSizeTypesStandardGS4, VMSizeTypesStandardGS44, VMSizeTypesStandardGS48, 
VMSizeTypesStandardGS5, VMSizeTypesStandardGS516, VMSizeTypesStandardGS58, VMSizeTypesStandardH16, VMSizeTypesStandardH16m, VMSizeTypesStandardH16mr, VMSizeTypesStandardH16r, VMSizeTypesStandardH8, VMSizeTypesStandardH8m, VMSizeTypesStandardL16s, VMSizeTypesStandardL32s, VMSizeTypesStandardL4s, VMSizeTypesStandardL8s, VMSizeTypesStandardM12832ms, VMSizeTypesStandardM12864ms, VMSizeTypesStandardM128ms, VMSizeTypesStandardM128s, VMSizeTypesStandardM6416ms, VMSizeTypesStandardM6432ms, VMSizeTypesStandardM64ms, VMSizeTypesStandardM64s, VMSizeTypesStandardNC12, VMSizeTypesStandardNC12sV2, VMSizeTypesStandardNC12sV3, VMSizeTypesStandardNC24, VMSizeTypesStandardNC24r, VMSizeTypesStandardNC24rsV2, VMSizeTypesStandardNC24rsV3, VMSizeTypesStandardNC24sV2, VMSizeTypesStandardNC24sV3, VMSizeTypesStandardNC6, VMSizeTypesStandardNC6sV2, VMSizeTypesStandardNC6sV3, VMSizeTypesStandardND12s, VMSizeTypesStandardND24rs, VMSizeTypesStandardND24s, VMSizeTypesStandardND6s, VMSizeTypesStandardNV12, VMSizeTypesStandardNV24, VMSizeTypesStandardNV6} -} - -// WeekDay enumerates the values for week day. -type WeekDay string - -const ( - // WeekDayFriday ... - WeekDayFriday WeekDay = "Friday" - // WeekDayMonday ... - WeekDayMonday WeekDay = "Monday" - // WeekDaySaturday ... - WeekDaySaturday WeekDay = "Saturday" - // WeekDaySunday ... - WeekDaySunday WeekDay = "Sunday" - // WeekDayThursday ... - WeekDayThursday WeekDay = "Thursday" - // WeekDayTuesday ... - WeekDayTuesday WeekDay = "Tuesday" - // WeekDayWednesday ... - WeekDayWednesday WeekDay = "Wednesday" -) - -// PossibleWeekDayValues returns an array of possible values for the WeekDay const type. -func PossibleWeekDayValues() []WeekDay { - return []WeekDay{WeekDayFriday, WeekDayMonday, WeekDaySaturday, WeekDaySunday, WeekDayThursday, WeekDayTuesday, WeekDayWednesday} -} - -// WorkloadRuntime enumerates the values for workload runtime. -type WorkloadRuntime string - -const ( - // WorkloadRuntimeOCIContainer Nodes will use Kubelet to run standard OCI container workloads. - WorkloadRuntimeOCIContainer WorkloadRuntime = "OCIContainer" - // WorkloadRuntimeWasmWasi Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview). - WorkloadRuntimeWasmWasi WorkloadRuntime = "WasmWasi" -) - -// PossibleWorkloadRuntimeValues returns an array of possible values for the WorkloadRuntime const type. -func PossibleWorkloadRuntimeValues() []WorkloadRuntime { - return []WorkloadRuntime{WorkloadRuntimeOCIContainer, WorkloadRuntimeWasmWasi} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/maintenanceconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/maintenanceconfigurations.go deleted file mode 100644 index 441e6d2d3bb5..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/maintenanceconfigurations.go +++ /dev/null @@ -1,440 +0,0 @@ -package containerservice - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
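The `Possible*Values` helpers in the removed enums file were typically flattened into plain strings when building schema validation lists. A short sketch of that pattern using the `UpgradeChannel` values above; the upstream plugin-sdk validation helper is used here purely for illustration.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// upgradeChannelValidation converts the SDK's enum helper into the []string
// form expected by StringInSlice-style schema validators.
func upgradeChannelValidation() schema.SchemaValidateFunc {
	channels := make([]string, 0)
	for _, v := range containerservice.PossibleUpgradeChannelValues() {
		channels = append(channels, string(v))
	}
	return validation.StringInSlice(channels, false)
}
```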
- -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// MaintenanceConfigurationsClient is the the Container Service Client. -type MaintenanceConfigurationsClient struct { - BaseClient -} - -// NewMaintenanceConfigurationsClient creates an instance of the MaintenanceConfigurationsClient client. -func NewMaintenanceConfigurationsClient(subscriptionID string) MaintenanceConfigurationsClient { - return NewMaintenanceConfigurationsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewMaintenanceConfigurationsClientWithBaseURI creates an instance of the MaintenanceConfigurationsClient client -// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign -// clouds, Azure stack). -func NewMaintenanceConfigurationsClientWithBaseURI(baseURI string, subscriptionID string) MaintenanceConfigurationsClient { - return MaintenanceConfigurationsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate sends the create or update request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// configName - the name of the maintenance configuration. -// parameters - the maintenance configuration to create or update. -func (client MaintenanceConfigurationsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, configName string, parameters MaintenanceConfiguration) (result MaintenanceConfiguration, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MaintenanceConfigurationsClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.MaintenanceConfigurationsClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceName, configName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) 
- if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client MaintenanceConfigurationsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceName string, configName string, parameters MaintenanceConfiguration) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "configName": autorest.Encode("path", configName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - parameters.SystemData = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client MaintenanceConfigurationsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client MaintenanceConfigurationsClient) CreateOrUpdateResponder(resp *http.Response) (result MaintenanceConfiguration, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete sends the delete request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// configName - the name of the maintenance configuration. 
-func (client MaintenanceConfigurationsClient) Delete(ctx context.Context, resourceGroupName string, resourceName string, configName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MaintenanceConfigurationsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.MaintenanceConfigurationsClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, resourceName, configName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client MaintenanceConfigurationsClient) DeletePreparer(ctx context.Context, resourceGroupName string, resourceName string, configName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "configName": autorest.Encode("path", configName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client MaintenanceConfigurationsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. 
The method always -// closes the http.Response Body. -func (client MaintenanceConfigurationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get sends the get request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// configName - the name of the maintenance configuration. -func (client MaintenanceConfigurationsClient) Get(ctx context.Context, resourceGroupName string, resourceName string, configName string) (result MaintenanceConfiguration, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MaintenanceConfigurationsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.MaintenanceConfigurationsClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, resourceName, configName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client MaintenanceConfigurationsClient) GetPreparer(ctx context.Context, resourceGroupName string, resourceName string, configName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "configName": autorest.Encode("path", configName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client MaintenanceConfigurationsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client MaintenanceConfigurationsClient) GetResponder(resp *http.Response) (result MaintenanceConfiguration, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByManagedCluster sends the list by managed cluster request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. 
-func (client MaintenanceConfigurationsClient) ListByManagedCluster(ctx context.Context, resourceGroupName string, resourceName string) (result MaintenanceConfigurationListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MaintenanceConfigurationsClient.ListByManagedCluster") - defer func() { - sc := -1 - if result.mclr.Response.Response != nil { - sc = result.mclr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.MaintenanceConfigurationsClient", "ListByManagedCluster", err.Error()) - } - - result.fn = client.listByManagedClusterNextResults - req, err := client.ListByManagedClusterPreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "ListByManagedCluster", nil, "Failure preparing request") - return - } - - resp, err := client.ListByManagedClusterSender(req) - if err != nil { - result.mclr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "ListByManagedCluster", resp, "Failure sending request") - return - } - - result.mclr, err = client.ListByManagedClusterResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "ListByManagedCluster", resp, "Failure responding to request") - return - } - if result.mclr.hasNextLink() && result.mclr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByManagedClusterPreparer prepares the ListByManagedCluster request. -func (client MaintenanceConfigurationsClient) ListByManagedClusterPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByManagedClusterSender sends the ListByManagedCluster request. 
The method will close the -// http.Response Body if it receives an error. -func (client MaintenanceConfigurationsClient) ListByManagedClusterSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByManagedClusterResponder handles the response to the ListByManagedCluster request. The method always -// closes the http.Response Body. -func (client MaintenanceConfigurationsClient) ListByManagedClusterResponder(resp *http.Response) (result MaintenanceConfigurationListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByManagedClusterNextResults retrieves the next set of results, if any. -func (client MaintenanceConfigurationsClient) listByManagedClusterNextResults(ctx context.Context, lastResults MaintenanceConfigurationListResult) (result MaintenanceConfigurationListResult, err error) { - req, err := lastResults.maintenanceConfigurationListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "listByManagedClusterNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByManagedClusterSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "listByManagedClusterNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByManagedClusterResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.MaintenanceConfigurationsClient", "listByManagedClusterNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByManagedClusterComplete enumerates all values, automatically crossing page boundaries as required. -func (client MaintenanceConfigurationsClient) ListByManagedClusterComplete(ctx context.Context, resourceGroupName string, resourceName string) (result MaintenanceConfigurationListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MaintenanceConfigurationsClient.ListByManagedCluster") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByManagedCluster(ctx, resourceGroupName, resourceName) - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/managedclusters.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/managedclusters.go deleted file mode 100644 index 1c1c93d3eefb..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/managedclusters.go +++ /dev/null @@ -1,2139 +0,0 @@ -package containerservice - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// ManagedClustersClient is the the Container Service Client. -type ManagedClustersClient struct { - BaseClient -} - -// NewManagedClustersClient creates an instance of the ManagedClustersClient client. -func NewManagedClustersClient(subscriptionID string) ManagedClustersClient { - return NewManagedClustersClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewManagedClustersClientWithBaseURI creates an instance of the ManagedClustersClient client using a custom endpoint. -// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewManagedClustersClientWithBaseURI(baseURI string, subscriptionID string) ManagedClustersClient { - return ManagedClustersClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate sends the create or update request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// parameters - the managed cluster to create or update. -func (client ManagedClustersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedCluster) (result ManagedClustersCreateOrUpdateFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.ManagedClusterProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.LinuxProfile", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.LinuxProfile.AdminUsername", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.LinuxProfile.AdminUsername", Name: validation.Pattern, Rule: `^[A-Za-z][-A-Za-z0-9_]*$`, Chain: nil}}}, - {Target: "parameters.ManagedClusterProperties.LinuxProfile.SSH", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.LinuxProfile.SSH.PublicKeys", Name: validation.Null, Rule: true, Chain: nil}}}, - }}, - {Target: "parameters.ManagedClusterProperties.WindowsProfile", Name: 
validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.WindowsProfile.AdminUsername", Name: validation.Null, Rule: true, Chain: nil}}}, - {Target: "parameters.ManagedClusterProperties.ServicePrincipalProfile", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.ServicePrincipalProfile.ClientID", Name: validation.Null, Rule: true, Chain: nil}}}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.PodCidr", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.PodCidr", Name: validation.Pattern, Rule: `^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$`, Chain: nil}}}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.ServiceCidr", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.ServiceCidr", Name: validation.Pattern, Rule: `^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$`, Chain: nil}}}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.DNSServiceIP", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.DNSServiceIP", Name: validation.Pattern, Rule: `^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$`, Chain: nil}}}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.DockerBridgeCidr", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.DockerBridgeCidr", Name: validation.Pattern, Rule: `^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$`, Chain: nil}}}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs.Count", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs.Count", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs.Count", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs.CountIPv6", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs.CountIPv6", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.ManagedOutboundIPs.CountIPv6", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}, - }}, - }}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.AllocatedOutboundPorts", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: 
"parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.AllocatedOutboundPorts", Name: validation.InclusiveMaximum, Rule: int64(64000), Chain: nil}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.AllocatedOutboundPorts", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}, - }}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.IdleTimeoutInMinutes", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.IdleTimeoutInMinutes", Name: validation.InclusiveMaximum, Rule: int64(120), Chain: nil}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile.IdleTimeoutInMinutes", Name: validation.InclusiveMinimum, Rule: int64(4), Chain: nil}, - }}, - }}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.NatGatewayProfile", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.NatGatewayProfile.ManagedOutboundIPProfile", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.NatGatewayProfile.ManagedOutboundIPProfile.Count", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.NatGatewayProfile.ManagedOutboundIPProfile.Count", Name: validation.InclusiveMaximum, Rule: int64(16), Chain: nil}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.NatGatewayProfile.ManagedOutboundIPProfile.Count", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}, - }}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.NatGatewayProfile.IdleTimeoutInMinutes", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.NetworkProfile.NatGatewayProfile.IdleTimeoutInMinutes", Name: validation.InclusiveMaximum, Rule: int64(120), Chain: nil}, - {Target: "parameters.ManagedClusterProperties.NetworkProfile.NatGatewayProfile.IdleTimeoutInMinutes", Name: validation.InclusiveMinimum, Rule: int64(4), Chain: nil}, - }}, - }}, - }}, - }}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - result, err = client.CreateOrUpdateSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "CreateOrUpdate", result.Response(), "Failure sending request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client ManagedClustersClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedCluster) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) CreateOrUpdateSender(req *http.Request) (future ManagedClustersCreateOrUpdateFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) CreateOrUpdateResponder(resp *http.Response) (result ManagedCluster, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete sends the delete request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. 
-// ignorePodDisruptionBudget - ignore-pod-disruption-budget=true to delete those pods on a node without -// considering Pod Disruption Budget -func (client ManagedClustersClient) Delete(ctx context.Context, resourceGroupName string, resourceName string, ignorePodDisruptionBudget *bool) (result ManagedClustersDeleteFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.Delete") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, resourceName, ignorePodDisruptionBudget) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Delete", nil, "Failure preparing request") - return - } - - result, err = client.DeleteSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Delete", result.Response(), "Failure sending request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client ManagedClustersClient) DeletePreparer(ctx context.Context, resourceGroupName string, resourceName string, ignorePodDisruptionBudget *bool) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if ignorePodDisruptionBudget != nil { - queryParameters["ignore-pod-disruption-budget"] = autorest.Encode("query", *ignorePodDisruptionBudget) - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. 
-func (client ManagedClustersClient) DeleteSender(req *http.Request) (future ManagedClustersDeleteFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get sends the get request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -func (client ManagedClustersClient) Get(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedCluster, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client ManagedClustersClient) GetPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) GetResponder(resp *http.Response) (result ManagedCluster, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetAccessProfile **WARNING**: This API will be deprecated. Instead use -// [ListClusterUserCredentials](https://docs.microsoft.com/rest/api/aks/managedclusters/listclusterusercredentials) or -// [ListClusterAdminCredentials](https://docs.microsoft.com/rest/api/aks/managedclusters/listclusteradmincredentials) . -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// roleName - the name of the role for managed cluster accessProfile resource. 
-func (client ManagedClustersClient) GetAccessProfile(ctx context.Context, resourceGroupName string, resourceName string, roleName string) (result ManagedClusterAccessProfile, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.GetAccessProfile") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "GetAccessProfile", err.Error()) - } - - req, err := client.GetAccessProfilePreparer(ctx, resourceGroupName, resourceName, roleName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetAccessProfile", nil, "Failure preparing request") - return - } - - resp, err := client.GetAccessProfileSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetAccessProfile", resp, "Failure sending request") - return - } - - result, err = client.GetAccessProfileResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetAccessProfile", resp, "Failure responding to request") - return - } - - return -} - -// GetAccessProfilePreparer prepares the GetAccessProfile request. -func (client ManagedClustersClient) GetAccessProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, roleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "roleName": autorest.Encode("path", roleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetAccessProfileSender sends the GetAccessProfile request. The method will close the -// http.Response Body if it receives an error. 
-func (client ManagedClustersClient) GetAccessProfileSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetAccessProfileResponder handles the response to the GetAccessProfile request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) GetAccessProfileResponder(resp *http.Response) (result ManagedClusterAccessProfile, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCommandResult sends the get command result request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// commandID - id of the command. -func (client ManagedClustersClient) GetCommandResult(ctx context.Context, resourceGroupName string, resourceName string, commandID string) (result RunCommandResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.GetCommandResult") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "GetCommandResult", err.Error()) - } - - req, err := client.GetCommandResultPreparer(ctx, resourceGroupName, resourceName, commandID) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetCommandResult", nil, "Failure preparing request") - return - } - - resp, err := client.GetCommandResultSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetCommandResult", resp, "Failure sending request") - return - } - - result, err = client.GetCommandResultResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetCommandResult", resp, "Failure responding to request") - return - } - - return -} - -// GetCommandResultPreparer prepares the GetCommandResult request. 
-func (client ManagedClustersClient) GetCommandResultPreparer(ctx context.Context, resourceGroupName string, resourceName string, commandID string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "commandId": autorest.Encode("path", commandID), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/commandResults/{commandId}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCommandResultSender sends the GetCommandResult request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) GetCommandResultSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetCommandResultResponder handles the response to the GetCommandResult request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) GetCommandResultResponder(resp *http.Response) (result RunCommandResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetOSOptions sends the get os options request. -// Parameters: -// location - the name of Azure region. 
-// resourceType - the resource type for which the OS options needs to be returned -func (client ManagedClustersClient) GetOSOptions(ctx context.Context, location string, resourceType string) (result OSOptionProfile, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.GetOSOptions") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: location, - Constraints: []validation.Constraint{{Target: "location", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "GetOSOptions", err.Error()) - } - - req, err := client.GetOSOptionsPreparer(ctx, location, resourceType) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetOSOptions", nil, "Failure preparing request") - return - } - - resp, err := client.GetOSOptionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetOSOptions", resp, "Failure sending request") - return - } - - result, err = client.GetOSOptionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetOSOptions", resp, "Failure responding to request") - return - } - - return -} - -// GetOSOptionsPreparer prepares the GetOSOptions request. -func (client ManagedClustersClient) GetOSOptionsPreparer(ctx context.Context, location string, resourceType string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "location": autorest.Encode("path", location), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(resourceType) > 0 { - queryParameters["resource-type"] = autorest.Encode("query", resourceType) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/osOptions/default", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetOSOptionsSender sends the GetOSOptions request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) GetOSOptionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetOSOptionsResponder handles the response to the GetOSOptions request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) GetOSOptionsResponder(resp *http.Response) (result OSOptionProfile, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetUpgradeProfile sends the get upgrade profile request. 
-// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -func (client ManagedClustersClient) GetUpgradeProfile(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedClusterUpgradeProfile, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.GetUpgradeProfile") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "GetUpgradeProfile", err.Error()) - } - - req, err := client.GetUpgradeProfilePreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetUpgradeProfile", nil, "Failure preparing request") - return - } - - resp, err := client.GetUpgradeProfileSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetUpgradeProfile", resp, "Failure sending request") - return - } - - result, err = client.GetUpgradeProfileResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "GetUpgradeProfile", resp, "Failure responding to request") - return - } - - return -} - -// GetUpgradeProfilePreparer prepares the GetUpgradeProfile request. -func (client ManagedClustersClient) GetUpgradeProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetUpgradeProfileSender sends the GetUpgradeProfile request. The method will close the -// http.Response Body if it receives an error. 
-func (client ManagedClustersClient) GetUpgradeProfileSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetUpgradeProfileResponder handles the response to the GetUpgradeProfile request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) GetUpgradeProfileResponder(resp *http.Response) (result ManagedClusterUpgradeProfile, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List sends the list request. -func (client ManagedClustersClient) List(ctx context.Context) (result ManagedClusterListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.List") - defer func() { - sc := -1 - if result.mclr.Response.Response != nil { - sc = result.mclr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.mclr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "List", resp, "Failure sending request") - return - } - - result.mclr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "List", resp, "Failure responding to request") - return - } - if result.mclr.hasNextLink() && result.mclr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client ManagedClustersClient) ListPreparer(ctx context.Context) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client ManagedClustersClient) ListResponder(resp *http.Response) (result ManagedClusterListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client ManagedClustersClient) listNextResults(ctx context.Context, lastResults ManagedClusterListResult) (result ManagedClusterListResult, err error) { - req, err := lastResults.managedClusterListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client ManagedClustersClient) ListComplete(ctx context.Context) (result ManagedClusterListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx) - return -} - -// ListByResourceGroup sends the list by resource group request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. 
-func (client ManagedClustersClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ManagedClusterListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.mclr.Response.Response != nil { - sc = result.mclr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "ListByResourceGroup", err.Error()) - } - - result.fn = client.listByResourceGroupNextResults - req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.mclr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result.mclr, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListByResourceGroup", resp, "Failure responding to request") - return - } - if result.mclr.hasNextLink() && result.mclr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. -func (client ManagedClustersClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. 
-func (client ManagedClustersClient) ListByResourceGroupResponder(resp *http.Response) (result ManagedClusterListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByResourceGroupNextResults retrieves the next set of results, if any. -func (client ManagedClustersClient) listByResourceGroupNextResults(ctx context.Context, lastResults ManagedClusterListResult) (result ManagedClusterListResult, err error) { - req, err := lastResults.managedClusterListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. -func (client ManagedClustersClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ManagedClusterListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) - return -} - -// ListClusterAdminCredentials sends the list cluster admin credentials request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. 
-// serverFqdn - server fqdn type for credentials to be returned -func (client ManagedClustersClient) ListClusterAdminCredentials(ctx context.Context, resourceGroupName string, resourceName string, serverFqdn string) (result CredentialResults, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListClusterAdminCredentials") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "ListClusterAdminCredentials", err.Error()) - } - - req, err := client.ListClusterAdminCredentialsPreparer(ctx, resourceGroupName, resourceName, serverFqdn) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterAdminCredentials", nil, "Failure preparing request") - return - } - - resp, err := client.ListClusterAdminCredentialsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterAdminCredentials", resp, "Failure sending request") - return - } - - result, err = client.ListClusterAdminCredentialsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterAdminCredentials", resp, "Failure responding to request") - return - } - - return -} - -// ListClusterAdminCredentialsPreparer prepares the ListClusterAdminCredentials request. 
-func (client ManagedClustersClient) ListClusterAdminCredentialsPreparer(ctx context.Context, resourceGroupName string, resourceName string, serverFqdn string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(serverFqdn) > 0 { - queryParameters["server-fqdn"] = autorest.Encode("query", serverFqdn) - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListClusterAdminCredentialsSender sends the ListClusterAdminCredentials request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) ListClusterAdminCredentialsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListClusterAdminCredentialsResponder handles the response to the ListClusterAdminCredentials request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) ListClusterAdminCredentialsResponder(resp *http.Response) (result CredentialResults, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListClusterMonitoringUserCredentials sends the list cluster monitoring user credentials request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. 
-// serverFqdn - server fqdn type for credentials to be returned -func (client ManagedClustersClient) ListClusterMonitoringUserCredentials(ctx context.Context, resourceGroupName string, resourceName string, serverFqdn string) (result CredentialResults, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListClusterMonitoringUserCredentials") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "ListClusterMonitoringUserCredentials", err.Error()) - } - - req, err := client.ListClusterMonitoringUserCredentialsPreparer(ctx, resourceGroupName, resourceName, serverFqdn) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterMonitoringUserCredentials", nil, "Failure preparing request") - return - } - - resp, err := client.ListClusterMonitoringUserCredentialsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterMonitoringUserCredentials", resp, "Failure sending request") - return - } - - result, err = client.ListClusterMonitoringUserCredentialsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterMonitoringUserCredentials", resp, "Failure responding to request") - return - } - - return -} - -// ListClusterMonitoringUserCredentialsPreparer prepares the ListClusterMonitoringUserCredentials request. 
-func (client ManagedClustersClient) ListClusterMonitoringUserCredentialsPreparer(ctx context.Context, resourceGroupName string, resourceName string, serverFqdn string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(serverFqdn) > 0 { - queryParameters["server-fqdn"] = autorest.Encode("query", serverFqdn) - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListClusterMonitoringUserCredentialsSender sends the ListClusterMonitoringUserCredentials request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) ListClusterMonitoringUserCredentialsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListClusterMonitoringUserCredentialsResponder handles the response to the ListClusterMonitoringUserCredentials request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) ListClusterMonitoringUserCredentialsResponder(resp *http.Response) (result CredentialResults, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListClusterUserCredentials sends the list cluster user credentials request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// serverFqdn - server fqdn type for credentials to be returned -// formatParameter - only apply to AAD clusters, specifies the format of returned kubeconfig. Format 'azure' -// will return azure auth-provider kubeconfig; format 'exec' will return exec format kubeconfig, which requires -// kubelogin binary in the path. 
-func (client ManagedClustersClient) ListClusterUserCredentials(ctx context.Context, resourceGroupName string, resourceName string, serverFqdn string, formatParameter Format) (result CredentialResults, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListClusterUserCredentials") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "ListClusterUserCredentials", err.Error()) - } - - req, err := client.ListClusterUserCredentialsPreparer(ctx, resourceGroupName, resourceName, serverFqdn, formatParameter) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterUserCredentials", nil, "Failure preparing request") - return - } - - resp, err := client.ListClusterUserCredentialsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterUserCredentials", resp, "Failure sending request") - return - } - - result, err = client.ListClusterUserCredentialsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListClusterUserCredentials", resp, "Failure responding to request") - return - } - - return -} - -// ListClusterUserCredentialsPreparer prepares the ListClusterUserCredentials request. 
-func (client ManagedClustersClient) ListClusterUserCredentialsPreparer(ctx context.Context, resourceGroupName string, resourceName string, serverFqdn string, formatParameter Format) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(serverFqdn) > 0 { - queryParameters["server-fqdn"] = autorest.Encode("query", serverFqdn) - } - if len(string(formatParameter)) > 0 { - queryParameters["format"] = autorest.Encode("query", formatParameter) - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListClusterUserCredentialsSender sends the ListClusterUserCredentials request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) ListClusterUserCredentialsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListClusterUserCredentialsResponder handles the response to the ListClusterUserCredentials request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) ListClusterUserCredentialsResponder(resp *http.Response) (result CredentialResults, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListOutboundNetworkDependenciesEndpoints gets a list of egress endpoints (network endpoints of all outbound -// dependencies) in the specified managed cluster. The operation returns properties of each egress endpoint. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. 
-func (client ManagedClustersClient) ListOutboundNetworkDependenciesEndpoints(ctx context.Context, resourceGroupName string, resourceName string) (result OutboundEnvironmentEndpointCollectionPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListOutboundNetworkDependenciesEndpoints") - defer func() { - sc := -1 - if result.oeec.Response.Response != nil { - sc = result.oeec.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "ListOutboundNetworkDependenciesEndpoints", err.Error()) - } - - result.fn = client.listOutboundNetworkDependenciesEndpointsNextResults - req, err := client.ListOutboundNetworkDependenciesEndpointsPreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListOutboundNetworkDependenciesEndpoints", nil, "Failure preparing request") - return - } - - resp, err := client.ListOutboundNetworkDependenciesEndpointsSender(req) - if err != nil { - result.oeec.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListOutboundNetworkDependenciesEndpoints", resp, "Failure sending request") - return - } - - result.oeec, err = client.ListOutboundNetworkDependenciesEndpointsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ListOutboundNetworkDependenciesEndpoints", resp, "Failure responding to request") - return - } - if result.oeec.hasNextLink() && result.oeec.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListOutboundNetworkDependenciesEndpointsPreparer prepares the ListOutboundNetworkDependenciesEndpoints request. 
-func (client ManagedClustersClient) ListOutboundNetworkDependenciesEndpointsPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/outboundNetworkDependenciesEndpoints", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListOutboundNetworkDependenciesEndpointsSender sends the ListOutboundNetworkDependenciesEndpoints request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) ListOutboundNetworkDependenciesEndpointsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListOutboundNetworkDependenciesEndpointsResponder handles the response to the ListOutboundNetworkDependenciesEndpoints request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) ListOutboundNetworkDependenciesEndpointsResponder(resp *http.Response) (result OutboundEnvironmentEndpointCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listOutboundNetworkDependenciesEndpointsNextResults retrieves the next set of results, if any. -func (client ManagedClustersClient) listOutboundNetworkDependenciesEndpointsNextResults(ctx context.Context, lastResults OutboundEnvironmentEndpointCollection) (result OutboundEnvironmentEndpointCollection, err error) { - req, err := lastResults.outboundEnvironmentEndpointCollectionPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listOutboundNetworkDependenciesEndpointsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListOutboundNetworkDependenciesEndpointsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listOutboundNetworkDependenciesEndpointsNextResults", resp, "Failure sending next results request") - } - result, err = client.ListOutboundNetworkDependenciesEndpointsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "listOutboundNetworkDependenciesEndpointsNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListOutboundNetworkDependenciesEndpointsComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client ManagedClustersClient) ListOutboundNetworkDependenciesEndpointsComplete(ctx context.Context, resourceGroupName string, resourceName string) (result OutboundEnvironmentEndpointCollectionIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ListOutboundNetworkDependenciesEndpoints") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListOutboundNetworkDependenciesEndpoints(ctx, resourceGroupName, resourceName) - return -} - -// ResetAADProfile sends the reset aad profile request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// parameters - the AAD profile to set on the Managed Cluster -func (client ManagedClustersClient) ResetAADProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterAADProfile) (result ManagedClustersResetAADProfileFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ResetAADProfile") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "ResetAADProfile", err.Error()) - } - - req, err := client.ResetAADProfilePreparer(ctx, resourceGroupName, resourceName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetAADProfile", nil, "Failure preparing request") - return - } - - result, err = client.ResetAADProfileSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetAADProfile", result.Response(), "Failure sending request") - return - } - - return -} - -// ResetAADProfilePreparer prepares the ResetAADProfile request. 
-func (client ManagedClustersClient) ResetAADProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterAADProfile) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ResetAADProfileSender sends the ResetAADProfile request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) ResetAADProfileSender(req *http.Request) (future ManagedClustersResetAADProfileFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// ResetAADProfileResponder handles the response to the ResetAADProfile request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) ResetAADProfileResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByClosing()) - result.Response = resp - return -} - -// ResetServicePrincipalProfile this action cannot be performed on a cluster that is not using a service principal -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// parameters - the service principal profile to set on the managed cluster. 
-func (client ManagedClustersClient) ResetServicePrincipalProfile(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterServicePrincipalProfile) (result ManagedClustersResetServicePrincipalProfileFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.ResetServicePrincipalProfile") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.ClientID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "ResetServicePrincipalProfile", err.Error()) - } - - req, err := client.ResetServicePrincipalProfilePreparer(ctx, resourceGroupName, resourceName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetServicePrincipalProfile", nil, "Failure preparing request") - return - } - - result, err = client.ResetServicePrincipalProfileSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "ResetServicePrincipalProfile", result.Response(), "Failure sending request") - return - } - - return -} - -// ResetServicePrincipalProfilePreparer prepares the ResetServicePrincipalProfile request. -func (client ManagedClustersClient) ResetServicePrincipalProfilePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterServicePrincipalProfile) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ResetServicePrincipalProfileSender sends the ResetServicePrincipalProfile request. 
The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) ResetServicePrincipalProfileSender(req *http.Request) (future ManagedClustersResetServicePrincipalProfileFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// ResetServicePrincipalProfileResponder handles the response to the ResetServicePrincipalProfile request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) ResetServicePrincipalProfileResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByClosing()) - result.Response = resp - return -} - -// RotateClusterCertificates see [Certificate rotation](https://docs.microsoft.com/azure/aks/certificate-rotation) for -// more details about rotating managed cluster certificates. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -func (client ManagedClustersClient) RotateClusterCertificates(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedClustersRotateClusterCertificatesFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.RotateClusterCertificates") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "RotateClusterCertificates", err.Error()) - } - - req, err := client.RotateClusterCertificatesPreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "RotateClusterCertificates", nil, "Failure preparing request") - return - } - - result, err = client.RotateClusterCertificatesSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "RotateClusterCertificates", result.Response(), "Failure sending request") - return - } - - return -} - -// RotateClusterCertificatesPreparer prepares the RotateClusterCertificates request. 
-func (client ManagedClustersClient) RotateClusterCertificatesPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RotateClusterCertificatesSender sends the RotateClusterCertificates request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) RotateClusterCertificatesSender(req *http.Request) (future ManagedClustersRotateClusterCertificatesFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// RotateClusterCertificatesResponder handles the response to the RotateClusterCertificates request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) RotateClusterCertificatesResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// RotateServiceAccountSigningKeys sends the rotate service account signing keys request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. 
-func (client ManagedClustersClient) RotateServiceAccountSigningKeys(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedClustersRotateServiceAccountSigningKeysFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.RotateServiceAccountSigningKeys") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "RotateServiceAccountSigningKeys", err.Error()) - } - - req, err := client.RotateServiceAccountSigningKeysPreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "RotateServiceAccountSigningKeys", nil, "Failure preparing request") - return - } - - result, err = client.RotateServiceAccountSigningKeysSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "RotateServiceAccountSigningKeys", result.Response(), "Failure sending request") - return - } - - return -} - -// RotateServiceAccountSigningKeysPreparer prepares the RotateServiceAccountSigningKeys request. -func (client ManagedClustersClient) RotateServiceAccountSigningKeysPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateServiceAccountSigningKeys", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RotateServiceAccountSigningKeysSender sends the RotateServiceAccountSigningKeys request. The method will close the -// http.Response Body if it receives an error. 
-func (client ManagedClustersClient) RotateServiceAccountSigningKeysSender(req *http.Request) (future ManagedClustersRotateServiceAccountSigningKeysFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// RotateServiceAccountSigningKeysResponder handles the response to the RotateServiceAccountSigningKeys request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) RotateServiceAccountSigningKeysResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// RunCommand AKS will create a pod to run the command. This is primarily useful for private clusters. For more -// information see [AKS Run Command](https://docs.microsoft.com/azure/aks/private-clusters#aks-run-command-preview). -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// requestPayload - the run command request -func (client ManagedClustersClient) RunCommand(ctx context.Context, resourceGroupName string, resourceName string, requestPayload RunCommandRequest) (result ManagedClustersRunCommandFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.RunCommand") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}, - {TargetValue: requestPayload, - Constraints: []validation.Constraint{{Target: "requestPayload.Command", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "RunCommand", err.Error()) - } - - req, err := client.RunCommandPreparer(ctx, resourceGroupName, resourceName, requestPayload) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "RunCommand", nil, "Failure preparing request") - return - } - - result, err = client.RunCommandSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "RunCommand", result.Response(), "Failure sending request") - return - } - - return -} - -// RunCommandPreparer prepares the RunCommand 
request. -func (client ManagedClustersClient) RunCommandPreparer(ctx context.Context, resourceGroupName string, resourceName string, requestPayload RunCommandRequest) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/runCommand", pathParameters), - autorest.WithJSON(requestPayload), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RunCommandSender sends the RunCommand request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) RunCommandSender(req *http.Request) (future ManagedClustersRunCommandFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// RunCommandResponder handles the response to the RunCommand request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) RunCommandResponder(resp *http.Response) (result RunCommandResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Start see [starting a cluster](https://docs.microsoft.com/azure/aks/start-stop-cluster) for more details about -// starting a cluster. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. 
-func (client ManagedClustersClient) Start(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedClustersStartFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.Start") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "Start", err.Error()) - } - - req, err := client.StartPreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Start", nil, "Failure preparing request") - return - } - - result, err = client.StartSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Start", result.Response(), "Failure sending request") - return - } - - return -} - -// StartPreparer prepares the Start request. -func (client ManagedClustersClient) StartPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/start", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// StartSender sends the Start request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) StartSender(req *http.Request) (future ManagedClustersStartFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// StartResponder handles the response to the Start request. The method always -// closes the http.Response Body. 
-func (client ManagedClustersClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Stop this can only be performed on Azure Virtual Machine Scale set backed clusters. Stopping a cluster stops the -// control plane and agent nodes entirely, while maintaining all object and cluster state. A cluster does not accrue -// charges while it is stopped. See [stopping a cluster](https://docs.microsoft.com/azure/aks/start-stop-cluster) for -// more details about stopping a cluster. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -func (client ManagedClustersClient) Stop(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedClustersStopFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.Stop") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "Stop", err.Error()) - } - - req, err := client.StopPreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Stop", nil, "Failure preparing request") - return - } - - result, err = client.StopSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "Stop", result.Response(), "Failure sending request") - return - } - - return -} - -// StopPreparer prepares the Stop request. 
-func (client ManagedClustersClient) StopPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/stop", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// StopSender sends the Stop request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClustersClient) StopSender(req *http.Request) (future ManagedClustersStopFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// StopResponder handles the response to the Stop request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) StopResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// UpdateTags sends the update tags request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// parameters - parameters supplied to the Update Managed Cluster Tags operation. 
-func (client ManagedClustersClient) UpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (result ManagedClustersUpdateTagsFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClustersClient.UpdateTags") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClustersClient", "UpdateTags", err.Error()) - } - - req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, resourceName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "UpdateTags", nil, "Failure preparing request") - return - } - - result, err = client.UpdateTagsSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersClient", "UpdateTags", result.Response(), "Failure sending request") - return - } - - return -} - -// UpdateTagsPreparer prepares the UpdateTags request. -func (client ManagedClustersClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateTagsSender sends the UpdateTags request. The method will close the -// http.Response Body if it receives an error. 
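This hand-rolled UpdateTags pipeline (preparer, sender, future) is what the PR replaces with the generated go-azure-sdk client. Assuming the 2022-08-02-preview managedclusters package follows go-azure-sdk's usual conventions, a NewManagedClusterID constructor and a generated UpdateTagsThenPoll helper, neither of which appears in this hunk, the equivalent call on the replacement client looks roughly like this:

```go
package example

import (
	"context"

	"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters"
)

// updateClusterTags sketches the replacement call shape: a strongly-typed
// resource ID instead of loose (resourceGroup, name) strings, and a generated
// *ThenPoll helper instead of a hand-rolled future.
func updateClusterTags(ctx context.Context, client *managedclusters.ManagedClustersClient, subscriptionID, resourceGroup, clusterName string) error {
	// Assumed constructor name; the generated ID type encodes the full ARM path.
	id := managedclusters.NewManagedClusterID(subscriptionID, resourceGroup, clusterName)

	payload := managedclusters.TagsObject{
		Tags: &map[string]string{
			"environment": "staging",
		},
	}

	// Assumed helper: issues the PATCH and polls the LRO to completion.
	return client.UpdateTagsThenPoll(ctx, id, payload)
}
```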
-func (client ManagedClustersClient) UpdateTagsSender(req *http.Request) (future ManagedClustersUpdateTagsFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// UpdateTagsResponder handles the response to the UpdateTags request. The method always -// closes the http.Response Body. -func (client ManagedClustersClient) UpdateTagsResponder(resp *http.Response) (result ManagedCluster, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/managedclustersnapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/managedclustersnapshots.go deleted file mode 100644 index 89f23ffed8c1..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/managedclustersnapshots.go +++ /dev/null @@ -1,638 +0,0 @@ -package containerservice - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// ManagedClusterSnapshotsClient is the the Container Service Client. -type ManagedClusterSnapshotsClient struct { - BaseClient -} - -// NewManagedClusterSnapshotsClient creates an instance of the ManagedClusterSnapshotsClient client. -func NewManagedClusterSnapshotsClient(subscriptionID string) ManagedClusterSnapshotsClient { - return NewManagedClusterSnapshotsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewManagedClusterSnapshotsClientWithBaseURI creates an instance of the ManagedClusterSnapshotsClient client using a -// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, -// Azure stack). -func NewManagedClusterSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) ManagedClusterSnapshotsClient { - return ManagedClusterSnapshotsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate sends the create or update request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// parameters - the managed cluster snapshot to create or update. 
-func (client ManagedClusterSnapshotsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterSnapshot) (result ManagedClusterSnapshot, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterSnapshotsClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClusterSnapshotsClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client ManagedClusterSnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters ManagedClusterSnapshot) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. 
-func (client ManagedClusterSnapshotsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client ManagedClusterSnapshotsClient) CreateOrUpdateResponder(resp *http.Response) (result ManagedClusterSnapshot, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete sends the delete request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -func (client ManagedClusterSnapshotsClient) Delete(ctx context.Context, resourceGroupName string, resourceName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterSnapshotsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClusterSnapshotsClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. 
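The Preparer stage in these methods is plain go-autorest plumbing: path parameters are URL-encoded into the route template and the api-version is attached as a query parameter. A standalone sketch (placeholder subscription ID and resource names) that prints the request a DeletePreparer-style pipeline produces:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Mirror of what DeletePreparer assembles before the Sender runs.
	pathParameters := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", "example-rg"),
		"resourceName":      autorest.Encode("path", "snap1"),
		"subscriptionId":    autorest.Encode("path", "00000000-0000-0000-0000-000000000000"),
	}
	queryParameters := map[string]interface{}{
		"api-version": "2022-03-02-preview",
	}

	preparer := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL("https://management.azure.com"),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))

	req, err := preparer.Prepare(&http.Request{})
	if err != nil {
		panic(err)
	}
	// Prints roughly:
	// DELETE https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.ContainerService/managedclustersnapshots/snap1?api-version=2022-03-02-preview
	fmt.Println(req.Method, req.URL.String())
}
```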
-func (client ManagedClusterSnapshotsClient) DeletePreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClusterSnapshotsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client ManagedClusterSnapshotsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get sends the get request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. 
-func (client ManagedClusterSnapshotsClient) Get(ctx context.Context, resourceGroupName string, resourceName string) (result ManagedClusterSnapshot, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterSnapshotsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClusterSnapshotsClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client ManagedClusterSnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClusterSnapshotsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
-func (client ManagedClusterSnapshotsClient) GetResponder(resp *http.Response) (result ManagedClusterSnapshot, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List sends the list request. -func (client ManagedClusterSnapshotsClient) List(ctx context.Context) (result ManagedClusterSnapshotListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterSnapshotsClient.List") - defer func() { - sc := -1 - if result.mcslr.Response.Response != nil { - sc = result.mcslr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClusterSnapshotsClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.mcslr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "List", resp, "Failure sending request") - return - } - - result.mcslr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "List", resp, "Failure responding to request") - return - } - if result.mcslr.hasNextLink() && result.mcslr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client ManagedClusterSnapshotsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedclustersnapshots", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClusterSnapshotsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client ManagedClusterSnapshotsClient) ListResponder(resp *http.Response) (result ManagedClusterSnapshotListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. 
-func (client ManagedClusterSnapshotsClient) listNextResults(ctx context.Context, lastResults ManagedClusterSnapshotListResult) (result ManagedClusterSnapshotListResult, err error) { - req, err := lastResults.managedClusterSnapshotListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client ManagedClusterSnapshotsClient) ListComplete(ctx context.Context) (result ManagedClusterSnapshotListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterSnapshotsClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx) - return -} - -// ListByResourceGroup sends the list by resource group request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -func (client ManagedClusterSnapshotsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ManagedClusterSnapshotListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterSnapshotsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.mcslr.Response.Response != nil { - sc = result.mcslr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClusterSnapshotsClient", "ListByResourceGroup", err.Error()) - } - - result.fn = client.listByResourceGroupNextResults - req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.mcslr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result.mcslr, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, 
"containerservice.ManagedClusterSnapshotsClient", "ListByResourceGroup", resp, "Failure responding to request") - return - } - if result.mcslr.hasNextLink() && result.mcslr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. -func (client ManagedClusterSnapshotsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClusterSnapshotsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. -func (client ManagedClusterSnapshotsClient) ListByResourceGroupResponder(resp *http.Response) (result ManagedClusterSnapshotListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByResourceGroupNextResults retrieves the next set of results, if any. -func (client ManagedClusterSnapshotsClient) listByResourceGroupNextResults(ctx context.Context, lastResults ManagedClusterSnapshotListResult) (result ManagedClusterSnapshotListResult, err error) { - req, err := lastResults.managedClusterSnapshotListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client ManagedClusterSnapshotsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ManagedClusterSnapshotListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterSnapshotsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) - return -} - -// UpdateTags sends the update tags request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// parameters - parameters supplied to the Update managed cluster snapshot Tags operation. -func (client ManagedClusterSnapshotsClient) UpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (result ManagedClusterSnapshot, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterSnapshotsClient.UpdateTags") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ManagedClusterSnapshotsClient", "UpdateTags", err.Error()) - } - - req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, resourceName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "UpdateTags", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateTagsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "UpdateTags", resp, "Failure sending request") - return - } - - result, err = client.UpdateTagsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClusterSnapshotsClient", "UpdateTags", resp, "Failure responding to request") - return - } - - return -} - -// UpdateTagsPreparer prepares the UpdateTags request. 
-func (client ManagedClusterSnapshotsClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateTagsSender sends the UpdateTags request. The method will close the -// http.Response Body if it receives an error. -func (client ManagedClusterSnapshotsClient) UpdateTagsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateTagsResponder handles the response to the UpdateTags request. The method always -// closes the http.Response Body. -func (client ManagedClusterSnapshotsClient) UpdateTagsResponder(resp *http.Response) (result ManagedClusterSnapshot, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/models.go deleted file mode 100644 index 0f058147120e..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/models.go +++ /dev/null @@ -1,4579 +0,0 @@ -package containerservice - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice" - -// AccessProfile profile for enabling a user to access a managed cluster. -type AccessProfile struct { - // KubeConfig - Base64-encoded Kubernetes configuration file. - KubeConfig *[]byte `json:"kubeConfig,omitempty"` -} - -// AgentPool agent Pool. -type AgentPool struct { - autorest.Response `json:"-"` - // ManagedClusterAgentPoolProfileProperties - Properties of an agent pool. 
- *ManagedClusterAgentPoolProfileProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource ID. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource that is unique within a resource group. This name can be used to access the resource. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for AgentPool. -func (ap AgentPool) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ap.ManagedClusterAgentPoolProfileProperties != nil { - objectMap["properties"] = ap.ManagedClusterAgentPoolProfileProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AgentPool struct. -func (ap *AgentPool) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var managedClusterAgentPoolProfileProperties ManagedClusterAgentPoolProfileProperties - err = json.Unmarshal(*v, &managedClusterAgentPoolProfileProperties) - if err != nil { - return err - } - ap.ManagedClusterAgentPoolProfileProperties = &managedClusterAgentPoolProfileProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - ap.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - ap.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - ap.Type = &typeVar - } - } - } - - return nil -} - -// AgentPoolAvailableVersions the list of available versions for an agent pool. -type AgentPoolAvailableVersions struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The ID of the agent pool version list. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the agent pool version list. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Type of the agent pool version list. - Type *string `json:"type,omitempty"` - // AgentPoolAvailableVersionsProperties - Properties of agent pool available versions. - *AgentPoolAvailableVersionsProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for AgentPoolAvailableVersions. -func (apav AgentPoolAvailableVersions) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if apav.AgentPoolAvailableVersionsProperties != nil { - objectMap["properties"] = apav.AgentPoolAvailableVersionsProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AgentPoolAvailableVersions struct. 
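These hand-written MarshalJSON/UnmarshalJSON implementations exist because ARM nests the interesting fields under a "properties" envelope while the Go structs flatten them through an embedded pointer, and because READ-ONLY fields (ID, Name, Type) must never be echoed back. A small sketch of the resulting wire shape; Count and VMSize are assumed fields of ManagedClusterAgentPoolProfileProperties, which this excerpt does not reproduce:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	ap := containerservice.AgentPool{
		// READ-ONLY: dropped by the custom MarshalJSON, so it is never sent back to ARM.
		ID: to.StringPtr("/subscriptions/sub-id/resourceGroups/example-rg/providers/Microsoft.ContainerService/managedClusters/example/agentPools/default"),
		// Embedded properties are lifted into the "properties" envelope on the wire.
		ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
			Count:  to.Int32Ptr(3),
			VMSize: to.StringPtr("Standard_D2s_v3"),
		},
	}

	body, err := json.Marshal(ap)
	if err != nil {
		panic(err)
	}
	// Prints something like: {"properties":{"count":3,"vmSize":"Standard_D2s_v3"}}
	fmt.Println(string(body))
}
```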
-func (apav *AgentPoolAvailableVersions) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - apav.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - apav.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - apav.Type = &typeVar - } - case "properties": - if v != nil { - var agentPoolAvailableVersionsProperties AgentPoolAvailableVersionsProperties - err = json.Unmarshal(*v, &agentPoolAvailableVersionsProperties) - if err != nil { - return err - } - apav.AgentPoolAvailableVersionsProperties = &agentPoolAvailableVersionsProperties - } - } - } - - return nil -} - -// AgentPoolAvailableVersionsProperties the list of available agent pool versions. -type AgentPoolAvailableVersionsProperties struct { - // AgentPoolVersions - List of versions available for agent pool. - AgentPoolVersions *[]AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem `json:"agentPoolVersions,omitempty"` -} - -// AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem ... -type AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem struct { - // Default - Whether this version is the default agent pool version. - Default *bool `json:"default,omitempty"` - // KubernetesVersion - The Kubernetes version (major.minor.patch). - KubernetesVersion *string `json:"kubernetesVersion,omitempty"` - // IsPreview - Whether Kubernetes version is currently in preview. - IsPreview *bool `json:"isPreview,omitempty"` -} - -// AgentPoolListResult the response from the List Agent Pools operation. -type AgentPoolListResult struct { - autorest.Response `json:"-"` - // Value - The list of agent pools. - Value *[]AgentPool `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of agent pool results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for AgentPoolListResult. -func (aplr AgentPoolListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if aplr.Value != nil { - objectMap["value"] = aplr.Value - } - return json.Marshal(objectMap) -} - -// AgentPoolListResultIterator provides access to a complete listing of AgentPool values. -type AgentPoolListResultIterator struct { - i int - page AgentPoolListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *AgentPoolListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. 
-func (iter *AgentPoolListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter AgentPoolListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter AgentPoolListResultIterator) Response() AgentPoolListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter AgentPoolListResultIterator) Value() AgentPool { - if !iter.page.NotDone() { - return AgentPool{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the AgentPoolListResultIterator type. -func NewAgentPoolListResultIterator(page AgentPoolListResultPage) AgentPoolListResultIterator { - return AgentPoolListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (aplr AgentPoolListResult) IsEmpty() bool { - return aplr.Value == nil || len(*aplr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (aplr AgentPoolListResult) hasNextLink() bool { - return aplr.NextLink != nil && len(*aplr.NextLink) != 0 -} - -// agentPoolListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (aplr AgentPoolListResult) agentPoolListResultPreparer(ctx context.Context) (*http.Request, error) { - if !aplr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(aplr.NextLink))) -} - -// AgentPoolListResultPage contains a page of AgentPool values. -type AgentPoolListResultPage struct { - fn func(context.Context, AgentPoolListResult) (AgentPoolListResult, error) - aplr AgentPoolListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *AgentPoolListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AgentPoolListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.aplr) - if err != nil { - return err - } - page.aplr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *AgentPoolListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page AgentPoolListResultPage) NotDone() bool { - return !page.aplr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page AgentPoolListResultPage) Response() AgentPoolListResult { - return page.aplr -} - -// Values returns the slice of values for the current page or nil if there are no values. 
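For callers that want page-at-a-time behaviour instead of the flattening iterator, the page type can also be walked directly; NotDone, Values and NextWithContext are the whole surface. A quick sketch:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice"
)

// countAgentPools walks the page type directly: process the current page's
// Values, then request the next page until the result set is exhausted.
func countAgentPools(ctx context.Context, page containerservice.AgentPoolListResultPage) (int, error) {
	total := 0
	for page.NotDone() {
		total += len(page.Values())
		if err := page.NextWithContext(ctx); err != nil {
			return 0, err
		}
	}
	return total, nil
}
```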
-func (page AgentPoolListResultPage) Values() []AgentPool { - if page.aplr.IsEmpty() { - return nil - } - return *page.aplr.Value -} - -// Creates a new instance of the AgentPoolListResultPage type. -func NewAgentPoolListResultPage(cur AgentPoolListResult, getNextPage func(context.Context, AgentPoolListResult) (AgentPoolListResult, error)) AgentPoolListResultPage { - return AgentPoolListResultPage{ - fn: getNextPage, - aplr: cur, - } -} - -// AgentPoolsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type AgentPoolsCreateOrUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(AgentPoolsClient) (AgentPool, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *AgentPoolsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for AgentPoolsCreateOrUpdateFuture.Result. -func (future *AgentPoolsCreateOrUpdateFuture) result(client AgentPoolsClient) (ap AgentPool, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ap.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.AgentPoolsCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if ap.Response.Response, err = future.GetResult(sender); err == nil && ap.Response.Response.StatusCode != http.StatusNoContent { - ap, err = client.CreateOrUpdateResponder(ap.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsCreateOrUpdateFuture", "Result", ap.Response.Response, "Failure responding to request") - } - } - return -} - -// AgentPoolsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type AgentPoolsDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(AgentPoolsClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *AgentPoolsDeleteFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for AgentPoolsDeleteFuture.Result. 
-func (future *AgentPoolsDeleteFuture) result(client AgentPoolsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.AgentPoolsDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// AgentPoolsUpgradeNodeImageVersionFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type AgentPoolsUpgradeNodeImageVersionFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(AgentPoolsClient) (AgentPool, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *AgentPoolsUpgradeNodeImageVersionFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for AgentPoolsUpgradeNodeImageVersionFuture.Result. -func (future *AgentPoolsUpgradeNodeImageVersionFuture) result(client AgentPoolsClient) (ap AgentPool, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsUpgradeNodeImageVersionFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ap.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.AgentPoolsUpgradeNodeImageVersionFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if ap.Response.Response, err = future.GetResult(sender); err == nil && ap.Response.Response.StatusCode != http.StatusNoContent { - ap, err = client.UpgradeNodeImageVersionResponder(ap.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.AgentPoolsUpgradeNodeImageVersionFuture", "Result", ap.Response.Response, "Failure responding to request") - } - } - return -} - -// AgentPoolUpgradeProfile the list of available upgrades for an agent pool. -type AgentPoolUpgradeProfile struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The ID of the agent pool upgrade profile. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the agent pool upgrade profile. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the agent pool upgrade profile. - Type *string `json:"type,omitempty"` - // AgentPoolUpgradeProfileProperties - The properties of the agent pool upgrade profile. - *AgentPoolUpgradeProfileProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for AgentPoolUpgradeProfile. -func (apup AgentPoolUpgradeProfile) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if apup.AgentPoolUpgradeProfileProperties != nil { - objectMap["properties"] = apup.AgentPoolUpgradeProfileProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AgentPoolUpgradeProfile struct. 
-func (apup *AgentPoolUpgradeProfile) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - apup.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - apup.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - apup.Type = &typeVar - } - case "properties": - if v != nil { - var agentPoolUpgradeProfileProperties AgentPoolUpgradeProfileProperties - err = json.Unmarshal(*v, &agentPoolUpgradeProfileProperties) - if err != nil { - return err - } - apup.AgentPoolUpgradeProfileProperties = &agentPoolUpgradeProfileProperties - } - } - } - - return nil -} - -// AgentPoolUpgradeProfileProperties the list of available upgrade versions. -type AgentPoolUpgradeProfileProperties struct { - // KubernetesVersion - The Kubernetes version (major.minor.patch). - KubernetesVersion *string `json:"kubernetesVersion,omitempty"` - // OsType - Possible values include: 'OSTypeLinux', 'OSTypeWindows' - OsType OSType `json:"osType,omitempty"` - // Upgrades - List of orchestrator types and versions available for upgrade. - Upgrades *[]AgentPoolUpgradeProfilePropertiesUpgradesItem `json:"upgrades,omitempty"` - // LatestNodeImageVersion - The latest AKS supported node image version. - LatestNodeImageVersion *string `json:"latestNodeImageVersion,omitempty"` -} - -// AgentPoolUpgradeProfilePropertiesUpgradesItem ... -type AgentPoolUpgradeProfilePropertiesUpgradesItem struct { - // KubernetesVersion - The Kubernetes version (major.minor.patch). - KubernetesVersion *string `json:"kubernetesVersion,omitempty"` - // IsPreview - Whether the Kubernetes version is currently in preview. - IsPreview *bool `json:"isPreview,omitempty"` -} - -// AgentPoolUpgradeSettings settings for upgrading an agentpool -type AgentPoolUpgradeSettings struct { - // MaxSurge - This can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified, the default is 1. For more information, including best practices, see: https://docs.microsoft.com/azure/aks/upgrade-cluster#customize-node-surge-upgrade - MaxSurge *string `json:"maxSurge,omitempty"` -} - -// AzureEntityResource the resource model definition for an Azure Resource Manager resource with an etag. -type AzureEntityResource struct { - // Etag - READ-ONLY; Resource Etag. - Etag *string `json:"etag,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` - // SystemData - READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. 
- SystemData *SystemData `json:"systemData,omitempty"` -} - -// MarshalJSON is the custom marshaler for AzureEntityResource. -func (aer AzureEntityResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// AzureKeyVaultKms azure Key Vault key management service settings for the security profile. -type AzureKeyVaultKms struct { - // Enabled - Whether to enable Azure Key Vault key management service. The default is false. - Enabled *bool `json:"enabled,omitempty"` - // KeyID - Identifier of Azure Key Vault key. See [key identifier format](https://docs.microsoft.com/en-us/azure/key-vault/general/about-keys-secrets-certificates#vault-name-and-object-name) for more details. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier. When Azure Key Vault key management service is disabled, leave the field empty. - KeyID *string `json:"keyId,omitempty"` -} - -// CloudError an error response from the Container service. -type CloudError struct { - // Error - Details about the error. - Error *CloudErrorBody `json:"error,omitempty"` -} - -// CloudErrorBody an error response from the Container service. -type CloudErrorBody struct { - // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically. - Code *string `json:"code,omitempty"` - // Message - A message describing the error, intended to be suitable for display in a user interface. - Message *string `json:"message,omitempty"` - // Target - The target of the particular error. For example, the name of the property in error. - Target *string `json:"target,omitempty"` - // Details - A list of additional details about the error. - Details *[]CloudErrorBody `json:"details,omitempty"` -} - -// CommandResultProperties the results of a run command -type CommandResultProperties struct { - // ProvisioningState - READ-ONLY; provisioning State - ProvisioningState *string `json:"provisioningState,omitempty"` - // ExitCode - READ-ONLY; The exit code of the command - ExitCode *int32 `json:"exitCode,omitempty"` - // StartedAt - READ-ONLY; The time when the command started. - StartedAt *date.Time `json:"startedAt,omitempty"` - // FinishedAt - READ-ONLY; The time when the command finished. - FinishedAt *date.Time `json:"finishedAt,omitempty"` - // Logs - READ-ONLY; The command output. - Logs *string `json:"logs,omitempty"` - // Reason - READ-ONLY; An explanation of why provisioningState is set to failed (if so). - Reason *string `json:"reason,omitempty"` -} - -// MarshalJSON is the custom marshaler for CommandResultProperties. -func (crp CommandResultProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CreationData data used when creating a target resource from a source resource. -type CreationData struct { - // SourceResourceID - This is the ARM ID of the source object to be used to create the target object. - SourceResourceID *string `json:"sourceResourceId,omitempty"` -} - -// CredentialResult the credential result response. -type CredentialResult struct { - // Name - READ-ONLY; The name of the credential. - Name *string `json:"name,omitempty"` - // Value - READ-ONLY; Base64-encoded Kubernetes configuration file. - Value *[]byte `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for CredentialResult. 
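CredentialResult above, together with the CredentialResults wrapper that follows, carries kubeconfigs as *[]byte; encoding/json has already base64-decoded them into raw YAML by the time user code sees them. A small sketch of persisting such a payload (the output directory is a placeholder, and the credential-listing operations that produce it are not part of this hunk):

```go
package example

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice"
)

// writeKubeconfigs persists each kubeconfig in a CredentialResults payload to
// its own file, named after the credential entry.
func writeKubeconfigs(dir string, creds containerservice.CredentialResults) error {
	if creds.Kubeconfigs == nil {
		return nil
	}
	for _, cred := range *creds.Kubeconfigs {
		if cred.Name == nil || cred.Value == nil {
			continue
		}
		path := filepath.Join(dir, fmt.Sprintf("%s.kubeconfig", *cred.Name))
		if err := os.WriteFile(path, *cred.Value, 0o600); err != nil {
			return err
		}
	}
	return nil
}
```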
-func (cr CredentialResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CredentialResults the list credential result response. -type CredentialResults struct { - autorest.Response `json:"-"` - // Kubeconfigs - READ-ONLY; Base64-encoded Kubernetes configuration file. - Kubeconfigs *[]CredentialResult `json:"kubeconfigs,omitempty"` -} - -// MarshalJSON is the custom marshaler for CredentialResults. -func (cr CredentialResults) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DiagnosticsProfile profile for diagnostics on the container service cluster. -type DiagnosticsProfile struct { - // VMDiagnostics - Profile for diagnostics on the container service VMs. - VMDiagnostics *VMDiagnostics `json:"vmDiagnostics,omitempty"` -} - -// EndpointDependency a domain name that AKS agent nodes are reaching at. -type EndpointDependency struct { - // DomainName - The domain name of the dependency. - DomainName *string `json:"domainName,omitempty"` - // EndpointDetails - The Ports and Protocols used when connecting to domainName. - EndpointDetails *[]EndpointDetail `json:"endpointDetails,omitempty"` -} - -// EndpointDetail connect information from the AKS agent nodes to a single endpoint. -type EndpointDetail struct { - // IPAddress - An IP Address that Domain Name currently resolves to. - IPAddress *string `json:"ipAddress,omitempty"` - // Port - The port an endpoint is connected to. - Port *int32 `json:"port,omitempty"` - // Protocol - The protocol used for connection - Protocol *string `json:"protocol,omitempty"` - // Description - Description of the detail - Description *string `json:"description,omitempty"` -} - -// ExtendedLocation the complex type of the extended location. -type ExtendedLocation struct { - // Name - The name of the extended location. - Name *string `json:"name,omitempty"` - // Type - The type of the extended location. Possible values include: 'ExtendedLocationTypesEdgeZone' - Type ExtendedLocationTypes `json:"type,omitempty"` -} - -// KubeletConfig see [AKS custom node -// configuration](https://docs.microsoft.com/azure/aks/custom-node-configuration) for more details. -type KubeletConfig struct { - // CPUManagerPolicy - The default is 'none'. See [Kubernetes CPU management policies](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#cpu-management-policies) for more information. Allowed values are 'none' and 'static'. - CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty"` - // CPUCfsQuota - The default is true. - CPUCfsQuota *bool `json:"cpuCfsQuota,omitempty"` - // CPUCfsQuotaPeriod - The default is '100ms.' Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix. For example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'. - CPUCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty"` - // ImageGcHighThreshold - To disable image garbage collection, set to 100. The default is 85% - ImageGcHighThreshold *int32 `json:"imageGcHighThreshold,omitempty"` - // ImageGcLowThreshold - This cannot be set higher than imageGcHighThreshold. The default is 80% - ImageGcLowThreshold *int32 `json:"imageGcLowThreshold,omitempty"` - // TopologyManagerPolicy - For more information see [Kubernetes Topology Manager](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager). The default is 'none'. 
Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'. - TopologyManagerPolicy *string `json:"topologyManagerPolicy,omitempty"` - // AllowedUnsafeSysctls - Allowed list of unsafe sysctls or unsafe sysctl patterns (ending in `*`). - AllowedUnsafeSysctls *[]string `json:"allowedUnsafeSysctls,omitempty"` - // FailSwapOn - If set to true it will make the Kubelet fail to start if swap is enabled on the node. - FailSwapOn *bool `json:"failSwapOn,omitempty"` - // ContainerLogMaxSizeMB - The maximum size (e.g. 10Mi) of container log file before it is rotated. - ContainerLogMaxSizeMB *int32 `json:"containerLogMaxSizeMB,omitempty"` - // ContainerLogMaxFiles - The maximum number of container log files that can be present for a container. The number must be ≥ 2. - ContainerLogMaxFiles *int32 `json:"containerLogMaxFiles,omitempty"` - // PodMaxPids - The maximum number of processes per pod. - PodMaxPids *int32 `json:"podMaxPids,omitempty"` -} - -// LinuxOSConfig see [AKS custom node -// configuration](https://docs.microsoft.com/azure/aks/custom-node-configuration) for more details. -type LinuxOSConfig struct { - // Sysctls - Sysctl settings for Linux agent nodes. - Sysctls *SysctlConfig `json:"sysctls,omitempty"` - // TransparentHugePageEnabled - Valid values are 'always', 'madvise', and 'never'. The default is 'always'. For more information see [Transparent Hugepages](https://www.kernel.org/doc/html/latest/admin-guide/mm/transhuge.html#admin-guide-transhuge). - TransparentHugePageEnabled *string `json:"transparentHugePageEnabled,omitempty"` - // TransparentHugePageDefrag - Valid values are 'always', 'defer', 'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see [Transparent Hugepages](https://www.kernel.org/doc/html/latest/admin-guide/mm/transhuge.html#admin-guide-transhuge). - TransparentHugePageDefrag *string `json:"transparentHugePageDefrag,omitempty"` - // SwapFileSizeMB - The size in MB of a swap file that will be created on each node. - SwapFileSizeMB *int32 `json:"swapFileSizeMB,omitempty"` -} - -// LinuxProfile profile for Linux VMs in the container service cluster. -type LinuxProfile struct { - // AdminUsername - The administrator username to use for Linux VMs. - AdminUsername *string `json:"adminUsername,omitempty"` - // SSH - The SSH configuration for Linux-based VMs running on Azure. - SSH *SSHConfiguration `json:"ssh,omitempty"` -} - -// MaintenanceConfiguration see [planned -// maintenance](https://docs.microsoft.com/azure/aks/planned-maintenance) for more information about -// planned maintenance. -type MaintenanceConfiguration struct { - autorest.Response `json:"-"` - // SystemData - READ-ONLY; The system metadata relating to this resource. - SystemData *SystemData `json:"systemData,omitempty"` - // MaintenanceConfigurationProperties - Properties of a default maintenance configuration. - *MaintenanceConfigurationProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource ID. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource that is unique within a resource group. This name can be used to access the resource. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for MaintenanceConfiguration. 
-func (mc MaintenanceConfiguration) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mc.MaintenanceConfigurationProperties != nil { - objectMap["properties"] = mc.MaintenanceConfigurationProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for MaintenanceConfiguration struct. -func (mc *MaintenanceConfiguration) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "systemData": - if v != nil { - var systemData SystemData - err = json.Unmarshal(*v, &systemData) - if err != nil { - return err - } - mc.SystemData = &systemData - } - case "properties": - if v != nil { - var maintenanceConfigurationProperties MaintenanceConfigurationProperties - err = json.Unmarshal(*v, &maintenanceConfigurationProperties) - if err != nil { - return err - } - mc.MaintenanceConfigurationProperties = &maintenanceConfigurationProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - mc.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - mc.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - mc.Type = &typeVar - } - } - } - - return nil -} - -// MaintenanceConfigurationListResult the response from the List maintenance configurations operation. -type MaintenanceConfigurationListResult struct { - autorest.Response `json:"-"` - // Value - The list of maintenance configurations. - Value *[]MaintenanceConfiguration `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of maintenance configuration results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for MaintenanceConfigurationListResult. -func (mclr MaintenanceConfigurationListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mclr.Value != nil { - objectMap["value"] = mclr.Value - } - return json.Marshal(objectMap) -} - -// MaintenanceConfigurationListResultIterator provides access to a complete listing of -// MaintenanceConfiguration values. -type MaintenanceConfigurationListResultIterator struct { - i int - page MaintenanceConfigurationListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *MaintenanceConfigurationListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MaintenanceConfigurationListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. 
-func (iter *MaintenanceConfigurationListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter MaintenanceConfigurationListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter MaintenanceConfigurationListResultIterator) Response() MaintenanceConfigurationListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter MaintenanceConfigurationListResultIterator) Value() MaintenanceConfiguration { - if !iter.page.NotDone() { - return MaintenanceConfiguration{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the MaintenanceConfigurationListResultIterator type. -func NewMaintenanceConfigurationListResultIterator(page MaintenanceConfigurationListResultPage) MaintenanceConfigurationListResultIterator { - return MaintenanceConfigurationListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (mclr MaintenanceConfigurationListResult) IsEmpty() bool { - return mclr.Value == nil || len(*mclr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (mclr MaintenanceConfigurationListResult) hasNextLink() bool { - return mclr.NextLink != nil && len(*mclr.NextLink) != 0 -} - -// maintenanceConfigurationListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (mclr MaintenanceConfigurationListResult) maintenanceConfigurationListResultPreparer(ctx context.Context) (*http.Request, error) { - if !mclr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(mclr.NextLink))) -} - -// MaintenanceConfigurationListResultPage contains a page of MaintenanceConfiguration values. -type MaintenanceConfigurationListResultPage struct { - fn func(context.Context, MaintenanceConfigurationListResult) (MaintenanceConfigurationListResult, error) - mclr MaintenanceConfigurationListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *MaintenanceConfigurationListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MaintenanceConfigurationListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.mclr) - if err != nil { - return err - } - page.mclr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *MaintenanceConfigurationListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. 
-func (page MaintenanceConfigurationListResultPage) NotDone() bool { - return !page.mclr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page MaintenanceConfigurationListResultPage) Response() MaintenanceConfigurationListResult { - return page.mclr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page MaintenanceConfigurationListResultPage) Values() []MaintenanceConfiguration { - if page.mclr.IsEmpty() { - return nil - } - return *page.mclr.Value -} - -// Creates a new instance of the MaintenanceConfigurationListResultPage type. -func NewMaintenanceConfigurationListResultPage(cur MaintenanceConfigurationListResult, getNextPage func(context.Context, MaintenanceConfigurationListResult) (MaintenanceConfigurationListResult, error)) MaintenanceConfigurationListResultPage { - return MaintenanceConfigurationListResultPage{ - fn: getNextPage, - mclr: cur, - } -} - -// MaintenanceConfigurationProperties properties used to configure planned maintenance for a Managed -// Cluster. -type MaintenanceConfigurationProperties struct { - // TimeInWeek - If two array entries specify the same day of the week, the applied configuration is the union of times in both entries. - TimeInWeek *[]TimeInWeek `json:"timeInWeek,omitempty"` - // NotAllowedTime - Time slots on which upgrade is not allowed. - NotAllowedTime *[]TimeSpan `json:"notAllowedTime,omitempty"` -} - -// ManagedCluster managed cluster. -type ManagedCluster struct { - autorest.Response `json:"-"` - // Sku - The managed cluster SKU. - Sku *ManagedClusterSKU `json:"sku,omitempty"` - // ExtendedLocation - The extended location of the Virtual Machine. - ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` - // Identity - The identity of the managed cluster, if configured. - Identity *ManagedClusterIdentity `json:"identity,omitempty"` - // ManagedClusterProperties - Properties of a managed cluster. - *ManagedClusterProperties `json:"properties,omitempty"` - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // Location - The geo-location where the resource lives - Location *string `json:"location,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` - // SystemData - READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. - SystemData *SystemData `json:"systemData,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedCluster. 
-func (mc ManagedCluster) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mc.Sku != nil { - objectMap["sku"] = mc.Sku - } - if mc.ExtendedLocation != nil { - objectMap["extendedLocation"] = mc.ExtendedLocation - } - if mc.Identity != nil { - objectMap["identity"] = mc.Identity - } - if mc.ManagedClusterProperties != nil { - objectMap["properties"] = mc.ManagedClusterProperties - } - if mc.Tags != nil { - objectMap["tags"] = mc.Tags - } - if mc.Location != nil { - objectMap["location"] = mc.Location - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ManagedCluster struct. -func (mc *ManagedCluster) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "sku": - if v != nil { - var sku ManagedClusterSKU - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - mc.Sku = &sku - } - case "extendedLocation": - if v != nil { - var extendedLocation ExtendedLocation - err = json.Unmarshal(*v, &extendedLocation) - if err != nil { - return err - } - mc.ExtendedLocation = &extendedLocation - } - case "identity": - if v != nil { - var identity ManagedClusterIdentity - err = json.Unmarshal(*v, &identity) - if err != nil { - return err - } - mc.Identity = &identity - } - case "properties": - if v != nil { - var managedClusterProperties ManagedClusterProperties - err = json.Unmarshal(*v, &managedClusterProperties) - if err != nil { - return err - } - mc.ManagedClusterProperties = &managedClusterProperties - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - mc.Tags = tags - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - mc.Location = &location - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - mc.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - mc.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - mc.Type = &typeVar - } - case "systemData": - if v != nil { - var systemData SystemData - err = json.Unmarshal(*v, &systemData) - if err != nil { - return err - } - mc.SystemData = &systemData - } - } - } - - return nil -} - -// ManagedClusterAADProfile for more details see [managed AAD on -// AKS](https://docs.microsoft.com/azure/aks/managed-aad). -type ManagedClusterAADProfile struct { - // Managed - Whether to enable managed AAD. - Managed *bool `json:"managed,omitempty"` - // EnableAzureRBAC - Whether to enable Azure RBAC for Kubernetes authorization. - EnableAzureRBAC *bool `json:"enableAzureRBAC,omitempty"` - // AdminGroupObjectIDs - The list of AAD group object IDs that will have admin role of the cluster. - AdminGroupObjectIDs *[]string `json:"adminGroupObjectIDs,omitempty"` - // ClientAppID - The client AAD application ID. - ClientAppID *string `json:"clientAppID,omitempty"` - // ServerAppID - The server AAD application ID. - ServerAppID *string `json:"serverAppID,omitempty"` - // ServerAppSecret - The server AAD application secret. - ServerAppSecret *string `json:"serverAppSecret,omitempty"` - // TenantID - The AAD tenant ID to use for authentication. 
If not specified, will use the tenant of the deployment subscription. - TenantID *string `json:"tenantID,omitempty"` -} - -// ManagedClusterAccessProfile managed cluster Access Profile. -type ManagedClusterAccessProfile struct { - autorest.Response `json:"-"` - // AccessProfile - AccessProfile of a managed cluster. - *AccessProfile `json:"properties,omitempty"` - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // Location - The geo-location where the resource lives - Location *string `json:"location,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` - // SystemData - READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. - SystemData *SystemData `json:"systemData,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterAccessProfile. -func (mcap ManagedClusterAccessProfile) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mcap.AccessProfile != nil { - objectMap["properties"] = mcap.AccessProfile - } - if mcap.Tags != nil { - objectMap["tags"] = mcap.Tags - } - if mcap.Location != nil { - objectMap["location"] = mcap.Location - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ManagedClusterAccessProfile struct. -func (mcap *ManagedClusterAccessProfile) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var accessProfile AccessProfile - err = json.Unmarshal(*v, &accessProfile) - if err != nil { - return err - } - mcap.AccessProfile = &accessProfile - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - mcap.Tags = tags - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - mcap.Location = &location - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - mcap.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - mcap.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - mcap.Type = &typeVar - } - case "systemData": - if v != nil { - var systemData SystemData - err = json.Unmarshal(*v, &systemData) - if err != nil { - return err - } - mcap.SystemData = &systemData - } - } - } - - return nil -} - -// ManagedClusterAddonProfile a Kubernetes add-on profile for a managed cluster. -type ManagedClusterAddonProfile struct { - // Enabled - Whether the add-on is enabled or not. - Enabled *bool `json:"enabled,omitempty"` - // Config - Key-value pairs for configuring an add-on. - Config map[string]*string `json:"config"` - // Identity - READ-ONLY; Information of user assigned identity used by this add-on. 
- Identity *ManagedClusterAddonProfileIdentity `json:"identity,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterAddonProfile. -func (mcap ManagedClusterAddonProfile) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mcap.Enabled != nil { - objectMap["enabled"] = mcap.Enabled - } - if mcap.Config != nil { - objectMap["config"] = mcap.Config - } - return json.Marshal(objectMap) -} - -// ManagedClusterAddonProfileIdentity information of user assigned identity used by this add-on. -type ManagedClusterAddonProfileIdentity struct { - // ResourceID - The resource ID of the user assigned identity. - ResourceID *string `json:"resourceId,omitempty"` - // ClientID - The client ID of the user assigned identity. - ClientID *string `json:"clientId,omitempty"` - // ObjectID - The object ID of the user assigned identity. - ObjectID *string `json:"objectId,omitempty"` -} - -// ManagedClusterAgentPoolProfile profile for the container service agent pool. -type ManagedClusterAgentPoolProfile struct { - // Name - Windows agent pool names must be 6 characters or less. - Name *string `json:"name,omitempty"` - // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1. - Count *int32 `json:"count,omitempty"` - // VMSize - VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions - VMSize *string `json:"vmSize,omitempty"` - OsDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"` - // OsDiskType - Possible values include: 'OSDiskTypeManaged', 'OSDiskTypeEphemeral' - OsDiskType OSDiskType `json:"osDiskType,omitempty"` - // KubeletDiskType - Possible values include: 'KubeletDiskTypeOS', 'KubeletDiskTypeTemporary' - KubeletDiskType KubeletDiskType `json:"kubeletDiskType,omitempty"` - // WorkloadRuntime - Possible values include: 'WorkloadRuntimeOCIContainer', 'WorkloadRuntimeWasmWasi' - WorkloadRuntime WorkloadRuntime `json:"workloadRuntime,omitempty"` - // MessageOfTheDay - A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It must not be specified for Windows nodes. It must be a static string (i.e., will be printed raw and not be executed as a script). - MessageOfTheDay *string `json:"messageOfTheDay,omitempty"` - // VnetSubnetID - If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName} - VnetSubnetID *string `json:"vnetSubnetID,omitempty"` - // PodSubnetID - If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName} - PodSubnetID *string `json:"podSubnetID,omitempty"` - // MaxPods - The maximum number of pods that can run on a node. 
- MaxPods *int32 `json:"maxPods,omitempty"` - // OsType - Possible values include: 'OSTypeLinux', 'OSTypeWindows' - OsType OSType `json:"osType,omitempty"` - // OsSKU - Possible values include: 'OSSKUUbuntu', 'OSSKUCBLMariner' - OsSKU OSSKU `json:"osSKU,omitempty"` - // MaxCount - The maximum number of nodes for auto-scaling - MaxCount *int32 `json:"maxCount,omitempty"` - // MinCount - The minimum number of nodes for auto-scaling - MinCount *int32 `json:"minCount,omitempty"` - // EnableAutoScaling - Whether to enable auto-scaler - EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"` - // ScaleDownMode - This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete. Possible values include: 'ScaleDownModeDelete', 'ScaleDownModeDeallocate' - ScaleDownMode ScaleDownMode `json:"scaleDownMode,omitempty"` - // Type - Possible values include: 'AgentPoolTypeVirtualMachineScaleSets', 'AgentPoolTypeAvailabilitySet' - Type AgentPoolType `json:"type,omitempty"` - // Mode - Possible values include: 'AgentPoolModeSystem', 'AgentPoolModeUser' - Mode AgentPoolMode `json:"mode,omitempty"` - // OrchestratorVersion - Both a full patch version (major.minor.patch) and a minor version (major.minor) are supported. When only major.minor is specified, the latest supported patch version is chosen automatically. Updating the agent pool with the same major.minor once it has been created will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see [upgrading a node pool](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#upgrade-a-node-pool). - OrchestratorVersion *string `json:"orchestratorVersion,omitempty"` - // CurrentOrchestratorVersion - If orchestratorVersion was a fully specified version (major.minor.patch), this field will be exactly equal to it. If orchestratorVersion was only major.minor, this field will contain the full major.minor.patch version being used. - CurrentOrchestratorVersion *string `json:"currentOrchestratorVersion,omitempty"` - // NodeImageVersion - READ-ONLY; The version of node image - NodeImageVersion *string `json:"nodeImageVersion,omitempty"` - // UpgradeSettings - Settings for upgrading the agentpool - UpgradeSettings *AgentPoolUpgradeSettings `json:"upgradeSettings,omitempty"` - // ProvisioningState - READ-ONLY; The current deployment or provisioning state. - ProvisioningState *string `json:"provisioningState,omitempty"` - // PowerState - When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and provisioning state is Succeeded - PowerState *PowerState `json:"powerState,omitempty"` - // AvailabilityZones - The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'. - AvailabilityZones *[]string `json:"availabilityZones,omitempty"` - // EnableNodePublicIP - Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops.
For more information see [assigning a public IP per node](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#assign-a-public-ip-per-node-for-your-node-pools). The default is false. - EnableNodePublicIP *bool `json:"enableNodePublicIP,omitempty"` - // NodePublicIPPrefixID - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName} - NodePublicIPPrefixID *string `json:"nodePublicIPPrefixID,omitempty"` - // ScaleSetPriority - The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'. Possible values include: 'ScaleSetPrioritySpot', 'ScaleSetPriorityRegular' - ScaleSetPriority ScaleSetPriority `json:"scaleSetPriority,omitempty"` - // ScaleSetEvictionPolicy - This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'. Possible values include: 'ScaleSetEvictionPolicyDelete', 'ScaleSetEvictionPolicyDeallocate' - ScaleSetEvictionPolicy ScaleSetEvictionPolicy `json:"scaleSetEvictionPolicy,omitempty"` - // SpotMaxPrice - Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see [spot VMs pricing](https://docs.microsoft.com/azure/virtual-machines/spot-vms#pricing) - SpotMaxPrice *float64 `json:"spotMaxPrice,omitempty"` - // Tags - The tags to be persisted on the agent pool virtual machine scale set. - Tags map[string]*string `json:"tags"` - // NodeLabels - The node labels to be persisted across all nodes in agent pool. - NodeLabels map[string]*string `json:"nodeLabels"` - // NodeTaints - The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule. - NodeTaints *[]string `json:"nodeTaints,omitempty"` - // ProximityPlacementGroupID - The ID for Proximity Placement Group. - ProximityPlacementGroupID *string `json:"proximityPlacementGroupID,omitempty"` - // KubeletConfig - The Kubelet configuration on the agent pool nodes. - KubeletConfig *KubeletConfig `json:"kubeletConfig,omitempty"` - // LinuxOSConfig - The OS configuration of Linux agent nodes. - LinuxOSConfig *LinuxOSConfig `json:"linuxOSConfig,omitempty"` - // EnableEncryptionAtHost - This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption - EnableEncryptionAtHost *bool `json:"enableEncryptionAtHost,omitempty"` - // EnableUltraSSD - Whether to enable UltraSSD - EnableUltraSSD *bool `json:"enableUltraSSD,omitempty"` - // EnableFIPS - See [Add a FIPS-enabled node pool](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview) for more details. - EnableFIPS *bool `json:"enableFIPS,omitempty"` - // GpuInstanceProfile - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU. Possible values include: 'GPUInstanceProfileMIG1g', 'GPUInstanceProfileMIG2g', 'GPUInstanceProfileMIG3g', 'GPUInstanceProfileMIG4g', 'GPUInstanceProfileMIG7g' - GpuInstanceProfile GPUInstanceProfile `json:"gpuInstanceProfile,omitempty"` - // CreationData - CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot. - CreationData *CreationData `json:"creationData,omitempty"` - // CapacityReservationGroupID - AKS will associate the specified agent pool with the Capacity Reservation Group. 
- CapacityReservationGroupID *string `json:"capacityReservationGroupID,omitempty"` - // HostGroupID - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see [Azure dedicated hosts](https://docs.microsoft.com/azure/virtual-machines/dedicated-hosts). - HostGroupID *string `json:"hostGroupID,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterAgentPoolProfile. -func (mcapp ManagedClusterAgentPoolProfile) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mcapp.Name != nil { - objectMap["name"] = mcapp.Name - } - if mcapp.Count != nil { - objectMap["count"] = mcapp.Count - } - if mcapp.VMSize != nil { - objectMap["vmSize"] = mcapp.VMSize - } - if mcapp.OsDiskSizeGB != nil { - objectMap["osDiskSizeGB"] = mcapp.OsDiskSizeGB - } - if mcapp.OsDiskType != "" { - objectMap["osDiskType"] = mcapp.OsDiskType - } - if mcapp.KubeletDiskType != "" { - objectMap["kubeletDiskType"] = mcapp.KubeletDiskType - } - if mcapp.WorkloadRuntime != "" { - objectMap["workloadRuntime"] = mcapp.WorkloadRuntime - } - if mcapp.MessageOfTheDay != nil { - objectMap["messageOfTheDay"] = mcapp.MessageOfTheDay - } - if mcapp.VnetSubnetID != nil { - objectMap["vnetSubnetID"] = mcapp.VnetSubnetID - } - if mcapp.PodSubnetID != nil { - objectMap["podSubnetID"] = mcapp.PodSubnetID - } - if mcapp.MaxPods != nil { - objectMap["maxPods"] = mcapp.MaxPods - } - if mcapp.OsType != "" { - objectMap["osType"] = mcapp.OsType - } - if mcapp.OsSKU != "" { - objectMap["osSKU"] = mcapp.OsSKU - } - if mcapp.MaxCount != nil { - objectMap["maxCount"] = mcapp.MaxCount - } - if mcapp.MinCount != nil { - objectMap["minCount"] = mcapp.MinCount - } - if mcapp.EnableAutoScaling != nil { - objectMap["enableAutoScaling"] = mcapp.EnableAutoScaling - } - if mcapp.ScaleDownMode != "" { - objectMap["scaleDownMode"] = mcapp.ScaleDownMode - } - if mcapp.Type != "" { - objectMap["type"] = mcapp.Type - } - if mcapp.Mode != "" { - objectMap["mode"] = mcapp.Mode - } - if mcapp.OrchestratorVersion != nil { - objectMap["orchestratorVersion"] = mcapp.OrchestratorVersion - } - if mcapp.CurrentOrchestratorVersion != nil { - objectMap["currentOrchestratorVersion"] = mcapp.CurrentOrchestratorVersion - } - if mcapp.UpgradeSettings != nil { - objectMap["upgradeSettings"] = mcapp.UpgradeSettings - } - if mcapp.PowerState != nil { - objectMap["powerState"] = mcapp.PowerState - } - if mcapp.AvailabilityZones != nil { - objectMap["availabilityZones"] = mcapp.AvailabilityZones - } - if mcapp.EnableNodePublicIP != nil { - objectMap["enableNodePublicIP"] = mcapp.EnableNodePublicIP - } - if mcapp.NodePublicIPPrefixID != nil { - objectMap["nodePublicIPPrefixID"] = mcapp.NodePublicIPPrefixID - } - if mcapp.ScaleSetPriority != "" { - objectMap["scaleSetPriority"] = mcapp.ScaleSetPriority - } - if mcapp.ScaleSetEvictionPolicy != "" { - objectMap["scaleSetEvictionPolicy"] = mcapp.ScaleSetEvictionPolicy - } - if mcapp.SpotMaxPrice != nil { - objectMap["spotMaxPrice"] = mcapp.SpotMaxPrice - } - if mcapp.Tags != nil { - objectMap["tags"] = mcapp.Tags - } - if mcapp.NodeLabels != nil { - objectMap["nodeLabels"] = mcapp.NodeLabels - } - if mcapp.NodeTaints != nil { - objectMap["nodeTaints"] = mcapp.NodeTaints - } - if mcapp.ProximityPlacementGroupID != nil { - objectMap["proximityPlacementGroupID"] = mcapp.ProximityPlacementGroupID - } - if mcapp.KubeletConfig != nil { - objectMap["kubeletConfig"] = mcapp.KubeletConfig - } - if 
mcapp.LinuxOSConfig != nil { - objectMap["linuxOSConfig"] = mcapp.LinuxOSConfig - } - if mcapp.EnableEncryptionAtHost != nil { - objectMap["enableEncryptionAtHost"] = mcapp.EnableEncryptionAtHost - } - if mcapp.EnableUltraSSD != nil { - objectMap["enableUltraSSD"] = mcapp.EnableUltraSSD - } - if mcapp.EnableFIPS != nil { - objectMap["enableFIPS"] = mcapp.EnableFIPS - } - if mcapp.GpuInstanceProfile != "" { - objectMap["gpuInstanceProfile"] = mcapp.GpuInstanceProfile - } - if mcapp.CreationData != nil { - objectMap["creationData"] = mcapp.CreationData - } - if mcapp.CapacityReservationGroupID != nil { - objectMap["capacityReservationGroupID"] = mcapp.CapacityReservationGroupID - } - if mcapp.HostGroupID != nil { - objectMap["hostGroupID"] = mcapp.HostGroupID - } - return json.Marshal(objectMap) -} - -// ManagedClusterAgentPoolProfileProperties properties for the container service agent pool profile. -type ManagedClusterAgentPoolProfileProperties struct { - // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for system pools. The default value is 1. - Count *int32 `json:"count,omitempty"` - // VMSize - VM size availability varies by region. If a node contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly. For more details on restricted VM sizes, see: https://docs.microsoft.com/azure/aks/quotas-skus-regions - VMSize *string `json:"vmSize,omitempty"` - OsDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"` - // OsDiskType - Possible values include: 'OSDiskTypeManaged', 'OSDiskTypeEphemeral' - OsDiskType OSDiskType `json:"osDiskType,omitempty"` - // KubeletDiskType - Possible values include: 'KubeletDiskTypeOS', 'KubeletDiskTypeTemporary' - KubeletDiskType KubeletDiskType `json:"kubeletDiskType,omitempty"` - // WorkloadRuntime - Possible values include: 'WorkloadRuntimeOCIContainer', 'WorkloadRuntimeWasmWasi' - WorkloadRuntime WorkloadRuntime `json:"workloadRuntime,omitempty"` - // MessageOfTheDay - A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It must not be specified for Windows nodes. It must be a static string (i.e., will be printed raw and not be executed as a script). - MessageOfTheDay *string `json:"messageOfTheDay,omitempty"` - // VnetSubnetID - If this is not specified, a VNET and subnet will be generated and used. If no podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes. This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName} - VnetSubnetID *string `json:"vnetSubnetID,omitempty"` - // PodSubnetID - If omitted, pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName} - PodSubnetID *string `json:"podSubnetID,omitempty"` - // MaxPods - The maximum number of pods that can run on a node. 
- MaxPods *int32 `json:"maxPods,omitempty"` - // OsType - Possible values include: 'OSTypeLinux', 'OSTypeWindows' - OsType OSType `json:"osType,omitempty"` - // OsSKU - Possible values include: 'OSSKUUbuntu', 'OSSKUCBLMariner' - OsSKU OSSKU `json:"osSKU,omitempty"` - // MaxCount - The maximum number of nodes for auto-scaling - MaxCount *int32 `json:"maxCount,omitempty"` - // MinCount - The minimum number of nodes for auto-scaling - MinCount *int32 `json:"minCount,omitempty"` - // EnableAutoScaling - Whether to enable auto-scaler - EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"` - // ScaleDownMode - This also affects the cluster autoscaler behavior. If not specified, it defaults to Delete. Possible values include: 'ScaleDownModeDelete', 'ScaleDownModeDeallocate' - ScaleDownMode ScaleDownMode `json:"scaleDownMode,omitempty"` - // Type - Possible values include: 'AgentPoolTypeVirtualMachineScaleSets', 'AgentPoolTypeAvailabilitySet' - Type AgentPoolType `json:"type,omitempty"` - // Mode - Possible values include: 'AgentPoolModeSystem', 'AgentPoolModeUser' - Mode AgentPoolMode `json:"mode,omitempty"` - // OrchestratorVersion - Both a full patch version (major.minor.patch) and a minor version (major.minor) are supported. When only major.minor is specified, the latest supported patch version is chosen automatically. Updating the agent pool with the same major.minor once it has been created will not trigger an upgrade, even if a newer patch version is available. As a best practice, you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node pool version must have the same major version as the control plane. The node pool minor version must be within two minor versions of the control plane version. The node pool version cannot be greater than the control plane version. For more information see [upgrading a node pool](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#upgrade-a-node-pool). - OrchestratorVersion *string `json:"orchestratorVersion,omitempty"` - // CurrentOrchestratorVersion - If orchestratorVersion was a fully specified version (major.minor.patch), this field will be exactly equal to it. If orchestratorVersion was only major.minor, this field will contain the full major.minor.patch version being used. - CurrentOrchestratorVersion *string `json:"currentOrchestratorVersion,omitempty"` - // NodeImageVersion - READ-ONLY; The version of node image - NodeImageVersion *string `json:"nodeImageVersion,omitempty"` - // UpgradeSettings - Settings for upgrading the agentpool - UpgradeSettings *AgentPoolUpgradeSettings `json:"upgradeSettings,omitempty"` - // ProvisioningState - READ-ONLY; The current deployment or provisioning state. - ProvisioningState *string `json:"provisioningState,omitempty"` - // PowerState - When an Agent Pool is first created it is initially Running. The Agent Pool can be stopped by setting this field to Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An Agent Pool can only be stopped if it is Running and provisioning state is Succeeded - PowerState *PowerState `json:"powerState,omitempty"` - // AvailabilityZones - The list of Availability zones to use for nodes. This can only be specified if the AgentPoolType property is 'VirtualMachineScaleSets'. - AvailabilityZones *[]string `json:"availabilityZones,omitempty"` - // EnableNodePublicIP - Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses. A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops.
For more information see [assigning a public IP per node](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#assign-a-public-ip-per-node-for-your-node-pools). The default is false. - EnableNodePublicIP *bool `json:"enableNodePublicIP,omitempty"` - // NodePublicIPPrefixID - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName} - NodePublicIPPrefixID *string `json:"nodePublicIPPrefixID,omitempty"` - // ScaleSetPriority - The Virtual Machine Scale Set priority. If not specified, the default is 'Regular'. Possible values include: 'ScaleSetPrioritySpot', 'ScaleSetPriorityRegular' - ScaleSetPriority ScaleSetPriority `json:"scaleSetPriority,omitempty"` - // ScaleSetEvictionPolicy - This cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is 'Delete'. Possible values include: 'ScaleSetEvictionPolicyDelete', 'ScaleSetEvictionPolicyDeallocate' - ScaleSetEvictionPolicy ScaleSetEvictionPolicy `json:"scaleSetEvictionPolicy,omitempty"` - // SpotMaxPrice - Possible values are any decimal value greater than zero or -1 which indicates the willingness to pay any on-demand price. For more details on spot pricing, see [spot VMs pricing](https://docs.microsoft.com/azure/virtual-machines/spot-vms#pricing) - SpotMaxPrice *float64 `json:"spotMaxPrice,omitempty"` - // Tags - The tags to be persisted on the agent pool virtual machine scale set. - Tags map[string]*string `json:"tags"` - // NodeLabels - The node labels to be persisted across all nodes in agent pool. - NodeLabels map[string]*string `json:"nodeLabels"` - // NodeTaints - The taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule. - NodeTaints *[]string `json:"nodeTaints,omitempty"` - // ProximityPlacementGroupID - The ID for Proximity Placement Group. - ProximityPlacementGroupID *string `json:"proximityPlacementGroupID,omitempty"` - // KubeletConfig - The Kubelet configuration on the agent pool nodes. - KubeletConfig *KubeletConfig `json:"kubeletConfig,omitempty"` - // LinuxOSConfig - The OS configuration of Linux agent nodes. - LinuxOSConfig *LinuxOSConfig `json:"linuxOSConfig,omitempty"` - // EnableEncryptionAtHost - This is only supported on certain VM sizes and in certain Azure regions. For more information, see: https://docs.microsoft.com/azure/aks/enable-host-encryption - EnableEncryptionAtHost *bool `json:"enableEncryptionAtHost,omitempty"` - // EnableUltraSSD - Whether to enable UltraSSD - EnableUltraSSD *bool `json:"enableUltraSSD,omitempty"` - // EnableFIPS - See [Add a FIPS-enabled node pool](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview) for more details. - EnableFIPS *bool `json:"enableFIPS,omitempty"` - // GpuInstanceProfile - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU. Possible values include: 'GPUInstanceProfileMIG1g', 'GPUInstanceProfileMIG2g', 'GPUInstanceProfileMIG3g', 'GPUInstanceProfileMIG4g', 'GPUInstanceProfileMIG7g' - GpuInstanceProfile GPUInstanceProfile `json:"gpuInstanceProfile,omitempty"` - // CreationData - CreationData to be used to specify the source Snapshot ID if the node pool will be created/upgraded using a snapshot. - CreationData *CreationData `json:"creationData,omitempty"` - // CapacityReservationGroupID - AKS will associate the specified agent pool with the Capacity Reservation Group. 
- CapacityReservationGroupID *string `json:"capacityReservationGroupID,omitempty"` - // HostGroupID - This is of the form: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}. For more information see [Azure dedicated hosts](https://docs.microsoft.com/azure/virtual-machines/dedicated-hosts). - HostGroupID *string `json:"hostGroupID,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterAgentPoolProfileProperties. -func (mcappp ManagedClusterAgentPoolProfileProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mcappp.Count != nil { - objectMap["count"] = mcappp.Count - } - if mcappp.VMSize != nil { - objectMap["vmSize"] = mcappp.VMSize - } - if mcappp.OsDiskSizeGB != nil { - objectMap["osDiskSizeGB"] = mcappp.OsDiskSizeGB - } - if mcappp.OsDiskType != "" { - objectMap["osDiskType"] = mcappp.OsDiskType - } - if mcappp.KubeletDiskType != "" { - objectMap["kubeletDiskType"] = mcappp.KubeletDiskType - } - if mcappp.WorkloadRuntime != "" { - objectMap["workloadRuntime"] = mcappp.WorkloadRuntime - } - if mcappp.MessageOfTheDay != nil { - objectMap["messageOfTheDay"] = mcappp.MessageOfTheDay - } - if mcappp.VnetSubnetID != nil { - objectMap["vnetSubnetID"] = mcappp.VnetSubnetID - } - if mcappp.PodSubnetID != nil { - objectMap["podSubnetID"] = mcappp.PodSubnetID - } - if mcappp.MaxPods != nil { - objectMap["maxPods"] = mcappp.MaxPods - } - if mcappp.OsType != "" { - objectMap["osType"] = mcappp.OsType - } - if mcappp.OsSKU != "" { - objectMap["osSKU"] = mcappp.OsSKU - } - if mcappp.MaxCount != nil { - objectMap["maxCount"] = mcappp.MaxCount - } - if mcappp.MinCount != nil { - objectMap["minCount"] = mcappp.MinCount - } - if mcappp.EnableAutoScaling != nil { - objectMap["enableAutoScaling"] = mcappp.EnableAutoScaling - } - if mcappp.ScaleDownMode != "" { - objectMap["scaleDownMode"] = mcappp.ScaleDownMode - } - if mcappp.Type != "" { - objectMap["type"] = mcappp.Type - } - if mcappp.Mode != "" { - objectMap["mode"] = mcappp.Mode - } - if mcappp.OrchestratorVersion != nil { - objectMap["orchestratorVersion"] = mcappp.OrchestratorVersion - } - if mcappp.CurrentOrchestratorVersion != nil { - objectMap["currentOrchestratorVersion"] = mcappp.CurrentOrchestratorVersion - } - if mcappp.UpgradeSettings != nil { - objectMap["upgradeSettings"] = mcappp.UpgradeSettings - } - if mcappp.PowerState != nil { - objectMap["powerState"] = mcappp.PowerState - } - if mcappp.AvailabilityZones != nil { - objectMap["availabilityZones"] = mcappp.AvailabilityZones - } - if mcappp.EnableNodePublicIP != nil { - objectMap["enableNodePublicIP"] = mcappp.EnableNodePublicIP - } - if mcappp.NodePublicIPPrefixID != nil { - objectMap["nodePublicIPPrefixID"] = mcappp.NodePublicIPPrefixID - } - if mcappp.ScaleSetPriority != "" { - objectMap["scaleSetPriority"] = mcappp.ScaleSetPriority - } - if mcappp.ScaleSetEvictionPolicy != "" { - objectMap["scaleSetEvictionPolicy"] = mcappp.ScaleSetEvictionPolicy - } - if mcappp.SpotMaxPrice != nil { - objectMap["spotMaxPrice"] = mcappp.SpotMaxPrice - } - if mcappp.Tags != nil { - objectMap["tags"] = mcappp.Tags - } - if mcappp.NodeLabels != nil { - objectMap["nodeLabels"] = mcappp.NodeLabels - } - if mcappp.NodeTaints != nil { - objectMap["nodeTaints"] = mcappp.NodeTaints - } - if mcappp.ProximityPlacementGroupID != nil { - objectMap["proximityPlacementGroupID"] = mcappp.ProximityPlacementGroupID - } - if mcappp.KubeletConfig != nil { - objectMap["kubeletConfig"] = 
mcappp.KubeletConfig - } - if mcappp.LinuxOSConfig != nil { - objectMap["linuxOSConfig"] = mcappp.LinuxOSConfig - } - if mcappp.EnableEncryptionAtHost != nil { - objectMap["enableEncryptionAtHost"] = mcappp.EnableEncryptionAtHost - } - if mcappp.EnableUltraSSD != nil { - objectMap["enableUltraSSD"] = mcappp.EnableUltraSSD - } - if mcappp.EnableFIPS != nil { - objectMap["enableFIPS"] = mcappp.EnableFIPS - } - if mcappp.GpuInstanceProfile != "" { - objectMap["gpuInstanceProfile"] = mcappp.GpuInstanceProfile - } - if mcappp.CreationData != nil { - objectMap["creationData"] = mcappp.CreationData - } - if mcappp.CapacityReservationGroupID != nil { - objectMap["capacityReservationGroupID"] = mcappp.CapacityReservationGroupID - } - if mcappp.HostGroupID != nil { - objectMap["hostGroupID"] = mcappp.HostGroupID - } - return json.Marshal(objectMap) -} - -// ManagedClusterAPIServerAccessProfile access profile for managed cluster API server. -type ManagedClusterAPIServerAccessProfile struct { - // AuthorizedIPRanges - IP ranges are specified in CIDR format, e.g. 137.117.106.88/29. This feature is not compatible with clusters that use Public IP Per Node, or clusters that are using a Basic Load Balancer. For more information see [API server authorized IP ranges](https://docs.microsoft.com/azure/aks/api-server-authorized-ip-ranges). - AuthorizedIPRanges *[]string `json:"authorizedIPRanges,omitempty"` - // EnablePrivateCluster - For more details, see [Creating a private AKS cluster](https://docs.microsoft.com/azure/aks/private-clusters). - EnablePrivateCluster *bool `json:"enablePrivateCluster,omitempty"` - // PrivateDNSZone - The default is System. For more details see [configure private DNS zone](https://docs.microsoft.com/azure/aks/private-clusters#configure-private-dns-zone). Allowed values are 'system' and 'none'. - PrivateDNSZone *string `json:"privateDNSZone,omitempty"` - // EnablePrivateClusterPublicFQDN - Whether to create additional public FQDN for private cluster or not. - EnablePrivateClusterPublicFQDN *bool `json:"enablePrivateClusterPublicFQDN,omitempty"` - // DisableRunCommand - Whether to disable run command for the cluster or not. - DisableRunCommand *bool `json:"disableRunCommand,omitempty"` -} - -// ManagedClusterAutoUpgradeProfile auto upgrade profile for a managed cluster. -type ManagedClusterAutoUpgradeProfile struct { - // UpgradeChannel - For more information see [setting the AKS cluster auto-upgrade channel](https://docs.microsoft.com/azure/aks/upgrade-cluster#set-auto-upgrade-channel). Possible values include: 'UpgradeChannelRapid', 'UpgradeChannelStable', 'UpgradeChannelPatch', 'UpgradeChannelNodeImage', 'UpgradeChannelNone' - UpgradeChannel UpgradeChannel `json:"upgradeChannel,omitempty"` -} - -// ManagedClusterHTTPProxyConfig cluster HTTP proxy configuration. -type ManagedClusterHTTPProxyConfig struct { - // HTTPProxy - The HTTP proxy server endpoint to use. - HTTPProxy *string `json:"httpProxy,omitempty"` - // HTTPSProxy - The HTTPS proxy server endpoint to use. - HTTPSProxy *string `json:"httpsProxy,omitempty"` - // NoProxy - The endpoints that should not go through proxy. - NoProxy *[]string `json:"noProxy,omitempty"` - // EffectiveNoProxy - READ-ONLY; A read-only list of all endpoints for which traffic should not be sent to the proxy. This list is a superset of noProxy and values injected by AKS. - EffectiveNoProxy *[]string `json:"effectiveNoProxy,omitempty"` - // TrustedCa - Alternative CA cert to use for connecting to proxy servers. 
- TrustedCa *string `json:"trustedCa,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterHTTPProxyConfig. -func (mchpc ManagedClusterHTTPProxyConfig) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mchpc.HTTPProxy != nil { - objectMap["httpProxy"] = mchpc.HTTPProxy - } - if mchpc.HTTPSProxy != nil { - objectMap["httpsProxy"] = mchpc.HTTPSProxy - } - if mchpc.NoProxy != nil { - objectMap["noProxy"] = mchpc.NoProxy - } - if mchpc.TrustedCa != nil { - objectMap["trustedCa"] = mchpc.TrustedCa - } - return json.Marshal(objectMap) -} - -// ManagedClusterIdentity identity for the managed cluster. -type ManagedClusterIdentity struct { - // PrincipalID - READ-ONLY; The principal id of the system assigned identity which is used by master components. - PrincipalID *string `json:"principalId,omitempty"` - // TenantID - READ-ONLY; The tenant id of the system assigned identity which is used by master components. - TenantID *string `json:"tenantId,omitempty"` - // Type - For more information see [use managed identities in AKS](https://docs.microsoft.com/azure/aks/use-managed-identity). Possible values include: 'ResourceIdentityTypeSystemAssigned', 'ResourceIdentityTypeUserAssigned', 'ResourceIdentityTypeNone' - Type ResourceIdentityType `json:"type,omitempty"` - // UserAssignedIdentities - The keys must be ARM resource IDs in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - UserAssignedIdentities map[string]*ManagedClusterIdentityUserAssignedIdentitiesValue `json:"userAssignedIdentities"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterIdentity. -func (mci ManagedClusterIdentity) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mci.Type != "" { - objectMap["type"] = mci.Type - } - if mci.UserAssignedIdentities != nil { - objectMap["userAssignedIdentities"] = mci.UserAssignedIdentities - } - return json.Marshal(objectMap) -} - -// ManagedClusterIdentityUserAssignedIdentitiesValue ... -type ManagedClusterIdentityUserAssignedIdentitiesValue struct { - // PrincipalID - READ-ONLY; The principal id of user assigned identity. - PrincipalID *string `json:"principalId,omitempty"` - // ClientID - READ-ONLY; The client id of user assigned identity. - ClientID *string `json:"clientId,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterIdentityUserAssignedIdentitiesValue. -func (mciAiv ManagedClusterIdentityUserAssignedIdentitiesValue) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ManagedClusterIngressProfile ingress profile for the container service cluster. -type ManagedClusterIngressProfile struct { - // WebAppRouting - Web App Routing settings for the ingress profile. - WebAppRouting *ManagedClusterIngressProfileWebAppRouting `json:"webAppRouting,omitempty"` -} - -// ManagedClusterIngressProfileWebAppRouting web App Routing settings for the ingress profile. -type ManagedClusterIngressProfileWebAppRouting struct { - // Enabled - Whether to enable Web App Routing. - Enabled *bool `json:"enabled,omitempty"` - // DNSZoneResourceID - Resource ID of the DNS Zone to be associated with the web app. Used only when Web App Routing is enabled. - DNSZoneResourceID *string `json:"dnsZoneResourceId,omitempty"` -} - -// ManagedClusterListResult the response from the List Managed Clusters operation. 
-type ManagedClusterListResult struct { - autorest.Response `json:"-"` - // Value - The list of managed clusters. - Value *[]ManagedCluster `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of managed cluster results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterListResult. -func (mclr ManagedClusterListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mclr.Value != nil { - objectMap["value"] = mclr.Value - } - return json.Marshal(objectMap) -} - -// ManagedClusterListResultIterator provides access to a complete listing of ManagedCluster values. -type ManagedClusterListResultIterator struct { - i int - page ManagedClusterListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *ManagedClusterListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *ManagedClusterListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ManagedClusterListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter ManagedClusterListResultIterator) Response() ManagedClusterListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter ManagedClusterListResultIterator) Value() ManagedCluster { - if !iter.page.NotDone() { - return ManagedCluster{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the ManagedClusterListResultIterator type. -func NewManagedClusterListResultIterator(page ManagedClusterListResultPage) ManagedClusterListResultIterator { - return ManagedClusterListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (mclr ManagedClusterListResult) IsEmpty() bool { - return mclr.Value == nil || len(*mclr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (mclr ManagedClusterListResult) hasNextLink() bool { - return mclr.NextLink != nil && len(*mclr.NextLink) != 0 -} - -// managedClusterListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. 
-func (mclr ManagedClusterListResult) managedClusterListResultPreparer(ctx context.Context) (*http.Request, error) { - if !mclr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(mclr.NextLink))) -} - -// ManagedClusterListResultPage contains a page of ManagedCluster values. -type ManagedClusterListResultPage struct { - fn func(context.Context, ManagedClusterListResult) (ManagedClusterListResult, error) - mclr ManagedClusterListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *ManagedClusterListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.mclr) - if err != nil { - return err - } - page.mclr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *ManagedClusterListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ManagedClusterListResultPage) NotDone() bool { - return !page.mclr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page ManagedClusterListResultPage) Response() ManagedClusterListResult { - return page.mclr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page ManagedClusterListResultPage) Values() []ManagedCluster { - if page.mclr.IsEmpty() { - return nil - } - return *page.mclr.Value -} - -// Creates a new instance of the ManagedClusterListResultPage type. -func NewManagedClusterListResultPage(cur ManagedClusterListResult, getNextPage func(context.Context, ManagedClusterListResult) (ManagedClusterListResult, error)) ManagedClusterListResultPage { - return ManagedClusterListResultPage{ - fn: getNextPage, - mclr: cur, - } -} - -// ManagedClusterLoadBalancerProfile profile of the managed cluster load balancer. -type ManagedClusterLoadBalancerProfile struct { - // ManagedOutboundIPs - Desired managed outbound IPs for the cluster load balancer. - ManagedOutboundIPs *ManagedClusterLoadBalancerProfileManagedOutboundIPs `json:"managedOutboundIPs,omitempty"` - // OutboundIPPrefixes - Desired outbound IP Prefix resources for the cluster load balancer. - OutboundIPPrefixes *ManagedClusterLoadBalancerProfileOutboundIPPrefixes `json:"outboundIPPrefixes,omitempty"` - // OutboundIPs - Desired outbound IP resources for the cluster load balancer. - OutboundIPs *ManagedClusterLoadBalancerProfileOutboundIPs `json:"outboundIPs,omitempty"` - // EffectiveOutboundIPs - The effective outbound IP resources of the cluster load balancer. - EffectiveOutboundIPs *[]ResourceReference `json:"effectiveOutboundIPs,omitempty"` - // AllocatedOutboundPorts - The desired number of allocated SNAT ports per VM. 
Allowed values are in the range of 0 to 64000 (inclusive). The default value is 0 which results in Azure dynamically allocating ports. - AllocatedOutboundPorts *int32 `json:"allocatedOutboundPorts,omitempty"` - // IdleTimeoutInMinutes - Desired outbound flow idle timeout in minutes. Allowed values are in the range of 4 to 120 (inclusive). The default value is 30 minutes. - IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` - // EnableMultipleStandardLoadBalancers - Enable multiple standard load balancers per AKS cluster or not. - EnableMultipleStandardLoadBalancers *bool `json:"enableMultipleStandardLoadBalancers,omitempty"` -} - -// ManagedClusterLoadBalancerProfileManagedOutboundIPs desired managed outbound IPs for the cluster load -// balancer. -type ManagedClusterLoadBalancerProfileManagedOutboundIPs struct { - // Count - The desired number of IPv4 outbound IPs created/managed by Azure for the cluster load balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1. - Count *int32 `json:"count,omitempty"` - // CountIPv6 - The desired number of IPv6 outbound IPs created/managed by Azure for the cluster load balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 0 for single-stack and 1 for dual-stack. - CountIPv6 *int32 `json:"countIPv6,omitempty"` -} - -// ManagedClusterLoadBalancerProfileOutboundIPPrefixes desired outbound IP Prefix resources for the cluster -// load balancer. -type ManagedClusterLoadBalancerProfileOutboundIPPrefixes struct { - // PublicIPPrefixes - A list of public IP prefix resources. - PublicIPPrefixes *[]ResourceReference `json:"publicIPPrefixes,omitempty"` -} - -// ManagedClusterLoadBalancerProfileOutboundIPs desired outbound IP resources for the cluster load -// balancer. -type ManagedClusterLoadBalancerProfileOutboundIPs struct { - // PublicIPs - A list of public IP resources. - PublicIPs *[]ResourceReference `json:"publicIPs,omitempty"` -} - -// ManagedClusterManagedOutboundIPProfile profile of the managed outbound IP resources of the managed -// cluster. -type ManagedClusterManagedOutboundIPProfile struct { - // Count - The desired number of outbound IPs created/managed by Azure. Allowed values must be in the range of 1 to 16 (inclusive). The default value is 1. - Count *int32 `json:"count,omitempty"` -} - -// ManagedClusterNATGatewayProfile profile of the managed cluster NAT gateway. -type ManagedClusterNATGatewayProfile struct { - // ManagedOutboundIPProfile - Profile of the managed outbound IP resources of the cluster NAT gateway. - ManagedOutboundIPProfile *ManagedClusterManagedOutboundIPProfile `json:"managedOutboundIPProfile,omitempty"` - // EffectiveOutboundIPs - The effective outbound IP resources of the cluster NAT gateway. - EffectiveOutboundIPs *[]ResourceReference `json:"effectiveOutboundIPs,omitempty"` - // IdleTimeoutInMinutes - Desired outbound flow idle timeout in minutes. Allowed values are in the range of 4 to 120 (inclusive). The default value is 4 minutes. - IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` -} - -// ManagedClusterOIDCIssuerProfile the OIDC issuer profile of the Managed Cluster. -type ManagedClusterOIDCIssuerProfile struct { - // IssuerURL - READ-ONLY; The OIDC issuer url of the Managed Cluster. - IssuerURL *string `json:"issuerURL,omitempty"` - // Enabled - Whether the OIDC issuer is enabled. - Enabled *bool `json:"enabled,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterOIDCIssuerProfile. 
-func (mcoip ManagedClusterOIDCIssuerProfile) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mcoip.Enabled != nil { - objectMap["enabled"] = mcoip.Enabled - } - return json.Marshal(objectMap) -} - -// ManagedClusterPodIdentity details about the pod identity assigned to the Managed Cluster. -type ManagedClusterPodIdentity struct { - // Name - The name of the pod identity. - Name *string `json:"name,omitempty"` - // Namespace - The namespace of the pod identity. - Namespace *string `json:"namespace,omitempty"` - // BindingSelector - The binding selector to use for the AzureIdentityBinding resource. - BindingSelector *string `json:"bindingSelector,omitempty"` - // Identity - The user assigned identity details. - Identity *UserAssignedIdentity `json:"identity,omitempty"` - // ProvisioningState - READ-ONLY; The current provisioning state of the pod identity. Possible values include: 'ManagedClusterPodIdentityProvisioningStateAssigned', 'ManagedClusterPodIdentityProvisioningStateUpdating', 'ManagedClusterPodIdentityProvisioningStateDeleting', 'ManagedClusterPodIdentityProvisioningStateFailed' - ProvisioningState ManagedClusterPodIdentityProvisioningState `json:"provisioningState,omitempty"` - // ProvisioningInfo - READ-ONLY - ProvisioningInfo *ManagedClusterPodIdentityProvisioningInfo `json:"provisioningInfo,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterPodIdentity. -func (mcpi ManagedClusterPodIdentity) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mcpi.Name != nil { - objectMap["name"] = mcpi.Name - } - if mcpi.Namespace != nil { - objectMap["namespace"] = mcpi.Namespace - } - if mcpi.BindingSelector != nil { - objectMap["bindingSelector"] = mcpi.BindingSelector - } - if mcpi.Identity != nil { - objectMap["identity"] = mcpi.Identity - } - return json.Marshal(objectMap) -} - -// ManagedClusterPodIdentityException see [disable AAD Pod Identity for a specific -// Pod/Application](https://azure.github.io/aad-pod-identity/docs/configure/application_exception/) for -// more details. -type ManagedClusterPodIdentityException struct { - // Name - The name of the pod identity exception. - Name *string `json:"name,omitempty"` - // Namespace - The namespace of the pod identity exception. - Namespace *string `json:"namespace,omitempty"` - // PodLabels - The pod labels to match. - PodLabels map[string]*string `json:"podLabels"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterPodIdentityException. -func (mcpie ManagedClusterPodIdentityException) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mcpie.Name != nil { - objectMap["name"] = mcpie.Name - } - if mcpie.Namespace != nil { - objectMap["namespace"] = mcpie.Namespace - } - if mcpie.PodLabels != nil { - objectMap["podLabels"] = mcpie.PodLabels - } - return json.Marshal(objectMap) -} - -// ManagedClusterPodIdentityProfile see [use AAD pod -// identity](https://docs.microsoft.com/azure/aks/use-azure-ad-pod-identity) for more details on pod -// identity integration. -type ManagedClusterPodIdentityProfile struct { - // Enabled - Whether the pod identity addon is enabled. - Enabled *bool `json:"enabled,omitempty"` - // AllowNetworkPluginKubenet - Running in Kubenet is disabled by default due to the security related nature of AAD Pod Identity and the risks of IP spoofing. 
See [using Kubenet network plugin with AAD Pod Identity](https://docs.microsoft.com/azure/aks/use-azure-ad-pod-identity#using-kubenet-network-plugin-with-azure-active-directory-pod-managed-identities) for more information. - AllowNetworkPluginKubenet *bool `json:"allowNetworkPluginKubenet,omitempty"` - // UserAssignedIdentities - The pod identities to use in the cluster. - UserAssignedIdentities *[]ManagedClusterPodIdentity `json:"userAssignedIdentities,omitempty"` - // UserAssignedIdentityExceptions - The pod identity exceptions to allow. - UserAssignedIdentityExceptions *[]ManagedClusterPodIdentityException `json:"userAssignedIdentityExceptions,omitempty"` -} - -// ManagedClusterPodIdentityProvisioningError an error response from the pod identity provisioning. -type ManagedClusterPodIdentityProvisioningError struct { - // Error - Details about the error. - Error *ManagedClusterPodIdentityProvisioningErrorBody `json:"error,omitempty"` -} - -// ManagedClusterPodIdentityProvisioningErrorBody an error response from the pod identity provisioning. -type ManagedClusterPodIdentityProvisioningErrorBody struct { - // Code - An identifier for the error. Codes are invariant and are intended to be consumed programmatically. - Code *string `json:"code,omitempty"` - // Message - A message describing the error, intended to be suitable for display in a user interface. - Message *string `json:"message,omitempty"` - // Target - The target of the particular error. For example, the name of the property in error. - Target *string `json:"target,omitempty"` - // Details - A list of additional details about the error. - Details *[]ManagedClusterPodIdentityProvisioningErrorBody `json:"details,omitempty"` -} - -// ManagedClusterPodIdentityProvisioningInfo ... -type ManagedClusterPodIdentityProvisioningInfo struct { - // Error - Pod identity assignment error (if any). - Error *ManagedClusterPodIdentityProvisioningError `json:"error,omitempty"` -} - -// ManagedClusterPoolUpgradeProfile the list of available upgrade versions. -type ManagedClusterPoolUpgradeProfile struct { - // KubernetesVersion - The Kubernetes version (major.minor.patch). - KubernetesVersion *string `json:"kubernetesVersion,omitempty"` - // Name - The Agent Pool name. - Name *string `json:"name,omitempty"` - // OsType - Possible values include: 'OSTypeLinux', 'OSTypeWindows' - OsType OSType `json:"osType,omitempty"` - // Upgrades - List of orchestrator types and versions available for upgrade. - Upgrades *[]ManagedClusterPoolUpgradeProfileUpgradesItem `json:"upgrades,omitempty"` -} - -// ManagedClusterPoolUpgradeProfileUpgradesItem ... -type ManagedClusterPoolUpgradeProfileUpgradesItem struct { - // KubernetesVersion - The Kubernetes version (major.minor.patch). - KubernetesVersion *string `json:"kubernetesVersion,omitempty"` - // IsPreview - Whether the Kubernetes version is currently in preview. - IsPreview *bool `json:"isPreview,omitempty"` -} - -// ManagedClusterProperties properties of the managed cluster. -type ManagedClusterProperties struct { - // ProvisioningState - READ-ONLY; The current provisioning state. - ProvisioningState *string `json:"provisioningState,omitempty"` - // PowerState - READ-ONLY; The Power State of the cluster. - PowerState *PowerState `json:"powerState,omitempty"` - // CreationData - CreationData to be used to specify the source Snapshot ID if the cluster will be created/upgraded using a snapshot. 
- CreationData *CreationData `json:"creationData,omitempty"` - // MaxAgentPools - READ-ONLY; The max number of agent pools for the managed cluster. - MaxAgentPools *int32 `json:"maxAgentPools,omitempty"` - // KubernetesVersion - When you upgrade a supported AKS cluster, Kubernetes minor versions cannot be skipped. All upgrades must be performed sequentially by major version number. For example, upgrades between 1.14.x -> 1.15.x or 1.15.x -> 1.16.x are allowed, however 1.14.x -> 1.16.x is not allowed. See [upgrading an AKS cluster](https://docs.microsoft.com/azure/aks/upgrade-cluster) for more details. - KubernetesVersion *string `json:"kubernetesVersion,omitempty"` - // CurrentKubernetesVersion - READ-ONLY; The version of Kubernetes the Managed Cluster is running. - CurrentKubernetesVersion *string `json:"currentKubernetesVersion,omitempty"` - // DNSPrefix - This cannot be updated once the Managed Cluster has been created. - DNSPrefix *string `json:"dnsPrefix,omitempty"` - // FqdnSubdomain - This cannot be updated once the Managed Cluster has been created. - FqdnSubdomain *string `json:"fqdnSubdomain,omitempty"` - // Fqdn - READ-ONLY; The FQDN of the master pool. - Fqdn *string `json:"fqdn,omitempty"` - // PrivateFQDN - READ-ONLY; The FQDN of private cluster. - PrivateFQDN *string `json:"privateFQDN,omitempty"` - // AzurePortalFQDN - READ-ONLY; The Azure Portal requires certain Cross-Origin Resource Sharing (CORS) headers to be sent in some responses, which Kubernetes APIServer doesn't handle by default. This special FQDN supports CORS, allowing the Azure Portal to function properly. - AzurePortalFQDN *string `json:"azurePortalFQDN,omitempty"` - // AgentPoolProfiles - The agent pool properties. - AgentPoolProfiles *[]ManagedClusterAgentPoolProfile `json:"agentPoolProfiles,omitempty"` - // LinuxProfile - The profile for Linux VMs in the Managed Cluster. - LinuxProfile *LinuxProfile `json:"linuxProfile,omitempty"` - // WindowsProfile - The profile for Windows VMs in the Managed Cluster. - WindowsProfile *ManagedClusterWindowsProfile `json:"windowsProfile,omitempty"` - // ServicePrincipalProfile - Information about a service principal identity for the cluster to use for manipulating Azure APIs. - ServicePrincipalProfile *ManagedClusterServicePrincipalProfile `json:"servicePrincipalProfile,omitempty"` - // AddonProfiles - The profile of managed cluster add-on. - AddonProfiles map[string]*ManagedClusterAddonProfile `json:"addonProfiles"` - // PodIdentityProfile - See [use AAD pod identity](https://docs.microsoft.com/azure/aks/use-azure-ad-pod-identity) for more details on AAD pod identity integration. - PodIdentityProfile *ManagedClusterPodIdentityProfile `json:"podIdentityProfile,omitempty"` - // OidcIssuerProfile - The OIDC issuer profile of the Managed Cluster. - OidcIssuerProfile *ManagedClusterOIDCIssuerProfile `json:"oidcIssuerProfile,omitempty"` - // NodeResourceGroup - The name of the resource group containing agent pool nodes. - NodeResourceGroup *string `json:"nodeResourceGroup,omitempty"` - // EnableRBAC - Whether to enable Kubernetes Role-Based Access Control. - EnableRBAC *bool `json:"enableRBAC,omitempty"` - // EnablePodSecurityPolicy - (DEPRECATING) Whether to enable Kubernetes pod security policy (preview). This feature is set for removal on October 15th, 2020. Learn more at aka.ms/aks/azpodpolicy. - EnablePodSecurityPolicy *bool `json:"enablePodSecurityPolicy,omitempty"` - // EnableNamespaceResources - The default value is false. 
It can be enabled/disabled on creation and updation of the managed cluster. See [https://aka.ms/NamespaceARMResource](https://aka.ms/NamespaceARMResource) for more details on Namespace as a ARM Resource. - EnableNamespaceResources *bool `json:"enableNamespaceResources,omitempty"` - // NetworkProfile - The network configuration profile. - NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` - // AadProfile - The Azure Active Directory configuration. - AadProfile *ManagedClusterAADProfile `json:"aadProfile,omitempty"` - // AutoUpgradeProfile - The auto upgrade configuration. - AutoUpgradeProfile *ManagedClusterAutoUpgradeProfile `json:"autoUpgradeProfile,omitempty"` - // AutoScalerProfile - Parameters to be applied to the cluster-autoscaler when enabled - AutoScalerProfile *ManagedClusterPropertiesAutoScalerProfile `json:"autoScalerProfile,omitempty"` - // APIServerAccessProfile - The access profile for managed cluster API server. - APIServerAccessProfile *ManagedClusterAPIServerAccessProfile `json:"apiServerAccessProfile,omitempty"` - // DiskEncryptionSetID - This is of the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{encryptionSetName}' - DiskEncryptionSetID *string `json:"diskEncryptionSetID,omitempty"` - // IdentityProfile - Identities associated with the cluster. - IdentityProfile map[string]*UserAssignedIdentity `json:"identityProfile"` - // PrivateLinkResources - Private link resources associated with the cluster. - PrivateLinkResources *[]PrivateLinkResource `json:"privateLinkResources,omitempty"` - // DisableLocalAccounts - If set to true, getting static credentials will be disabled for this cluster. This must only be used on Managed Clusters that are AAD enabled. For more details see [disable local accounts](https://docs.microsoft.com/azure/aks/managed-aad#disable-local-accounts-preview). - DisableLocalAccounts *bool `json:"disableLocalAccounts,omitempty"` - // HTTPProxyConfig - Configurations for provisioning the cluster with HTTP proxy servers. - HTTPProxyConfig *ManagedClusterHTTPProxyConfig `json:"httpProxyConfig,omitempty"` - // SecurityProfile - Security profile for the managed cluster. - SecurityProfile *ManagedClusterSecurityProfile `json:"securityProfile,omitempty"` - // IngressProfile - Ingress profile for the managed cluster. - IngressProfile *ManagedClusterIngressProfile `json:"ingressProfile,omitempty"` - // PublicNetworkAccess - Allow or deny public network access for AKS. Possible values include: 'PublicNetworkAccessEnabled', 'PublicNetworkAccessDisabled' - PublicNetworkAccess PublicNetworkAccess `json:"publicNetworkAccess,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterProperties. 
-func (mcp ManagedClusterProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mcp.CreationData != nil { - objectMap["creationData"] = mcp.CreationData - } - if mcp.KubernetesVersion != nil { - objectMap["kubernetesVersion"] = mcp.KubernetesVersion - } - if mcp.DNSPrefix != nil { - objectMap["dnsPrefix"] = mcp.DNSPrefix - } - if mcp.FqdnSubdomain != nil { - objectMap["fqdnSubdomain"] = mcp.FqdnSubdomain - } - if mcp.AgentPoolProfiles != nil { - objectMap["agentPoolProfiles"] = mcp.AgentPoolProfiles - } - if mcp.LinuxProfile != nil { - objectMap["linuxProfile"] = mcp.LinuxProfile - } - if mcp.WindowsProfile != nil { - objectMap["windowsProfile"] = mcp.WindowsProfile - } - if mcp.ServicePrincipalProfile != nil { - objectMap["servicePrincipalProfile"] = mcp.ServicePrincipalProfile - } - if mcp.AddonProfiles != nil { - objectMap["addonProfiles"] = mcp.AddonProfiles - } - if mcp.PodIdentityProfile != nil { - objectMap["podIdentityProfile"] = mcp.PodIdentityProfile - } - if mcp.OidcIssuerProfile != nil { - objectMap["oidcIssuerProfile"] = mcp.OidcIssuerProfile - } - if mcp.NodeResourceGroup != nil { - objectMap["nodeResourceGroup"] = mcp.NodeResourceGroup - } - if mcp.EnableRBAC != nil { - objectMap["enableRBAC"] = mcp.EnableRBAC - } - if mcp.EnablePodSecurityPolicy != nil { - objectMap["enablePodSecurityPolicy"] = mcp.EnablePodSecurityPolicy - } - if mcp.EnableNamespaceResources != nil { - objectMap["enableNamespaceResources"] = mcp.EnableNamespaceResources - } - if mcp.NetworkProfile != nil { - objectMap["networkProfile"] = mcp.NetworkProfile - } - if mcp.AadProfile != nil { - objectMap["aadProfile"] = mcp.AadProfile - } - if mcp.AutoUpgradeProfile != nil { - objectMap["autoUpgradeProfile"] = mcp.AutoUpgradeProfile - } - if mcp.AutoScalerProfile != nil { - objectMap["autoScalerProfile"] = mcp.AutoScalerProfile - } - if mcp.APIServerAccessProfile != nil { - objectMap["apiServerAccessProfile"] = mcp.APIServerAccessProfile - } - if mcp.DiskEncryptionSetID != nil { - objectMap["diskEncryptionSetID"] = mcp.DiskEncryptionSetID - } - if mcp.IdentityProfile != nil { - objectMap["identityProfile"] = mcp.IdentityProfile - } - if mcp.PrivateLinkResources != nil { - objectMap["privateLinkResources"] = mcp.PrivateLinkResources - } - if mcp.DisableLocalAccounts != nil { - objectMap["disableLocalAccounts"] = mcp.DisableLocalAccounts - } - if mcp.HTTPProxyConfig != nil { - objectMap["httpProxyConfig"] = mcp.HTTPProxyConfig - } - if mcp.SecurityProfile != nil { - objectMap["securityProfile"] = mcp.SecurityProfile - } - if mcp.IngressProfile != nil { - objectMap["ingressProfile"] = mcp.IngressProfile - } - if mcp.PublicNetworkAccess != "" { - objectMap["publicNetworkAccess"] = mcp.PublicNetworkAccess - } - return json.Marshal(objectMap) -} - -// ManagedClusterPropertiesAutoScalerProfile parameters to be applied to the cluster-autoscaler when -// enabled -type ManagedClusterPropertiesAutoScalerProfile struct { - // BalanceSimilarNodeGroups - Valid values are 'true' and 'false' - BalanceSimilarNodeGroups *string `json:"balance-similar-node-groups,omitempty"` - // Expander - If not specified, the default is 'random'. See [expanders](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-expanders) for more information. Possible values include: 'ExpanderLeastWaste', 'ExpanderMostPods', 'ExpanderPriority', 'ExpanderRandom' - Expander Expander `json:"expander,omitempty"` - // MaxEmptyBulkDelete - The default is 10. 
- MaxEmptyBulkDelete *string `json:"max-empty-bulk-delete,omitempty"` - // MaxGracefulTerminationSec - The default is 600. - MaxGracefulTerminationSec *string `json:"max-graceful-termination-sec,omitempty"` - // MaxNodeProvisionTime - The default is '15m'. Values must be an integer followed by an 'm'. No unit of time other than minutes (m) is supported. - MaxNodeProvisionTime *string `json:"max-node-provision-time,omitempty"` - // MaxTotalUnreadyPercentage - The default is 45. The maximum is 100 and the minimum is 0. - MaxTotalUnreadyPercentage *string `json:"max-total-unready-percentage,omitempty"` - // NewPodScaleUpDelay - For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. The default is '0s'. Values must be an integer followed by a unit ('s' for seconds, 'm' for minutes, 'h' for hours, etc). - NewPodScaleUpDelay *string `json:"new-pod-scale-up-delay,omitempty"` - // OkTotalUnreadyCount - This must be an integer. The default is 3. - OkTotalUnreadyCount *string `json:"ok-total-unready-count,omitempty"` - // ScanInterval - The default is '10'. Values must be an integer number of seconds. - ScanInterval *string `json:"scan-interval,omitempty"` - // ScaleDownDelayAfterAdd - The default is '10m'. Values must be an integer followed by an 'm'. No unit of time other than minutes (m) is supported. - ScaleDownDelayAfterAdd *string `json:"scale-down-delay-after-add,omitempty"` - // ScaleDownDelayAfterDelete - The default is the scan-interval. Values must be an integer followed by an 'm'. No unit of time other than minutes (m) is supported. - ScaleDownDelayAfterDelete *string `json:"scale-down-delay-after-delete,omitempty"` - // ScaleDownDelayAfterFailure - The default is '3m'. Values must be an integer followed by an 'm'. No unit of time other than minutes (m) is supported. - ScaleDownDelayAfterFailure *string `json:"scale-down-delay-after-failure,omitempty"` - // ScaleDownUnneededTime - The default is '10m'. Values must be an integer followed by an 'm'. No unit of time other than minutes (m) is supported. - ScaleDownUnneededTime *string `json:"scale-down-unneeded-time,omitempty"` - // ScaleDownUnreadyTime - The default is '20m'. Values must be an integer followed by an 'm'. No unit of time other than minutes (m) is supported. - ScaleDownUnreadyTime *string `json:"scale-down-unready-time,omitempty"` - // ScaleDownUtilizationThreshold - The default is '0.5'. - ScaleDownUtilizationThreshold *string `json:"scale-down-utilization-threshold,omitempty"` - // SkipNodesWithLocalStorage - The default is true. - SkipNodesWithLocalStorage *string `json:"skip-nodes-with-local-storage,omitempty"` - // SkipNodesWithSystemPods - The default is true. - SkipNodesWithSystemPods *string `json:"skip-nodes-with-system-pods,omitempty"` -} - -// ManagedClusterPropertiesForSnapshot managed cluster properties for snapshot, these properties are read -// only. -type ManagedClusterPropertiesForSnapshot struct { - // KubernetesVersion - The current kubernetes version. - KubernetesVersion *string `json:"kubernetesVersion,omitempty"` - // Sku - The current managed cluster sku. - Sku *ManagedClusterSKU `json:"sku,omitempty"` - // EnableRbac - Whether the cluster has enabled Kubernetes Role-Based Access Control or not. - EnableRbac *bool `json:"enableRbac,omitempty"` - // NetworkProfile - The current network profile. 
- NetworkProfile *NetworkProfileForSnapshot `json:"networkProfile,omitempty"` -} - -// ManagedClustersCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type ManagedClustersCreateOrUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(ManagedClustersClient) (ManagedCluster, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *ManagedClustersCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for ManagedClustersCreateOrUpdateFuture.Result. -func (future *ManagedClustersCreateOrUpdateFuture) result(client ManagedClustersClient) (mc ManagedCluster, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - mc.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if mc.Response.Response, err = future.GetResult(sender); err == nil && mc.Response.Response.StatusCode != http.StatusNoContent { - mc, err = client.CreateOrUpdateResponder(mc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersCreateOrUpdateFuture", "Result", mc.Response.Response, "Failure responding to request") - } - } - return -} - -// ManagedClustersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type ManagedClustersDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(ManagedClustersClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *ManagedClustersDeleteFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for ManagedClustersDeleteFuture.Result. -func (future *ManagedClustersDeleteFuture) result(client ManagedClustersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// ManagedClusterSecurityProfile security profile for the container service cluster. -type ManagedClusterSecurityProfile struct { - // AzureDefender - Azure Defender settings for the security profile. 
- AzureDefender *ManagedClusterSecurityProfileAzureDefender `json:"azureDefender,omitempty"` - // AzureKeyVaultKms - Azure Key Vault [key management service](https://kubernetes.io/docs/tasks/administer-cluster/kms-provider/) settings for the security profile. - AzureKeyVaultKms *AzureKeyVaultKms `json:"azureKeyVaultKms,omitempty"` - // WorkloadIdentity - [Workload Identity](https://azure.github.io/azure-workload-identity/docs/) settings for the security profile. - WorkloadIdentity *ManagedClusterSecurityProfileWorkloadIdentity `json:"workloadIdentity,omitempty"` -} - -// ManagedClusterSecurityProfileAzureDefender azure Defender settings for the security profile. -type ManagedClusterSecurityProfileAzureDefender struct { - // Enabled - Whether to enable Azure Defender - Enabled *bool `json:"enabled,omitempty"` - // LogAnalyticsWorkspaceResourceID - Resource ID of the Log Analytics workspace to be associated with Azure Defender. When Azure Defender is enabled, this field is required and must be a valid workspace resource ID. When Azure Defender is disabled, leave the field empty. - LogAnalyticsWorkspaceResourceID *string `json:"logAnalyticsWorkspaceResourceId,omitempty"` -} - -// ManagedClusterSecurityProfileWorkloadIdentity workload Identity settings for the security profile. -type ManagedClusterSecurityProfileWorkloadIdentity struct { - // Enabled - Whether to enable Workload Identity - Enabled *bool `json:"enabled,omitempty"` -} - -// ManagedClusterServicePrincipalProfile information about a service principal identity for the cluster to -// use for manipulating Azure APIs. -type ManagedClusterServicePrincipalProfile struct { - // ClientID - The ID for the service principal. - ClientID *string `json:"clientId,omitempty"` - // Secret - The secret password associated with the service principal in plain text. - Secret *string `json:"secret,omitempty"` -} - -// ManagedClusterSKU the SKU of a Managed Cluster. -type ManagedClusterSKU struct { - // Name - The name of a managed cluster SKU. Possible values include: 'ManagedClusterSKUNameBasic' - Name ManagedClusterSKUName `json:"name,omitempty"` - // Tier - If not specified, the default is 'Free'. See [uptime SLA](https://docs.microsoft.com/azure/aks/uptime-sla) for more details. Possible values include: 'ManagedClusterSKUTierPaid', 'ManagedClusterSKUTierFree' - Tier ManagedClusterSKUTier `json:"tier,omitempty"` -} - -// ManagedClusterSnapshot a managed cluster snapshot resource. -type ManagedClusterSnapshot struct { - autorest.Response `json:"-"` - // ManagedClusterSnapshotProperties - Properties of a managed cluster snapshot. - *ManagedClusterSnapshotProperties `json:"properties,omitempty"` - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // Location - The geo-location where the resource lives - Location *string `json:"location,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` - // SystemData - READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. 
- SystemData *SystemData `json:"systemData,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterSnapshot. -func (mcs ManagedClusterSnapshot) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mcs.ManagedClusterSnapshotProperties != nil { - objectMap["properties"] = mcs.ManagedClusterSnapshotProperties - } - if mcs.Tags != nil { - objectMap["tags"] = mcs.Tags - } - if mcs.Location != nil { - objectMap["location"] = mcs.Location - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ManagedClusterSnapshot struct. -func (mcs *ManagedClusterSnapshot) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var managedClusterSnapshotProperties ManagedClusterSnapshotProperties - err = json.Unmarshal(*v, &managedClusterSnapshotProperties) - if err != nil { - return err - } - mcs.ManagedClusterSnapshotProperties = &managedClusterSnapshotProperties - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - mcs.Tags = tags - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - mcs.Location = &location - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - mcs.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - mcs.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - mcs.Type = &typeVar - } - case "systemData": - if v != nil { - var systemData SystemData - err = json.Unmarshal(*v, &systemData) - if err != nil { - return err - } - mcs.SystemData = &systemData - } - } - } - - return nil -} - -// ManagedClusterSnapshotListResult the response from the List Managed Cluster Snapshots operation. -type ManagedClusterSnapshotListResult struct { - autorest.Response `json:"-"` - // Value - The list of managed cluster snapshots. - Value *[]ManagedClusterSnapshot `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of managed cluster snapshot results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterSnapshotListResult. -func (mcslr ManagedClusterSnapshotListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mcslr.Value != nil { - objectMap["value"] = mcslr.Value - } - return json.Marshal(objectMap) -} - -// ManagedClusterSnapshotListResultIterator provides access to a complete listing of ManagedClusterSnapshot -// values. -type ManagedClusterSnapshotListResultIterator struct { - i int - page ManagedClusterSnapshotListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *ManagedClusterSnapshotListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterSnapshotListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *ManagedClusterSnapshotListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ManagedClusterSnapshotListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter ManagedClusterSnapshotListResultIterator) Response() ManagedClusterSnapshotListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter ManagedClusterSnapshotListResultIterator) Value() ManagedClusterSnapshot { - if !iter.page.NotDone() { - return ManagedClusterSnapshot{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the ManagedClusterSnapshotListResultIterator type. -func NewManagedClusterSnapshotListResultIterator(page ManagedClusterSnapshotListResultPage) ManagedClusterSnapshotListResultIterator { - return ManagedClusterSnapshotListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (mcslr ManagedClusterSnapshotListResult) IsEmpty() bool { - return mcslr.Value == nil || len(*mcslr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (mcslr ManagedClusterSnapshotListResult) hasNextLink() bool { - return mcslr.NextLink != nil && len(*mcslr.NextLink) != 0 -} - -// managedClusterSnapshotListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (mcslr ManagedClusterSnapshotListResult) managedClusterSnapshotListResultPreparer(ctx context.Context) (*http.Request, error) { - if !mcslr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(mcslr.NextLink))) -} - -// ManagedClusterSnapshotListResultPage contains a page of ManagedClusterSnapshot values. -type ManagedClusterSnapshotListResultPage struct { - fn func(context.Context, ManagedClusterSnapshotListResult) (ManagedClusterSnapshotListResult, error) - mcslr ManagedClusterSnapshotListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *ManagedClusterSnapshotListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ManagedClusterSnapshotListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.mcslr) - if err != nil { - return err - } - page.mcslr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *ManagedClusterSnapshotListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ManagedClusterSnapshotListResultPage) NotDone() bool { - return !page.mcslr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page ManagedClusterSnapshotListResultPage) Response() ManagedClusterSnapshotListResult { - return page.mcslr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page ManagedClusterSnapshotListResultPage) Values() []ManagedClusterSnapshot { - if page.mcslr.IsEmpty() { - return nil - } - return *page.mcslr.Value -} - -// Creates a new instance of the ManagedClusterSnapshotListResultPage type. -func NewManagedClusterSnapshotListResultPage(cur ManagedClusterSnapshotListResult, getNextPage func(context.Context, ManagedClusterSnapshotListResult) (ManagedClusterSnapshotListResult, error)) ManagedClusterSnapshotListResultPage { - return ManagedClusterSnapshotListResultPage{ - fn: getNextPage, - mcslr: cur, - } -} - -// ManagedClusterSnapshotProperties properties for a managed cluster snapshot. -type ManagedClusterSnapshotProperties struct { - // CreationData - CreationData to be used to specify the source resource ID to create this snapshot. - CreationData *CreationData `json:"creationData,omitempty"` - // SnapshotType - Possible values include: 'SnapshotTypeNodePool', 'SnapshotTypeManagedCluster' - SnapshotType SnapshotType `json:"snapshotType,omitempty"` - // ManagedClusterPropertiesReadOnly - What the properties will be showed when getting managed cluster snapshot. Those properties are read-only. - ManagedClusterPropertiesReadOnly *ManagedClusterPropertiesForSnapshot `json:"managedClusterPropertiesReadOnly,omitempty"` -} - -// ManagedClustersResetAADProfileFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type ManagedClustersResetAADProfileFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(ManagedClustersClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *ManagedClustersResetAADProfileFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for ManagedClustersResetAADProfileFuture.Result. 
-func (future *ManagedClustersResetAADProfileFuture) result(client ManagedClustersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersResetAADProfileFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersResetAADProfileFuture") - return - } - ar.Response = future.Response() - return -} - -// ManagedClustersResetServicePrincipalProfileFuture an abstraction for monitoring and retrieving the -// results of a long-running operation. -type ManagedClustersResetServicePrincipalProfileFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(ManagedClustersClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *ManagedClustersResetServicePrincipalProfileFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for ManagedClustersResetServicePrincipalProfileFuture.Result. -func (future *ManagedClustersResetServicePrincipalProfileFuture) result(client ManagedClustersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersResetServicePrincipalProfileFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersResetServicePrincipalProfileFuture") - return - } - ar.Response = future.Response() - return -} - -// ManagedClustersRotateClusterCertificatesFuture an abstraction for monitoring and retrieving the results -// of a long-running operation. -type ManagedClustersRotateClusterCertificatesFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(ManagedClustersClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *ManagedClustersRotateClusterCertificatesFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for ManagedClustersRotateClusterCertificatesFuture.Result. 
-func (future *ManagedClustersRotateClusterCertificatesFuture) result(client ManagedClustersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersRotateClusterCertificatesFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersRotateClusterCertificatesFuture") - return - } - ar.Response = future.Response() - return -} - -// ManagedClustersRotateServiceAccountSigningKeysFuture an abstraction for monitoring and retrieving the -// results of a long-running operation. -type ManagedClustersRotateServiceAccountSigningKeysFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(ManagedClustersClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *ManagedClustersRotateServiceAccountSigningKeysFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for ManagedClustersRotateServiceAccountSigningKeysFuture.Result. -func (future *ManagedClustersRotateServiceAccountSigningKeysFuture) result(client ManagedClustersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersRotateServiceAccountSigningKeysFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersRotateServiceAccountSigningKeysFuture") - return - } - ar.Response = future.Response() - return -} - -// ManagedClustersRunCommandFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type ManagedClustersRunCommandFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(ManagedClustersClient) (RunCommandResult, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *ManagedClustersRunCommandFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for ManagedClustersRunCommandFuture.Result. 
-func (future *ManagedClustersRunCommandFuture) result(client ManagedClustersClient) (rcr RunCommandResult, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersRunCommandFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - rcr.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersRunCommandFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if rcr.Response.Response, err = future.GetResult(sender); err == nil && rcr.Response.Response.StatusCode != http.StatusNoContent { - rcr, err = client.RunCommandResponder(rcr.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersRunCommandFuture", "Result", rcr.Response.Response, "Failure responding to request") - } - } - return -} - -// ManagedClustersStartFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type ManagedClustersStartFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(ManagedClustersClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *ManagedClustersStartFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for ManagedClustersStartFuture.Result. -func (future *ManagedClustersStartFuture) result(client ManagedClustersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersStartFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersStartFuture") - return - } - ar.Response = future.Response() - return -} - -// ManagedClustersStopFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type ManagedClustersStopFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(ManagedClustersClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *ManagedClustersStopFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for ManagedClustersStopFuture.Result. 
-func (future *ManagedClustersStopFuture) result(client ManagedClustersClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersStopFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersStopFuture") - return - } - ar.Response = future.Response() - return -} - -// ManagedClusterStorageProfile storage profile for the container service cluster. -type ManagedClusterStorageProfile struct { - // DiskCSIDriver - AzureDisk CSI Driver settings for the storage profile. - DiskCSIDriver *ManagedClusterStorageProfileDiskCSIDriver `json:"diskCSIDriver,omitempty"` - // FileCSIDriver - AzureFile CSI Driver settings for the storage profile. - FileCSIDriver *ManagedClusterStorageProfileFileCSIDriver `json:"fileCSIDriver,omitempty"` - // SnapshotController - Snapshot Controller settings for the storage profile. - SnapshotController *ManagedClusterStorageProfileSnapshotController `json:"snapshotController,omitempty"` -} - -// ManagedClusterStorageProfileDiskCSIDriver azureDisk CSI Driver settings for the storage profile. -type ManagedClusterStorageProfileDiskCSIDriver struct { - // Enabled - Whether to enable AzureDisk CSI Driver. The default value is true. - Enabled *bool `json:"enabled,omitempty"` - // Version - The version of AzureDisk CSI Driver. The default value is v1. - Version *string `json:"version,omitempty"` -} - -// ManagedClusterStorageProfileFileCSIDriver azureFile CSI Driver settings for the storage profile. -type ManagedClusterStorageProfileFileCSIDriver struct { - // Enabled - Whether to enable AzureFile CSI Driver. The default value is true. - Enabled *bool `json:"enabled,omitempty"` -} - -// ManagedClusterStorageProfileSnapshotController snapshot Controller settings for the storage profile. -type ManagedClusterStorageProfileSnapshotController struct { - // Enabled - Whether to enable Snapshot Controller. The default value is true. - Enabled *bool `json:"enabled,omitempty"` -} - -// ManagedClustersUpdateTagsFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type ManagedClustersUpdateTagsFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(ManagedClustersClient) (ManagedCluster, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *ManagedClustersUpdateTagsFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for ManagedClustersUpdateTagsFuture.Result. 
-func (future *ManagedClustersUpdateTagsFuture) result(client ManagedClustersClient) (mc ManagedCluster, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersUpdateTagsFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - mc.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.ManagedClustersUpdateTagsFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if mc.Response.Response, err = future.GetResult(sender); err == nil && mc.Response.Response.StatusCode != http.StatusNoContent { - mc, err = client.UpdateTagsResponder(mc.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ManagedClustersUpdateTagsFuture", "Result", mc.Response.Response, "Failure responding to request") - } - } - return -} - -// ManagedClusterUpgradeProfile the list of available upgrades for compute pools. -type ManagedClusterUpgradeProfile struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The ID of the upgrade profile. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the upgrade profile. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the upgrade profile. - Type *string `json:"type,omitempty"` - // ManagedClusterUpgradeProfileProperties - The properties of the upgrade profile. - *ManagedClusterUpgradeProfileProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagedClusterUpgradeProfile. -func (mcup ManagedClusterUpgradeProfile) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mcup.ManagedClusterUpgradeProfileProperties != nil { - objectMap["properties"] = mcup.ManagedClusterUpgradeProfileProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ManagedClusterUpgradeProfile struct. -func (mcup *ManagedClusterUpgradeProfile) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - mcup.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - mcup.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - mcup.Type = &typeVar - } - case "properties": - if v != nil { - var managedClusterUpgradeProfileProperties ManagedClusterUpgradeProfileProperties - err = json.Unmarshal(*v, &managedClusterUpgradeProfileProperties) - if err != nil { - return err - } - mcup.ManagedClusterUpgradeProfileProperties = &managedClusterUpgradeProfileProperties - } - } - } - - return nil -} - -// ManagedClusterUpgradeProfileProperties control plane and agent pool upgrade profiles. -type ManagedClusterUpgradeProfileProperties struct { - // ControlPlaneProfile - The list of available upgrade versions for the control plane. - ControlPlaneProfile *ManagedClusterPoolUpgradeProfile `json:"controlPlaneProfile,omitempty"` - // AgentPoolProfiles - The list of available upgrade versions for agent pools. 
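Aside on the pattern above: `ManagedClusterUpgradeProfile` is one of many types in this file that hand-roll `MarshalJSON`/`UnmarshalJSON` to flatten the ARM `properties` envelope into the parent struct. A rough stand-alone illustration of the same idea, with made-up types rather than anything from either SDK:

```go
package example

import "encoding/json"

// profileProperties is the nested "properties" payload on the wire.
type profileProperties struct {
	ControlPlaneVersion string `json:"controlPlaneVersion,omitempty"`
}

// profile flattens the envelope: the wire format nests properties, while
// the Go struct embeds them directly.
type profile struct {
	ID string `json:"id,omitempty"`
	*profileProperties
}

// MarshalJSON re-nests the embedded properties under "properties".
func (p profile) MarshalJSON() ([]byte, error) {
	m := map[string]interface{}{}
	if p.ID != "" {
		m["id"] = p.ID
	}
	if p.profileProperties != nil {
		m["properties"] = p.profileProperties
	}
	return json.Marshal(m)
}

// UnmarshalJSON lifts "properties" back into the embedded struct,
// mirroring the generated code above.
func (p *profile) UnmarshalJSON(body []byte) error {
	var raw struct {
		ID         string             `json:"id"`
		Properties *profileProperties `json:"properties"`
	}
	if err := json.Unmarshal(body, &raw); err != nil {
		return err
	}
	p.ID = raw.ID
	p.profileProperties = raw.Properties
	return nil
}
```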
- AgentPoolProfiles *[]ManagedClusterPoolUpgradeProfile `json:"agentPoolProfiles,omitempty"` -} - -// ManagedClusterWindowsProfile profile for Windows VMs in the managed cluster. -type ManagedClusterWindowsProfile struct { - // AdminUsername - Specifies the name of the administrator account.

**Restriction:** Cannot end in "." **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5". **Minimum-length:** 1 character. **Max-length:** 20 characters - AdminUsername *string `json:"adminUsername,omitempty"` - // AdminPassword - Specifies the password of the administrator account. **Minimum-length:** 8 characters. **Max-length:** 123 characters. **Complexity requirements:** 3 out of 4 of the following need to be fulfilled: Has lower characters, Has upper characters, Has a digit, Has a special character (Regex match [\W_]).
**Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!" - AdminPassword *string `json:"adminPassword,omitempty"` - // LicenseType - The license type to use for Windows VMs. See [Azure Hybrid User Benefits](https://azure.microsoft.com/pricing/hybrid-benefit/faq/) for more details. Possible values include: 'LicenseTypeNone', 'LicenseTypeWindowsServer' - LicenseType LicenseType `json:"licenseType,omitempty"` - // EnableCSIProxy - For more details on CSI proxy, see the [CSI proxy GitHub repo](https://github.com/kubernetes-csi/csi-proxy). - EnableCSIProxy *bool `json:"enableCSIProxy,omitempty"` - // GmsaProfile - The Windows gMSA Profile in the Managed Cluster. - GmsaProfile *WindowsGmsaProfile `json:"gmsaProfile,omitempty"` -} - -// MasterProfile profile for the container service master. -type MasterProfile struct { - // Count - Number of masters (VMs) in the container service cluster. Allowed values are 1, 3, and 5. The default value is 1. - Count *int32 `json:"count,omitempty"` - // DNSPrefix - DNS prefix to be used to create the FQDN for the master pool. - DNSPrefix *string `json:"dnsPrefix,omitempty"` - // VMSize - Size of agent VMs. Possible values include: 'VMSizeTypesStandardA1', 'VMSizeTypesStandardA10', 'VMSizeTypesStandardA11', 'VMSizeTypesStandardA1V2', 'VMSizeTypesStandardA2', 'VMSizeTypesStandardA2V2', 'VMSizeTypesStandardA2mV2', 'VMSizeTypesStandardA3', 'VMSizeTypesStandardA4', 'VMSizeTypesStandardA4V2', 'VMSizeTypesStandardA4mV2', 'VMSizeTypesStandardA5', 'VMSizeTypesStandardA6', 'VMSizeTypesStandardA7', 'VMSizeTypesStandardA8', 'VMSizeTypesStandardA8V2', 'VMSizeTypesStandardA8mV2', 'VMSizeTypesStandardA9', 'VMSizeTypesStandardB2ms', 'VMSizeTypesStandardB2s', 'VMSizeTypesStandardB4ms', 'VMSizeTypesStandardB8ms', 'VMSizeTypesStandardD1', 'VMSizeTypesStandardD11', 'VMSizeTypesStandardD11V2', 'VMSizeTypesStandardD11V2Promo', 'VMSizeTypesStandardD12', 'VMSizeTypesStandardD12V2', 'VMSizeTypesStandardD12V2Promo', 'VMSizeTypesStandardD13', 'VMSizeTypesStandardD13V2', 'VMSizeTypesStandardD13V2Promo', 'VMSizeTypesStandardD14', 'VMSizeTypesStandardD14V2', 'VMSizeTypesStandardD14V2Promo', 'VMSizeTypesStandardD15V2', 'VMSizeTypesStandardD16V3', 'VMSizeTypesStandardD16sV3', 'VMSizeTypesStandardD1V2', 'VMSizeTypesStandardD2', 'VMSizeTypesStandardD2V2', 'VMSizeTypesStandardD2V2Promo', 'VMSizeTypesStandardD2V3', 'VMSizeTypesStandardD2sV3', 'VMSizeTypesStandardD3', 'VMSizeTypesStandardD32V3', 'VMSizeTypesStandardD32sV3', 'VMSizeTypesStandardD3V2', 'VMSizeTypesStandardD3V2Promo', 'VMSizeTypesStandardD4', 'VMSizeTypesStandardD4V2', 'VMSizeTypesStandardD4V2Promo', 'VMSizeTypesStandardD4V3', 'VMSizeTypesStandardD4sV3', 'VMSizeTypesStandardD5V2', 'VMSizeTypesStandardD5V2Promo', 'VMSizeTypesStandardD64V3', 'VMSizeTypesStandardD64sV3', 'VMSizeTypesStandardD8V3', 'VMSizeTypesStandardD8sV3', 'VMSizeTypesStandardDS1', 'VMSizeTypesStandardDS11', 'VMSizeTypesStandardDS11V2', 'VMSizeTypesStandardDS11V2Promo', 'VMSizeTypesStandardDS12', 'VMSizeTypesStandardDS12V2', 'VMSizeTypesStandardDS12V2Promo', 'VMSizeTypesStandardDS13', 'VMSizeTypesStandardDS132V2', 'VMSizeTypesStandardDS134V2', 'VMSizeTypesStandardDS13V2', 'VMSizeTypesStandardDS13V2Promo', 'VMSizeTypesStandardDS14', 'VMSizeTypesStandardDS144V2', 'VMSizeTypesStandardDS148V2', 'VMSizeTypesStandardDS14V2', 'VMSizeTypesStandardDS14V2Promo', 'VMSizeTypesStandardDS15V2', 'VMSizeTypesStandardDS1V2', 'VMSizeTypesStandardDS2', 'VMSizeTypesStandardDS2V2', 
'VMSizeTypesStandardDS2V2Promo', 'VMSizeTypesStandardDS3', 'VMSizeTypesStandardDS3V2', 'VMSizeTypesStandardDS3V2Promo', 'VMSizeTypesStandardDS4', 'VMSizeTypesStandardDS4V2', 'VMSizeTypesStandardDS4V2Promo', 'VMSizeTypesStandardDS5V2', 'VMSizeTypesStandardDS5V2Promo', 'VMSizeTypesStandardE16V3', 'VMSizeTypesStandardE16sV3', 'VMSizeTypesStandardE2V3', 'VMSizeTypesStandardE2sV3', 'VMSizeTypesStandardE3216sV3', 'VMSizeTypesStandardE328sV3', 'VMSizeTypesStandardE32V3', 'VMSizeTypesStandardE32sV3', 'VMSizeTypesStandardE4V3', 'VMSizeTypesStandardE4sV3', 'VMSizeTypesStandardE6416sV3', 'VMSizeTypesStandardE6432sV3', 'VMSizeTypesStandardE64V3', 'VMSizeTypesStandardE64sV3', 'VMSizeTypesStandardE8V3', 'VMSizeTypesStandardE8sV3', 'VMSizeTypesStandardF1', 'VMSizeTypesStandardF16', 'VMSizeTypesStandardF16s', 'VMSizeTypesStandardF16sV2', 'VMSizeTypesStandardF1s', 'VMSizeTypesStandardF2', 'VMSizeTypesStandardF2s', 'VMSizeTypesStandardF2sV2', 'VMSizeTypesStandardF32sV2', 'VMSizeTypesStandardF4', 'VMSizeTypesStandardF4s', 'VMSizeTypesStandardF4sV2', 'VMSizeTypesStandardF64sV2', 'VMSizeTypesStandardF72sV2', 'VMSizeTypesStandardF8', 'VMSizeTypesStandardF8s', 'VMSizeTypesStandardF8sV2', 'VMSizeTypesStandardG1', 'VMSizeTypesStandardG2', 'VMSizeTypesStandardG3', 'VMSizeTypesStandardG4', 'VMSizeTypesStandardG5', 'VMSizeTypesStandardGS1', 'VMSizeTypesStandardGS2', 'VMSizeTypesStandardGS3', 'VMSizeTypesStandardGS4', 'VMSizeTypesStandardGS44', 'VMSizeTypesStandardGS48', 'VMSizeTypesStandardGS5', 'VMSizeTypesStandardGS516', 'VMSizeTypesStandardGS58', 'VMSizeTypesStandardH16', 'VMSizeTypesStandardH16m', 'VMSizeTypesStandardH16mr', 'VMSizeTypesStandardH16r', 'VMSizeTypesStandardH8', 'VMSizeTypesStandardH8m', 'VMSizeTypesStandardL16s', 'VMSizeTypesStandardL32s', 'VMSizeTypesStandardL4s', 'VMSizeTypesStandardL8s', 'VMSizeTypesStandardM12832ms', 'VMSizeTypesStandardM12864ms', 'VMSizeTypesStandardM128ms', 'VMSizeTypesStandardM128s', 'VMSizeTypesStandardM6416ms', 'VMSizeTypesStandardM6432ms', 'VMSizeTypesStandardM64ms', 'VMSizeTypesStandardM64s', 'VMSizeTypesStandardNC12', 'VMSizeTypesStandardNC12sV2', 'VMSizeTypesStandardNC12sV3', 'VMSizeTypesStandardNC24', 'VMSizeTypesStandardNC24r', 'VMSizeTypesStandardNC24rsV2', 'VMSizeTypesStandardNC24rsV3', 'VMSizeTypesStandardNC24sV2', 'VMSizeTypesStandardNC24sV3', 'VMSizeTypesStandardNC6', 'VMSizeTypesStandardNC6sV2', 'VMSizeTypesStandardNC6sV3', 'VMSizeTypesStandardND12s', 'VMSizeTypesStandardND24rs', 'VMSizeTypesStandardND24s', 'VMSizeTypesStandardND6s', 'VMSizeTypesStandardNV12', 'VMSizeTypesStandardNV24', 'VMSizeTypesStandardNV6' - VMSize VMSizeTypes `json:"vmSize,omitempty"` - // OsDiskSizeGB - OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified. - OsDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"` - // VnetSubnetID - VNet SubnetID specifies the VNet's subnet identifier. - VnetSubnetID *string `json:"vnetSubnetID,omitempty"` - // FirstConsecutiveStaticIP - FirstConsecutiveStaticIP used to specify the first static ip of masters. - FirstConsecutiveStaticIP *string `json:"firstConsecutiveStaticIP,omitempty"` - // StorageProfile - Storage profile specifies what kind of storage used. Choose from StorageAccount and ManagedDisks. Leave it empty, we will choose for you based on the orchestrator choice. 
Possible values include: 'StorageProfileTypesStorageAccount', 'StorageProfileTypesManagedDisks' - StorageProfile StorageProfileTypes `json:"storageProfile,omitempty"` - // Fqdn - READ-ONLY; FQDN for the master pool. - Fqdn *string `json:"fqdn,omitempty"` -} - -// MarshalJSON is the custom marshaler for MasterProfile. -func (mp MasterProfile) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mp.Count != nil { - objectMap["count"] = mp.Count - } - if mp.DNSPrefix != nil { - objectMap["dnsPrefix"] = mp.DNSPrefix - } - if mp.VMSize != "" { - objectMap["vmSize"] = mp.VMSize - } - if mp.OsDiskSizeGB != nil { - objectMap["osDiskSizeGB"] = mp.OsDiskSizeGB - } - if mp.VnetSubnetID != nil { - objectMap["vnetSubnetID"] = mp.VnetSubnetID - } - if mp.FirstConsecutiveStaticIP != nil { - objectMap["firstConsecutiveStaticIP"] = mp.FirstConsecutiveStaticIP - } - if mp.StorageProfile != "" { - objectMap["storageProfile"] = mp.StorageProfile - } - return json.Marshal(objectMap) -} - -// NetworkProfile profile of network configuration. -type NetworkProfile struct { - // NetworkPlugin - Network plugin used for building the Kubernetes network. Possible values include: 'NetworkPluginAzure', 'NetworkPluginKubenet', 'NetworkPluginNone' - NetworkPlugin NetworkPlugin `json:"networkPlugin,omitempty"` - // NetworkPolicy - Network policy used for building the Kubernetes network. Possible values include: 'NetworkPolicyCalico', 'NetworkPolicyAzure' - NetworkPolicy NetworkPolicy `json:"networkPolicy,omitempty"` - // NetworkMode - This cannot be specified if networkPlugin is anything other than 'azure'. Possible values include: 'NetworkModeTransparent', 'NetworkModeBridge' - NetworkMode NetworkMode `json:"networkMode,omitempty"` - // PodCidr - A CIDR notation IP range from which to assign pod IPs when kubenet is used. - PodCidr *string `json:"podCidr,omitempty"` - // ServiceCidr - A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges. - ServiceCidr *string `json:"serviceCidr,omitempty"` - // DNSServiceIP - An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr. - DNSServiceIP *string `json:"dnsServiceIP,omitempty"` - // DockerBridgeCidr - A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range. - DockerBridgeCidr *string `json:"dockerBridgeCidr,omitempty"` - // OutboundType - This can only be set at cluster creation time and cannot be changed later. For more information see [egress outbound type](https://docs.microsoft.com/azure/aks/egress-outboundtype). Possible values include: 'OutboundTypeLoadBalancer', 'OutboundTypeUserDefinedRouting', 'OutboundTypeManagedNATGateway', 'OutboundTypeUserAssignedNATGateway' - OutboundType OutboundType `json:"outboundType,omitempty"` - // LoadBalancerSku - The default is 'standard'. See [Azure Load Balancer SKUs](https://docs.microsoft.com/azure/load-balancer/skus) for more information about the differences between load balancer SKUs. Possible values include: 'LoadBalancerSkuStandard', 'LoadBalancerSkuBasic' - LoadBalancerSku LoadBalancerSku `json:"loadBalancerSku,omitempty"` - // LoadBalancerProfile - Profile of the cluster load balancer. - LoadBalancerProfile *ManagedClusterLoadBalancerProfile `json:"loadBalancerProfile,omitempty"` - // NatGatewayProfile - Profile of the cluster NAT gateway. 
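As a quick illustration of the `NetworkProfile` shape being removed, this is roughly how a kubenet profile was populated with these track-1 types; the import path is the vendored package deleted in this diff, the CIDR values are placeholders, and the enum constant names are taken from the doc comments above:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice"
)

func strPtr(s string) *string { return &s }

// kubenetNetworkProfile builds a minimal kubenet network profile using the
// removed track-1 types; all addresses are placeholder values.
func kubenetNetworkProfile() containerservice.NetworkProfile {
	return containerservice.NetworkProfile{
		NetworkPlugin:   containerservice.NetworkPluginKubenet,
		LoadBalancerSku: containerservice.LoadBalancerSkuStandard,
		PodCidr:         strPtr("10.244.0.0/16"),
		ServiceCidr:     strPtr("10.0.0.0/16"),
		DNSServiceIP:    strPtr("10.0.0.10"),
	}
}
```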
- NatGatewayProfile *ManagedClusterNATGatewayProfile `json:"natGatewayProfile,omitempty"` - // PodCidrs - One IPv4 CIDR is expected for single-stack networking. Two CIDRs, one for each IP family (IPv4/IPv6), is expected for dual-stack networking. - PodCidrs *[]string `json:"podCidrs,omitempty"` - // ServiceCidrs - One IPv4 CIDR is expected for single-stack networking. Two CIDRs, one for each IP family (IPv4/IPv6), is expected for dual-stack networking. They must not overlap with any Subnet IP ranges. - ServiceCidrs *[]string `json:"serviceCidrs,omitempty"` - // IPFamilies - IP families are used to determine single-stack or dual-stack clusters. For single-stack, the expected value is IPv4. For dual-stack, the expected values are IPv4 and IPv6. - IPFamilies *[]IPFamily `json:"ipFamilies,omitempty"` -} - -// NetworkProfileForSnapshot network profile for managed cluster snapshot, these properties are read only. -type NetworkProfileForSnapshot struct { - // NetworkPlugin - networkPlugin for managed cluster snapshot. Possible values include: 'NetworkPluginAzure', 'NetworkPluginKubenet', 'NetworkPluginNone' - NetworkPlugin NetworkPlugin `json:"networkPlugin,omitempty"` - // NetworkPolicy - networkPolicy for managed cluster snapshot. Possible values include: 'NetworkPolicyCalico', 'NetworkPolicyAzure' - NetworkPolicy NetworkPolicy `json:"networkPolicy,omitempty"` - // NetworkMode - networkMode for managed cluster snapshot. Possible values include: 'NetworkModeTransparent', 'NetworkModeBridge' - NetworkMode NetworkMode `json:"networkMode,omitempty"` - // LoadBalancerSku - loadBalancerSku for managed cluster snapshot. Possible values include: 'LoadBalancerSkuStandard', 'LoadBalancerSkuBasic' - LoadBalancerSku LoadBalancerSku `json:"loadBalancerSku,omitempty"` -} - -// OperationListResult the List Operation response. -type OperationListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The list of operations - Value *[]OperationValue `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for OperationListResult. -func (olr OperationListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// OperationValue describes the properties of a Operation value. -type OperationValue struct { - // Origin - READ-ONLY; The origin of the operation. - Origin *string `json:"origin,omitempty"` - // Name - READ-ONLY; The name of the operation. - Name *string `json:"name,omitempty"` - // OperationValueDisplay - Describes the properties of a Operation Value Display. - *OperationValueDisplay `json:"display,omitempty"` -} - -// MarshalJSON is the custom marshaler for OperationValue. -func (ov OperationValue) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ov.OperationValueDisplay != nil { - objectMap["display"] = ov.OperationValueDisplay - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for OperationValue struct. 
-func (ov *OperationValue) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "origin": - if v != nil { - var origin string - err = json.Unmarshal(*v, &origin) - if err != nil { - return err - } - ov.Origin = &origin - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - ov.Name = &name - } - case "display": - if v != nil { - var operationValueDisplay OperationValueDisplay - err = json.Unmarshal(*v, &operationValueDisplay) - if err != nil { - return err - } - ov.OperationValueDisplay = &operationValueDisplay - } - } - } - - return nil -} - -// OperationValueDisplay describes the properties of a Operation Value Display. -type OperationValueDisplay struct { - // Operation - READ-ONLY; The display name of the operation. - Operation *string `json:"operation,omitempty"` - // Resource - READ-ONLY; The display name of the resource the operation applies to. - Resource *string `json:"resource,omitempty"` - // Description - READ-ONLY; The description of the operation. - Description *string `json:"description,omitempty"` - // Provider - READ-ONLY; The resource provider for the operation. - Provider *string `json:"provider,omitempty"` -} - -// MarshalJSON is the custom marshaler for OperationValueDisplay. -func (ovd OperationValueDisplay) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// OSOptionProfile the OS option profile. -type OSOptionProfile struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The ID of the OS option resource. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the OS option resource. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the OS option resource. - Type *string `json:"type,omitempty"` - // OSOptionPropertyList - The list of OS options. - *OSOptionPropertyList `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for OSOptionProfile. -func (oop OSOptionProfile) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if oop.OSOptionPropertyList != nil { - objectMap["properties"] = oop.OSOptionPropertyList - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for OSOptionProfile struct. -func (oop *OSOptionProfile) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - oop.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - oop.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - oop.Type = &typeVar - } - case "properties": - if v != nil { - var oSOptionPropertyList OSOptionPropertyList - err = json.Unmarshal(*v, &oSOptionPropertyList) - if err != nil { - return err - } - oop.OSOptionPropertyList = &oSOptionPropertyList - } - } - } - - return nil -} - -// OSOptionProperty OS option property. -type OSOptionProperty struct { - // OsType - The OS type. - OsType *string `json:"os-type,omitempty"` - // EnableFipsImage - Whether the image is FIPS-enabled. 
- EnableFipsImage *bool `json:"enable-fips-image,omitempty"` -} - -// OSOptionPropertyList the list of OS option properties. -type OSOptionPropertyList struct { - // OsOptionPropertyList - The list of OS options. - OsOptionPropertyList *[]OSOptionProperty `json:"osOptionPropertyList,omitempty"` -} - -// OutboundEnvironmentEndpoint egress endpoints which AKS agent nodes connect to for common purpose. -type OutboundEnvironmentEndpoint struct { - // Category - The category of endpoints accessed by the AKS agent node, e.g. azure-resource-management, apiserver, etc. - Category *string `json:"category,omitempty"` - // Endpoints - The endpoints that AKS agent nodes connect to - Endpoints *[]EndpointDependency `json:"endpoints,omitempty"` -} - -// OutboundEnvironmentEndpointCollection collection of OutboundEnvironmentEndpoint -type OutboundEnvironmentEndpointCollection struct { - autorest.Response `json:"-"` - // Value - Collection of resources. - Value *[]OutboundEnvironmentEndpoint `json:"value,omitempty"` - // NextLink - READ-ONLY; Link to next page of resources. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for OutboundEnvironmentEndpointCollection. -func (oeec OutboundEnvironmentEndpointCollection) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if oeec.Value != nil { - objectMap["value"] = oeec.Value - } - return json.Marshal(objectMap) -} - -// OutboundEnvironmentEndpointCollectionIterator provides access to a complete listing of -// OutboundEnvironmentEndpoint values. -type OutboundEnvironmentEndpointCollectionIterator struct { - i int - page OutboundEnvironmentEndpointCollectionPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *OutboundEnvironmentEndpointCollectionIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/OutboundEnvironmentEndpointCollectionIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *OutboundEnvironmentEndpointCollectionIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter OutboundEnvironmentEndpointCollectionIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter OutboundEnvironmentEndpointCollectionIterator) Response() OutboundEnvironmentEndpointCollection { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. 
-func (iter OutboundEnvironmentEndpointCollectionIterator) Value() OutboundEnvironmentEndpoint { - if !iter.page.NotDone() { - return OutboundEnvironmentEndpoint{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the OutboundEnvironmentEndpointCollectionIterator type. -func NewOutboundEnvironmentEndpointCollectionIterator(page OutboundEnvironmentEndpointCollectionPage) OutboundEnvironmentEndpointCollectionIterator { - return OutboundEnvironmentEndpointCollectionIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (oeec OutboundEnvironmentEndpointCollection) IsEmpty() bool { - return oeec.Value == nil || len(*oeec.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (oeec OutboundEnvironmentEndpointCollection) hasNextLink() bool { - return oeec.NextLink != nil && len(*oeec.NextLink) != 0 -} - -// outboundEnvironmentEndpointCollectionPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (oeec OutboundEnvironmentEndpointCollection) outboundEnvironmentEndpointCollectionPreparer(ctx context.Context) (*http.Request, error) { - if !oeec.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(oeec.NextLink))) -} - -// OutboundEnvironmentEndpointCollectionPage contains a page of OutboundEnvironmentEndpoint values. -type OutboundEnvironmentEndpointCollectionPage struct { - fn func(context.Context, OutboundEnvironmentEndpointCollection) (OutboundEnvironmentEndpointCollection, error) - oeec OutboundEnvironmentEndpointCollection -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *OutboundEnvironmentEndpointCollectionPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/OutboundEnvironmentEndpointCollectionPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.oeec) - if err != nil { - return err - } - page.oeec = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *OutboundEnvironmentEndpointCollectionPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page OutboundEnvironmentEndpointCollectionPage) NotDone() bool { - return !page.oeec.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page OutboundEnvironmentEndpointCollectionPage) Response() OutboundEnvironmentEndpointCollection { - return page.oeec -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page OutboundEnvironmentEndpointCollectionPage) Values() []OutboundEnvironmentEndpoint { - if page.oeec.IsEmpty() { - return nil - } - return *page.oeec.Value -} - -// Creates a new instance of the OutboundEnvironmentEndpointCollectionPage type. 
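For reference, consuming one of these generated iterators looked roughly like the sketch below, using only the `NotDone`/`Value`/`NextWithContext` methods defined above; the function name is illustrative:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice"
)

// collectEndpoints drains a track-1 iterator into a plain slice, advancing
// page by page until NotDone reports completion.
func collectEndpoints(ctx context.Context, iter containerservice.OutboundEnvironmentEndpointCollectionIterator) ([]containerservice.OutboundEnvironmentEndpoint, error) {
	var out []containerservice.OutboundEnvironmentEndpoint
	for iter.NotDone() {
		out = append(out, iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return out, nil
}
```

The go-azure-sdk replacements return complete lists (or paged helpers) directly, which removes the need for this iterator plumbing.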
-func NewOutboundEnvironmentEndpointCollectionPage(cur OutboundEnvironmentEndpointCollection, getNextPage func(context.Context, OutboundEnvironmentEndpointCollection) (OutboundEnvironmentEndpointCollection, error)) OutboundEnvironmentEndpointCollectionPage { - return OutboundEnvironmentEndpointCollectionPage{ - fn: getNextPage, - oeec: cur, - } -} - -// PowerState describes the Power State of the cluster -type PowerState struct { - // Code - Tells whether the cluster is Running or Stopped. Possible values include: 'CodeRunning', 'CodeStopped' - Code Code `json:"code,omitempty"` -} - -// PrivateEndpoint private endpoint which a connection belongs to. -type PrivateEndpoint struct { - // ID - The resource ID of the private endpoint - ID *string `json:"id,omitempty"` -} - -// PrivateEndpointConnection a private endpoint connection -type PrivateEndpointConnection struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The ID of the private endpoint connection. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the private endpoint connection. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The resource type. - Type *string `json:"type,omitempty"` - // PrivateEndpointConnectionProperties - The properties of a private endpoint connection. - *PrivateEndpointConnectionProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for PrivateEndpointConnection. -func (pec PrivateEndpointConnection) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if pec.PrivateEndpointConnectionProperties != nil { - objectMap["properties"] = pec.PrivateEndpointConnectionProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for PrivateEndpointConnection struct. -func (pec *PrivateEndpointConnection) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - pec.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - pec.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - pec.Type = &typeVar - } - case "properties": - if v != nil { - var privateEndpointConnectionProperties PrivateEndpointConnectionProperties - err = json.Unmarshal(*v, &privateEndpointConnectionProperties) - if err != nil { - return err - } - pec.PrivateEndpointConnectionProperties = &privateEndpointConnectionProperties - } - } - } - - return nil -} - -// PrivateEndpointConnectionListResult a list of private endpoint connections -type PrivateEndpointConnectionListResult struct { - autorest.Response `json:"-"` - // Value - The collection value. - Value *[]PrivateEndpointConnection `json:"value,omitempty"` -} - -// PrivateEndpointConnectionProperties properties of a private endpoint connection. -type PrivateEndpointConnectionProperties struct { - // ProvisioningState - READ-ONLY; The current provisioning state. 
Possible values include: 'PrivateEndpointConnectionProvisioningStateSucceeded', 'PrivateEndpointConnectionProvisioningStateCreating', 'PrivateEndpointConnectionProvisioningStateDeleting', 'PrivateEndpointConnectionProvisioningStateFailed' - ProvisioningState PrivateEndpointConnectionProvisioningState `json:"provisioningState,omitempty"` - // PrivateEndpoint - The resource of private endpoint. - PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"` - // PrivateLinkServiceConnectionState - A collection of information about the state of the connection between service consumer and provider. - PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState,omitempty"` -} - -// MarshalJSON is the custom marshaler for PrivateEndpointConnectionProperties. -func (pecp PrivateEndpointConnectionProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if pecp.PrivateEndpoint != nil { - objectMap["privateEndpoint"] = pecp.PrivateEndpoint - } - if pecp.PrivateLinkServiceConnectionState != nil { - objectMap["privateLinkServiceConnectionState"] = pecp.PrivateLinkServiceConnectionState - } - return json.Marshal(objectMap) -} - -// PrivateEndpointConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type PrivateEndpointConnectionsDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(PrivateEndpointConnectionsClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *PrivateEndpointConnectionsDeleteFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for PrivateEndpointConnectionsDeleteFuture.Result. -func (future *PrivateEndpointConnectionsDeleteFuture) result(client PrivateEndpointConnectionsClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.PrivateEndpointConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("containerservice.PrivateEndpointConnectionsDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// PrivateLinkResource a private link resource -type PrivateLinkResource struct { - autorest.Response `json:"-"` - // ID - The ID of the private link resource. - ID *string `json:"id,omitempty"` - // Name - The name of the private link resource. - Name *string `json:"name,omitempty"` - // Type - The resource type. - Type *string `json:"type,omitempty"` - // GroupID - The group ID of the resource. - GroupID *string `json:"groupId,omitempty"` - // RequiredMembers - The RequiredMembers of the resource - RequiredMembers *[]string `json:"requiredMembers,omitempty"` - // PrivateLinkServiceID - READ-ONLY; The private link service ID of the resource, this field is exposed only to NRP internally. - PrivateLinkServiceID *string `json:"privateLinkServiceID,omitempty"` -} - -// MarshalJSON is the custom marshaler for PrivateLinkResource. 
-func (plr PrivateLinkResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if plr.ID != nil { - objectMap["id"] = plr.ID - } - if plr.Name != nil { - objectMap["name"] = plr.Name - } - if plr.Type != nil { - objectMap["type"] = plr.Type - } - if plr.GroupID != nil { - objectMap["groupId"] = plr.GroupID - } - if plr.RequiredMembers != nil { - objectMap["requiredMembers"] = plr.RequiredMembers - } - return json.Marshal(objectMap) -} - -// PrivateLinkResourcesListResult a list of private link resources -type PrivateLinkResourcesListResult struct { - autorest.Response `json:"-"` - // Value - The collection value. - Value *[]PrivateLinkResource `json:"value,omitempty"` -} - -// PrivateLinkServiceConnectionState the state of a private link service connection. -type PrivateLinkServiceConnectionState struct { - // Status - The private link service connection status. Possible values include: 'ConnectionStatusPending', 'ConnectionStatusApproved', 'ConnectionStatusRejected', 'ConnectionStatusDisconnected' - Status ConnectionStatus `json:"status,omitempty"` - // Description - The private link service connection description. - Description *string `json:"description,omitempty"` -} - -// ProxyResource the resource model definition for a Azure Resource Manager proxy resource. It will not -// have tags and a location -type ProxyResource struct { - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` - // SystemData - READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. - SystemData *SystemData `json:"systemData,omitempty"` -} - -// MarshalJSON is the custom marshaler for ProxyResource. -func (pr ProxyResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// Resource common fields that are returned in the response for all Azure Resource Manager resources -type Resource struct { - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` - // SystemData - READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. - SystemData *SystemData `json:"systemData,omitempty"` -} - -// MarshalJSON is the custom marshaler for Resource. -func (r Resource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ResourceReference a reference to an Azure resource. -type ResourceReference struct { - // ID - The fully qualified Azure resource id. - ID *string `json:"id,omitempty"` -} - -// RunCommandRequest a run command request -type RunCommandRequest struct { - // Command - The command to run. 
- Command *string `json:"command,omitempty"` - // Context - A base64 encoded zip file containing the files required by the command. - Context *string `json:"context,omitempty"` - // ClusterToken - AuthToken issued for AKS AAD Server App. - ClusterToken *string `json:"clusterToken,omitempty"` -} - -// RunCommandResult run command result. -type RunCommandResult struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The command id. - ID *string `json:"id,omitempty"` - // CommandResultProperties - Properties of command result. - *CommandResultProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for RunCommandResult. -func (rcr RunCommandResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if rcr.CommandResultProperties != nil { - objectMap["properties"] = rcr.CommandResultProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for RunCommandResult struct. -func (rcr *RunCommandResult) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - rcr.ID = &ID - } - case "properties": - if v != nil { - var commandResultProperties CommandResultProperties - err = json.Unmarshal(*v, &commandResultProperties) - if err != nil { - return err - } - rcr.CommandResultProperties = &commandResultProperties - } - } - } - - return nil -} - -// Snapshot a node pool snapshot resource. -type Snapshot struct { - autorest.Response `json:"-"` - // SnapshotProperties - Properties of a snapshot. - *SnapshotProperties `json:"properties,omitempty"` - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // Location - The geo-location where the resource lives - Location *string `json:"location,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` - // SystemData - READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. - SystemData *SystemData `json:"systemData,omitempty"` -} - -// MarshalJSON is the custom marshaler for Snapshot. -func (s Snapshot) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if s.SnapshotProperties != nil { - objectMap["properties"] = s.SnapshotProperties - } - if s.Tags != nil { - objectMap["tags"] = s.Tags - } - if s.Location != nil { - objectMap["location"] = s.Location - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Snapshot struct. 
-func (s *Snapshot) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var snapshotProperties SnapshotProperties - err = json.Unmarshal(*v, &snapshotProperties) - if err != nil { - return err - } - s.SnapshotProperties = &snapshotProperties - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - s.Tags = tags - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - s.Location = &location - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - s.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - s.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - s.Type = &typeVar - } - case "systemData": - if v != nil { - var systemData SystemData - err = json.Unmarshal(*v, &systemData) - if err != nil { - return err - } - s.SystemData = &systemData - } - } - } - - return nil -} - -// SnapshotListResult the response from the List Snapshots operation. -type SnapshotListResult struct { - autorest.Response `json:"-"` - // Value - The list of snapshots. - Value *[]Snapshot `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of snapshot results. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for SnapshotListResult. -func (slr SnapshotListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if slr.Value != nil { - objectMap["value"] = slr.Value - } - return json.Marshal(objectMap) -} - -// SnapshotListResultIterator provides access to a complete listing of Snapshot values. -type SnapshotListResultIterator struct { - i int - page SnapshotListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *SnapshotListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *SnapshotListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter SnapshotListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. 
-func (iter SnapshotListResultIterator) Response() SnapshotListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter SnapshotListResultIterator) Value() Snapshot { - if !iter.page.NotDone() { - return Snapshot{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the SnapshotListResultIterator type. -func NewSnapshotListResultIterator(page SnapshotListResultPage) SnapshotListResultIterator { - return SnapshotListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (slr SnapshotListResult) IsEmpty() bool { - return slr.Value == nil || len(*slr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (slr SnapshotListResult) hasNextLink() bool { - return slr.NextLink != nil && len(*slr.NextLink) != 0 -} - -// snapshotListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (slr SnapshotListResult) snapshotListResultPreparer(ctx context.Context) (*http.Request, error) { - if !slr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(slr.NextLink))) -} - -// SnapshotListResultPage contains a page of Snapshot values. -type SnapshotListResultPage struct { - fn func(context.Context, SnapshotListResult) (SnapshotListResult, error) - slr SnapshotListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *SnapshotListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.slr) - if err != nil { - return err - } - page.slr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *SnapshotListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page SnapshotListResultPage) NotDone() bool { - return !page.slr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page SnapshotListResultPage) Response() SnapshotListResult { - return page.slr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page SnapshotListResultPage) Values() []Snapshot { - if page.slr.IsEmpty() { - return nil - } - return *page.slr.Value -} - -// Creates a new instance of the SnapshotListResultPage type. -func NewSnapshotListResultPage(cur SnapshotListResult, getNextPage func(context.Context, SnapshotListResult) (SnapshotListResult, error)) SnapshotListResultPage { - return SnapshotListResultPage{ - fn: getNextPage, - slr: cur, - } -} - -// SnapshotProperties properties used to configure a node pool snapshot. 
-type SnapshotProperties struct { - // CreationData - CreationData to be used to specify the source agent pool resource ID to create this snapshot. - CreationData *CreationData `json:"creationData,omitempty"` - // SnapshotType - Possible values include: 'SnapshotTypeNodePool', 'SnapshotTypeManagedCluster' - SnapshotType SnapshotType `json:"snapshotType,omitempty"` - // KubernetesVersion - READ-ONLY; The version of Kubernetes. - KubernetesVersion *string `json:"kubernetesVersion,omitempty"` - // NodeImageVersion - READ-ONLY; The version of node image. - NodeImageVersion *string `json:"nodeImageVersion,omitempty"` - // OsType - READ-ONLY; Possible values include: 'OSTypeLinux', 'OSTypeWindows' - OsType OSType `json:"osType,omitempty"` - // OsSku - READ-ONLY; Possible values include: 'OSSKUUbuntu', 'OSSKUCBLMariner' - OsSku OSSKU `json:"osSku,omitempty"` - // VMSize - READ-ONLY; The size of the VM. - VMSize *string `json:"vmSize,omitempty"` - // EnableFIPS - READ-ONLY; Whether to use a FIPS-enabled OS. - EnableFIPS *bool `json:"enableFIPS,omitempty"` -} - -// MarshalJSON is the custom marshaler for SnapshotProperties. -func (sp SnapshotProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sp.CreationData != nil { - objectMap["creationData"] = sp.CreationData - } - if sp.SnapshotType != "" { - objectMap["snapshotType"] = sp.SnapshotType - } - return json.Marshal(objectMap) -} - -// SSHConfiguration SSH configuration for Linux-based VMs running on Azure. -type SSHConfiguration struct { - // PublicKeys - The list of SSH public keys used to authenticate with Linux-based VMs. A maximum of 1 key may be specified. - PublicKeys *[]SSHPublicKey `json:"publicKeys,omitempty"` -} - -// SSHPublicKey contains information about SSH certificate public key data. -type SSHPublicKey struct { - // KeyData - Certificate public key used to authenticate with VMs through SSH. The certificate must be in PEM format with or without headers. - KeyData *string `json:"keyData,omitempty"` -} - -// SubResource reference to another subresource. -type SubResource struct { - // ID - READ-ONLY; Resource ID. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource that is unique within a resource group. This name can be used to access the resource. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for SubResource. -func (sr SubResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SysctlConfig sysctl settings for Linux agent nodes. -type SysctlConfig struct { - // NetCoreSomaxconn - Sysctl setting net.core.somaxconn. - NetCoreSomaxconn *int32 `json:"netCoreSomaxconn,omitempty"` - // NetCoreNetdevMaxBacklog - Sysctl setting net.core.netdev_max_backlog. - NetCoreNetdevMaxBacklog *int32 `json:"netCoreNetdevMaxBacklog,omitempty"` - // NetCoreRmemDefault - Sysctl setting net.core.rmem_default. - NetCoreRmemDefault *int32 `json:"netCoreRmemDefault,omitempty"` - // NetCoreRmemMax - Sysctl setting net.core.rmem_max. - NetCoreRmemMax *int32 `json:"netCoreRmemMax,omitempty"` - // NetCoreWmemDefault - Sysctl setting net.core.wmem_default. - NetCoreWmemDefault *int32 `json:"netCoreWmemDefault,omitempty"` - // NetCoreWmemMax - Sysctl setting net.core.wmem_max. - NetCoreWmemMax *int32 `json:"netCoreWmemMax,omitempty"` - // NetCoreOptmemMax - Sysctl setting net.core.optmem_max. 
- NetCoreOptmemMax *int32 `json:"netCoreOptmemMax,omitempty"` - // NetIpv4TCPMaxSynBacklog - Sysctl setting net.ipv4.tcp_max_syn_backlog. - NetIpv4TCPMaxSynBacklog *int32 `json:"netIpv4TcpMaxSynBacklog,omitempty"` - // NetIpv4TCPMaxTwBuckets - Sysctl setting net.ipv4.tcp_max_tw_buckets. - NetIpv4TCPMaxTwBuckets *int32 `json:"netIpv4TcpMaxTwBuckets,omitempty"` - // NetIpv4TCPFinTimeout - Sysctl setting net.ipv4.tcp_fin_timeout. - NetIpv4TCPFinTimeout *int32 `json:"netIpv4TcpFinTimeout,omitempty"` - // NetIpv4TCPKeepaliveTime - Sysctl setting net.ipv4.tcp_keepalive_time. - NetIpv4TCPKeepaliveTime *int32 `json:"netIpv4TcpKeepaliveTime,omitempty"` - // NetIpv4TCPKeepaliveProbes - Sysctl setting net.ipv4.tcp_keepalive_probes. - NetIpv4TCPKeepaliveProbes *int32 `json:"netIpv4TcpKeepaliveProbes,omitempty"` - // NetIpv4TcpkeepaliveIntvl - Sysctl setting net.ipv4.tcp_keepalive_intvl. - NetIpv4TcpkeepaliveIntvl *int32 `json:"netIpv4TcpkeepaliveIntvl,omitempty"` - // NetIpv4TCPTwReuse - Sysctl setting net.ipv4.tcp_tw_reuse. - NetIpv4TCPTwReuse *bool `json:"netIpv4TcpTwReuse,omitempty"` - // NetIpv4IPLocalPortRange - Sysctl setting net.ipv4.ip_local_port_range. - NetIpv4IPLocalPortRange *string `json:"netIpv4IpLocalPortRange,omitempty"` - // NetIpv4NeighDefaultGcThresh1 - Sysctl setting net.ipv4.neigh.default.gc_thresh1. - NetIpv4NeighDefaultGcThresh1 *int32 `json:"netIpv4NeighDefaultGcThresh1,omitempty"` - // NetIpv4NeighDefaultGcThresh2 - Sysctl setting net.ipv4.neigh.default.gc_thresh2. - NetIpv4NeighDefaultGcThresh2 *int32 `json:"netIpv4NeighDefaultGcThresh2,omitempty"` - // NetIpv4NeighDefaultGcThresh3 - Sysctl setting net.ipv4.neigh.default.gc_thresh3. - NetIpv4NeighDefaultGcThresh3 *int32 `json:"netIpv4NeighDefaultGcThresh3,omitempty"` - // NetNetfilterNfConntrackMax - Sysctl setting net.netfilter.nf_conntrack_max. - NetNetfilterNfConntrackMax *int32 `json:"netNetfilterNfConntrackMax,omitempty"` - // NetNetfilterNfConntrackBuckets - Sysctl setting net.netfilter.nf_conntrack_buckets. - NetNetfilterNfConntrackBuckets *int32 `json:"netNetfilterNfConntrackBuckets,omitempty"` - // FsInotifyMaxUserWatches - Sysctl setting fs.inotify.max_user_watches. - FsInotifyMaxUserWatches *int32 `json:"fsInotifyMaxUserWatches,omitempty"` - // FsFileMax - Sysctl setting fs.file-max. - FsFileMax *int32 `json:"fsFileMax,omitempty"` - // FsAioMaxNr - Sysctl setting fs.aio-max-nr. - FsAioMaxNr *int32 `json:"fsAioMaxNr,omitempty"` - // FsNrOpen - Sysctl setting fs.nr_open. - FsNrOpen *int32 `json:"fsNrOpen,omitempty"` - // KernelThreadsMax - Sysctl setting kernel.threads-max. - KernelThreadsMax *int32 `json:"kernelThreadsMax,omitempty"` - // VMMaxMapCount - Sysctl setting vm.max_map_count. - VMMaxMapCount *int32 `json:"vmMaxMapCount,omitempty"` - // VMSwappiness - Sysctl setting vm.swappiness. - VMSwappiness *int32 `json:"vmSwappiness,omitempty"` - // VMVfsCachePressure - Sysctl setting vm.vfs_cache_pressure. - VMVfsCachePressure *int32 `json:"vmVfsCachePressure,omitempty"` -} - -// SystemData metadata pertaining to creation and last modification of the resource. -type SystemData struct { - // CreatedBy - The identity that created the resource. - CreatedBy *string `json:"createdBy,omitempty"` - // CreatedByType - The type of identity that created the resource. Possible values include: 'CreatedByTypeUser', 'CreatedByTypeApplication', 'CreatedByTypeManagedIdentity', 'CreatedByTypeKey' - CreatedByType CreatedByType `json:"createdByType,omitempty"` - // CreatedAt - The timestamp of resource creation (UTC). 
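A small sketch of how a few of the sysctl knobs above were set with these track-1 types; the specific values are placeholders, not recommendations:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice"
)

func int32Ptr(i int32) *int32 { return &i }

// exampleSysctls populates a handful of the node-level sysctl settings
// defined in SysctlConfig above; every field is an optional pointer.
func exampleSysctls() containerservice.SysctlConfig {
	return containerservice.SysctlConfig{
		NetCoreSomaxconn:        int32Ptr(16384),
		NetIpv4TCPMaxSynBacklog: int32Ptr(16384),
		FsInotifyMaxUserWatches: int32Ptr(1048576),
		VMMaxMapCount:           int32Ptr(262144),
	}
}
```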
- CreatedAt *date.Time `json:"createdAt,omitempty"` - // LastModifiedBy - The identity that last modified the resource. - LastModifiedBy *string `json:"lastModifiedBy,omitempty"` - // LastModifiedByType - The type of identity that last modified the resource. Possible values include: 'CreatedByTypeUser', 'CreatedByTypeApplication', 'CreatedByTypeManagedIdentity', 'CreatedByTypeKey' - LastModifiedByType CreatedByType `json:"lastModifiedByType,omitempty"` - // LastModifiedAt - The timestamp of resource last modification (UTC) - LastModifiedAt *date.Time `json:"lastModifiedAt,omitempty"` -} - -// TagsObject tags object for patch operations. -type TagsObject struct { - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for TagsObject. -func (toVar TagsObject) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if toVar.Tags != nil { - objectMap["tags"] = toVar.Tags - } - return json.Marshal(objectMap) -} - -// TimeInWeek time in a week. -type TimeInWeek struct { - // Day - The day of the week. Possible values include: 'WeekDaySunday', 'WeekDayMonday', 'WeekDayTuesday', 'WeekDayWednesday', 'WeekDayThursday', 'WeekDayFriday', 'WeekDaySaturday' - Day WeekDay `json:"day,omitempty"` - // HourSlots - Each integer hour represents a time range beginning at 0m after the hour ending at the next hour (non-inclusive). 0 corresponds to 00:00 UTC, 23 corresponds to 23:00 UTC. Specifying [0, 1] means the 00:00 - 02:00 UTC time range. - HourSlots *[]int32 `json:"hourSlots,omitempty"` -} - -// TimeSpan for example, between 2021-05-25T13:00:00Z and 2021-05-25T14:00:00Z. -type TimeSpan struct { - // Start - The start of a time span - Start *date.Time `json:"start,omitempty"` - // End - The end of a time span - End *date.Time `json:"end,omitempty"` -} - -// TrackedResource the resource model definition for an Azure Resource Manager tracked top level resource -// which has 'tags' and a 'location' -type TrackedResource struct { - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // Location - The geo-location where the resource lives - Location *string `json:"location,omitempty"` - // ID - READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; The name of the resource - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string `json:"type,omitempty"` - // SystemData - READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. - SystemData *SystemData `json:"systemData,omitempty"` -} - -// MarshalJSON is the custom marshaler for TrackedResource. -func (tr TrackedResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if tr.Tags != nil { - objectMap["tags"] = tr.Tags - } - if tr.Location != nil { - objectMap["location"] = tr.Location - } - return json.Marshal(objectMap) -} - -// UserAssignedIdentity details about a user assigned identity. -type UserAssignedIdentity struct { - // ResourceID - The resource ID of the user assigned identity. - ResourceID *string `json:"resourceId,omitempty"` - // ClientID - The client ID of the user assigned identity. 
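To make the `TimeInWeek` semantics concrete: per the `HourSlots` comment above, each listed hour covers the range starting at that hour, so a Sunday 02:00-04:00 UTC maintenance window is expressed as the two slots `[2, 3]`. A minimal sketch with the removed types:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice"
)

// sundayMaintenance describes a Sunday 02:00-04:00 UTC window using the
// track-1 TimeInWeek type deleted in this diff.
func sundayMaintenance() containerservice.TimeInWeek {
	hours := []int32{2, 3}
	return containerservice.TimeInWeek{
		Day:       containerservice.WeekDaySunday,
		HourSlots: &hours,
	}
}
```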
- ClientID *string `json:"clientId,omitempty"` - // ObjectID - The object ID of the user assigned identity. - ObjectID *string `json:"objectId,omitempty"` -} - -// VMDiagnostics profile for diagnostics on the container service VMs. -type VMDiagnostics struct { - // Enabled - Whether the VM diagnostic agent is provisioned on the VM. - Enabled *bool `json:"enabled,omitempty"` - // StorageURI - READ-ONLY; The URI of the storage account where diagnostics are stored. - StorageURI *string `json:"storageUri,omitempty"` -} - -// MarshalJSON is the custom marshaler for VMDiagnostics. -func (vd VMDiagnostics) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if vd.Enabled != nil { - objectMap["enabled"] = vd.Enabled - } - return json.Marshal(objectMap) -} - -// WindowsGmsaProfile windows gMSA Profile in the managed cluster. -type WindowsGmsaProfile struct { - // Enabled - Specifies whether to enable Windows gMSA in the managed cluster. - Enabled *bool `json:"enabled,omitempty"` - // DNSServer - Specifies the DNS server for Windows gMSA.
Set it to empty if you have configured the DNS server in the vnet which is used to create the managed cluster. - DNSServer *string `json:"dnsServer,omitempty"` - // RootDomainName - Specifies the root domain name for Windows gMSA.
Set it to empty if you have configured the DNS server in the vnet which is used to create the managed cluster. - RootDomainName *string `json:"rootDomainName,omitempty"` -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/operations.go deleted file mode 100644 index d3fceb6d4aa3..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/operations.go +++ /dev/null @@ -1,98 +0,0 @@ -package containerservice - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// OperationsClient is the the Container Service Client. -type OperationsClient struct { - BaseClient -} - -// NewOperationsClient creates an instance of the OperationsClient client. -func NewOperationsClient(subscriptionID string) OperationsClient { - return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { - return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List sends the list request. -func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.OperationsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.OperationsClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.OperationsClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. -func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/Microsoft.ContainerService/operations"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. 
The method will close the -// http.Response Body if it receives an error. -func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/privateendpointconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/privateendpointconnections.go deleted file mode 100644 index 316d11958eac..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/privateendpointconnections.go +++ /dev/null @@ -1,406 +0,0 @@ -package containerservice - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// PrivateEndpointConnectionsClient is the the Container Service Client. -type PrivateEndpointConnectionsClient struct { - BaseClient -} - -// NewPrivateEndpointConnectionsClient creates an instance of the PrivateEndpointConnectionsClient client. -func NewPrivateEndpointConnectionsClient(subscriptionID string) PrivateEndpointConnectionsClient { - return NewPrivateEndpointConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewPrivateEndpointConnectionsClientWithBaseURI creates an instance of the PrivateEndpointConnectionsClient client -// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign -// clouds, Azure stack). -func NewPrivateEndpointConnectionsClientWithBaseURI(baseURI string, subscriptionID string) PrivateEndpointConnectionsClient { - return PrivateEndpointConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Delete sends the delete request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// privateEndpointConnectionName - the name of the private endpoint connection. 
-func (client PrivateEndpointConnectionsClient) Delete(ctx context.Context, resourceGroupName string, resourceName string, privateEndpointConnectionName string) (result PrivateEndpointConnectionsDeleteFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Delete") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.PrivateEndpointConnectionsClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, resourceName, privateEndpointConnectionName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.PrivateEndpointConnectionsClient", "Delete", nil, "Failure preparing request") - return - } - - result, err = client.DeleteSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.PrivateEndpointConnectionsClient", "Delete", result.Response(), "Failure sending request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client PrivateEndpointConnectionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, resourceName string, privateEndpointConnectionName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. 
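Reviewer note: the Delete removed here returns a PrivateEndpointConnectionsDeleteFuture rather than blocking until the connection is gone, so callers had to drive the long-running operation themselves. Below is a minimal sketch of that pattern, assuming the usual go-autorest track-1 future behaviour (the generated future embeds azure.FutureAPI and is polled with the client's embedded autorest.Client); deletePrivateEndpointConnection is an illustrative helper name, not code from this PR, and it imports the upstream module directly rather than the vendored copy being removed.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice"
)

// deletePrivateEndpointConnection issues the delete and then blocks until the
// long-running operation reaches a terminal state, using the future returned
// by the track-1 client.
func deletePrivateEndpointConnection(ctx context.Context, client containerservice.PrivateEndpointConnectionsClient, resourceGroup, clusterName, connectionName string) error {
	future, err := client.Delete(ctx, resourceGroup, clusterName, connectionName)
	if err != nil {
		return fmt.Errorf("deleting private endpoint connection %q: %+v", connectionName, err)
	}

	// WaitForCompletionRef polls the operation URL until the service reports completion.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return fmt.Errorf("waiting for deletion of private endpoint connection %q: %+v", connectionName, err)
	}
	return nil
}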
-func (client PrivateEndpointConnectionsClient) DeleteSender(req *http.Request) (future PrivateEndpointConnectionsDeleteFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client PrivateEndpointConnectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get to learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// privateEndpointConnectionName - the name of the private endpoint connection. -func (client PrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, resourceName string, privateEndpointConnectionName string) (result PrivateEndpointConnection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.PrivateEndpointConnectionsClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, resourceName, privateEndpointConnectionName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.PrivateEndpointConnectionsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.PrivateEndpointConnectionsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.PrivateEndpointConnectionsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client PrivateEndpointConnectionsClient) GetPreparer(ctx context.Context, resourceGroupName string, resourceName string, privateEndpointConnectionName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client PrivateEndpointConnectionsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client PrivateEndpointConnectionsClient) GetResponder(resp *http.Response) (result PrivateEndpointConnection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List to learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. 
-func (client PrivateEndpointConnectionsClient) List(ctx context.Context, resourceGroupName string, resourceName string) (result PrivateEndpointConnectionListResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.PrivateEndpointConnectionsClient", "List", err.Error()) - } - - req, err := client.ListPreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.PrivateEndpointConnectionsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.PrivateEndpointConnectionsClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.PrivateEndpointConnectionsClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. -func (client PrivateEndpointConnectionsClient) ListPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client PrivateEndpointConnectionsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client PrivateEndpointConnectionsClient) ListResponder(resp *http.Response) (result PrivateEndpointConnectionListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Update sends the update request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// privateEndpointConnectionName - the name of the private endpoint connection. -// parameters - the updated private endpoint connection. -func (client PrivateEndpointConnectionsClient) Update(ctx context.Context, resourceGroupName string, resourceName string, privateEndpointConnectionName string, parameters PrivateEndpointConnection) (result PrivateEndpointConnection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateEndpointConnectionsClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.PrivateEndpointConnectionProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.PrivateEndpointConnectionProperties.PrivateLinkServiceConnectionState", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { - return result, validation.NewError("containerservice.PrivateEndpointConnectionsClient", "Update", err.Error()) - } - - req, err := client.UpdatePreparer(ctx, resourceGroupName, resourceName, privateEndpointConnectionName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.PrivateEndpointConnectionsClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.PrivateEndpointConnectionsClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.PrivateEndpointConnectionsClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. 
-func (client PrivateEndpointConnectionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, resourceName string, privateEndpointConnectionName string, parameters PrivateEndpointConnection) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "privateEndpointConnectionName": autorest.Encode("path", privateEndpointConnectionName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - parameters.ID = nil - parameters.Name = nil - parameters.Type = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client PrivateEndpointConnectionsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client PrivateEndpointConnectionsClient) UpdateResponder(resp *http.Response) (result PrivateEndpointConnection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/privatelinkresources.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/privatelinkresources.go deleted file mode 100644 index 0b898090ea61..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/privatelinkresources.go +++ /dev/null @@ -1,122 +0,0 @@ -package containerservice - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// PrivateLinkResourcesClient is the the Container Service Client. -type PrivateLinkResourcesClient struct { - BaseClient -} - -// NewPrivateLinkResourcesClient creates an instance of the PrivateLinkResourcesClient client. 
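Reviewer note: the PrivateLinkResourcesClient removed in this hunk takes the raw resource group and managed cluster names on each call, with the subscription ID baked into the client at construction time. A minimal sketch of how it was typically constructed and called follows; listPrivateLinkResources and the authorizer parameter are illustrative, and the result shape (a Value slice of resources with pointer fields) is the usual track-1 list-result convention rather than something visible in this diff.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice"
	"github.com/Azure/go-autorest/autorest"
)

// listPrivateLinkResources prints the private link resources exposed by a managed
// cluster, using the track-1 client removed in this change.
func listPrivateLinkResources(ctx context.Context, subscriptionID string, authorizer autorest.Authorizer, resourceGroup, clusterName string) error {
	client := containerservice.NewPrivateLinkResourcesClient(subscriptionID)
	client.Authorizer = authorizer

	result, err := client.List(ctx, resourceGroup, clusterName)
	if err != nil {
		return fmt.Errorf("listing private link resources for %q: %+v", clusterName, err)
	}

	if result.Value != nil {
		for _, resource := range *result.Value {
			if resource.Name != nil {
				fmt.Println(*resource.Name)
			}
		}
	}
	return nil
}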
-func NewPrivateLinkResourcesClient(subscriptionID string) PrivateLinkResourcesClient { - return NewPrivateLinkResourcesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewPrivateLinkResourcesClientWithBaseURI creates an instance of the PrivateLinkResourcesClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). -func NewPrivateLinkResourcesClientWithBaseURI(baseURI string, subscriptionID string) PrivateLinkResourcesClient { - return PrivateLinkResourcesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List to learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -func (client PrivateLinkResourcesClient) List(ctx context.Context, resourceGroupName string, resourceName string) (result PrivateLinkResourcesListResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/PrivateLinkResourcesClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.PrivateLinkResourcesClient", "List", err.Error()) - } - - req, err := client.ListPreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.PrivateLinkResourcesClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.PrivateLinkResourcesClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.PrivateLinkResourcesClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. 
-func (client PrivateLinkResourcesClient) ListPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client PrivateLinkResourcesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client PrivateLinkResourcesClient) ListResponder(resp *http.Response) (result PrivateLinkResourcesListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/resolveprivatelinkserviceid.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/resolveprivatelinkserviceid.go deleted file mode 100644 index f4472650fce6..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/resolveprivatelinkserviceid.go +++ /dev/null @@ -1,126 +0,0 @@ -package containerservice - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// ResolvePrivateLinkServiceIDClient is the the Container Service Client. -type ResolvePrivateLinkServiceIDClient struct { - BaseClient -} - -// NewResolvePrivateLinkServiceIDClient creates an instance of the ResolvePrivateLinkServiceIDClient client. -func NewResolvePrivateLinkServiceIDClient(subscriptionID string) ResolvePrivateLinkServiceIDClient { - return NewResolvePrivateLinkServiceIDClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewResolvePrivateLinkServiceIDClientWithBaseURI creates an instance of the ResolvePrivateLinkServiceIDClient client -// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign -// clouds, Azure stack). 
-func NewResolvePrivateLinkServiceIDClientWithBaseURI(baseURI string, subscriptionID string) ResolvePrivateLinkServiceIDClient { - return ResolvePrivateLinkServiceIDClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// POST sends the post request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// parameters - parameters required in order to resolve a private link service ID. -func (client ResolvePrivateLinkServiceIDClient) POST(ctx context.Context, resourceGroupName string, resourceName string, parameters PrivateLinkResource) (result PrivateLinkResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ResolvePrivateLinkServiceIDClient.POST") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.ResolvePrivateLinkServiceIDClient", "POST", err.Error()) - } - - req, err := client.POSTPreparer(ctx, resourceGroupName, resourceName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ResolvePrivateLinkServiceIDClient", "POST", nil, "Failure preparing request") - return - } - - resp, err := client.POSTSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.ResolvePrivateLinkServiceIDClient", "POST", resp, "Failure sending request") - return - } - - result, err = client.POSTResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.ResolvePrivateLinkServiceIDClient", "POST", resp, "Failure responding to request") - return - } - - return -} - -// POSTPreparer prepares the POST request. 
-func (client ResolvePrivateLinkServiceIDClient) POSTPreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters PrivateLinkResource) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - parameters.PrivateLinkServiceID = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resolvePrivateLinkServiceId", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// POSTSender sends the POST request. The method will close the -// http.Response Body if it receives an error. -func (client ResolvePrivateLinkServiceIDClient) POSTSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// POSTResponder handles the response to the POST request. The method always -// closes the http.Response Body. -func (client ResolvePrivateLinkServiceIDClient) POSTResponder(resp *http.Response) (result PrivateLinkResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/snapshots.go deleted file mode 100644 index eebb40b76edd..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/snapshots.go +++ /dev/null @@ -1,637 +0,0 @@ -package containerservice - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// SnapshotsClient is the the Container Service Client. -type SnapshotsClient struct { - BaseClient -} - -// NewSnapshotsClient creates an instance of the SnapshotsClient client. -func NewSnapshotsClient(subscriptionID string) SnapshotsClient { - return NewSnapshotsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewSnapshotsClientWithBaseURI creates an instance of the SnapshotsClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
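Reviewer note: the constructor pair in this hunk follows the track-1 convention, where NewSnapshotsClient targets the public cloud and the WithBaseURI variant exists for sovereign clouds and Azure Stack, with the subscription ID fixed at construction. A minimal sketch of building the removed SnapshotsClient against an explicit Resource Manager endpoint and fetching a single snapshot; getSnapshot and the authorizer parameter are illustrative names only.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice"
	"github.com/Azure/go-autorest/autorest"
)

// getSnapshot retrieves a node pool snapshot via the track-1 SnapshotsClient,
// pointed at an explicit Resource Manager endpoint (e.g. a sovereign cloud).
func getSnapshot(ctx context.Context, endpoint, subscriptionID string, authorizer autorest.Authorizer, resourceGroup, name string) (*containerservice.Snapshot, error) {
	client := containerservice.NewSnapshotsClientWithBaseURI(endpoint, subscriptionID)
	client.Authorizer = authorizer

	snapshot, err := client.Get(ctx, resourceGroup, name)
	if err != nil {
		return nil, fmt.Errorf("retrieving snapshot %q: %+v", name, err)
	}
	return &snapshot, nil
}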
-func NewSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) SnapshotsClient { - return SnapshotsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate sends the create or update request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// parameters - the snapshot to create or update. -func (client SnapshotsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters Snapshot) (result Snapshot, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.SnapshotsClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client SnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters Snapshot) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client SnapshotsClient) CreateOrUpdateResponder(resp *http.Response) (result Snapshot, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete sends the delete request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. 
-func (client SnapshotsClient) Delete(ctx context.Context, resourceGroupName string, resourceName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.SnapshotsClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client SnapshotsClient) DeletePreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client SnapshotsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. 
-func (client SnapshotsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get sends the get request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -func (client SnapshotsClient) Get(ctx context.Context, resourceGroupName string, resourceName string) (result Snapshot, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.SnapshotsClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, resourceName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client SnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName string, resourceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. 
-func (client SnapshotsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client SnapshotsClient) GetResponder(resp *http.Response) (result Snapshot, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List sends the list request. -func (client SnapshotsClient) List(ctx context.Context) (result SnapshotListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.List") - defer func() { - sc := -1 - if result.slr.Response.Response != nil { - sc = result.slr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.SnapshotsClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.slr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "List", resp, "Failure sending request") - return - } - - result.slr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "List", resp, "Failure responding to request") - return - } - if result.slr.hasNextLink() && result.slr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client SnapshotsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/snapshots", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client SnapshotsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
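Reviewer note: the List / listNextResults / ListComplete trio in this file is the track-1 pagination scheme: List returns one page, listNextResults follows the nextLink, and ListComplete wraps both behind an iterator. A minimal sketch of walking every snapshot in the subscription with that iterator, assuming the standard generated iterator methods NotDone, Value and NextWithContext and a Name field on the Snapshot model; listAllSnapshots is an illustrative helper name.

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice"
)

// listAllSnapshots collects the names of every snapshot in the subscription,
// letting the iterator returned by ListComplete fetch subsequent pages as needed.
func listAllSnapshots(ctx context.Context, client containerservice.SnapshotsClient) ([]string, error) {
	names := make([]string, 0)

	iterator, err := client.ListComplete(ctx)
	if err != nil {
		return nil, fmt.Errorf("listing snapshots: %+v", err)
	}

	for iterator.NotDone() {
		snapshot := iterator.Value()
		if snapshot.Name != nil {
			names = append(names, *snapshot.Name)
		}
		// NextWithContext advances to the next item, crossing page boundaries transparently.
		if err := iterator.NextWithContext(ctx); err != nil {
			return nil, fmt.Errorf("advancing snapshot iterator: %+v", err)
		}
	}
	return names, nil
}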
-func (client SnapshotsClient) ListResponder(resp *http.Response) (result SnapshotListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client SnapshotsClient) listNextResults(ctx context.Context, lastResults SnapshotListResult) (result SnapshotListResult, err error) { - req, err := lastResults.snapshotListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client SnapshotsClient) ListComplete(ctx context.Context) (result SnapshotListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx) - return -} - -// ListByResourceGroup sends the list by resource group request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. 
-func (client SnapshotsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result SnapshotListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.slr.Response.Response != nil { - sc = result.slr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.SnapshotsClient", "ListByResourceGroup", err.Error()) - } - - result.fn = client.listByResourceGroupNextResults - req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.slr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result.slr, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "ListByResourceGroup", resp, "Failure responding to request") - return - } - if result.slr.hasNextLink() && result.slr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. -func (client SnapshotsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client SnapshotsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. 
-func (client SnapshotsClient) ListByResourceGroupResponder(resp *http.Response) (result SnapshotListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByResourceGroupNextResults retrieves the next set of results, if any. -func (client SnapshotsClient) listByResourceGroupNextResults(ctx context.Context, lastResults SnapshotListResult) (result SnapshotListResult, err error) { - req, err := lastResults.snapshotListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. -func (client SnapshotsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result SnapshotListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) - return -} - -// UpdateTags sends the update tags request. -// Parameters: -// resourceGroupName - the name of the resource group. The name is case insensitive. -// resourceName - the name of the managed cluster resource. -// parameters - parameters supplied to the Update snapshot Tags operation. 
-func (client SnapshotsClient) UpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (result Snapshot, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SnapshotsClient.UpdateTags") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: client.SubscriptionID, - Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: resourceName, - Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, - {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("containerservice.SnapshotsClient", "UpdateTags", err.Error()) - } - - req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, resourceName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "UpdateTags", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateTagsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "UpdateTags", resp, "Failure sending request") - return - } - - result, err = client.UpdateTagsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerservice.SnapshotsClient", "UpdateTags", resp, "Failure responding to request") - return - } - - return -} - -// UpdateTagsPreparer prepares the UpdateTags request. -func (client SnapshotsClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, resourceName string, parameters TagsObject) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "resourceName": autorest.Encode("path", resourceName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2022-03-02-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateTagsSender sends the UpdateTags request. The method will close the -// http.Response Body if it receives an error. -func (client SnapshotsClient) UpdateTagsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateTagsResponder handles the response to the UpdateTags request. 
The method always -// closes the http.Response Body. -func (client SnapshotsClient) UpdateTagsResponder(resp *http.Response) (result Snapshot, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/version.go deleted file mode 100644 index 23c92ba7393f..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice/version.go +++ /dev/null @@ -1,19 +0,0 @@ -package containerservice - -import "github.com/Azure/azure-sdk-for-go/version" - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/" + Version() + " containerservice/2022-03-02-preview" -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return version.Number -} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/README.md b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/README.md new file mode 100644 index 000000000000..8a97b0e81ede --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/README.md @@ -0,0 +1,142 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools` Documentation + +The `agentpools` SDK allows for interaction with the Azure Resource Manager Service `containerservice` (API Version `2022-08-02-preview`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools" +``` + + +### Client Initialization + +```go +client := agentpools.NewAgentPoolsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `AgentPoolsClient.AbortLatestOperation` + +```go +ctx := context.TODO() +id := agentpools.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue", "agentPoolValue") + +read, err := client.AbortLatestOperation(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `AgentPoolsClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := agentpools.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue", "agentPoolValue") + +payload := agentpools.AgentPool{ + // ... 
+} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `AgentPoolsClient.Delete` + +```go +ctx := context.TODO() +id := agentpools.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue", "agentPoolValue") + +if err := client.DeleteThenPoll(ctx, id, agentpools.DefaultDeleteOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `AgentPoolsClient.Get` + +```go +ctx := context.TODO() +id := agentpools.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue", "agentPoolValue") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `AgentPoolsClient.GetAvailableAgentPoolVersions` + +```go +ctx := context.TODO() +id := agentpools.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +read, err := client.GetAvailableAgentPoolVersions(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `AgentPoolsClient.GetUpgradeProfile` + +```go +ctx := context.TODO() +id := agentpools.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue", "agentPoolValue") + +read, err := client.GetUpgradeProfile(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `AgentPoolsClient.List` + +```go +ctx := context.TODO() +id := agentpools.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +// alternatively `client.List(ctx, id)` can be used to do batched pagination +items, err := client.ListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items.Items { + // do something +} +``` + + +### Example Usage: `AgentPoolsClient.UpgradeNodeImageVersion` + +```go +ctx := context.TODO() +id := agentpools.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue", "agentPoolValue") + +if err := client.UpgradeNodeImageVersionThenPoll(ctx, id); err != nil { + // handle the error +} +``` diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/client.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/client.go new file mode 100644 index 000000000000..f4128f5c794c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/client.go @@ -0,0 +1,18 @@ +package agentpools + +import "github.com/Azure/go-autorest/autorest" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +type AgentPoolsClient struct { + Client autorest.Client + baseUri string +} + +func NewAgentPoolsClientWithBaseURI(endpoint string) AgentPoolsClient { + return AgentPoolsClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/constants.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/constants.go new file mode 100644 index 000000000000..3b6cb4198db4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/constants.go @@ -0,0 +1,360 @@ +package agentpools + +import "strings" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolMode string + +const ( + AgentPoolModeSystem AgentPoolMode = "System" + AgentPoolModeUser AgentPoolMode = "User" +) + +func PossibleValuesForAgentPoolMode() []string { + return []string{ + string(AgentPoolModeSystem), + string(AgentPoolModeUser), + } +} + +func parseAgentPoolMode(input string) (*AgentPoolMode, error) { + vals := map[string]AgentPoolMode{ + "system": AgentPoolModeSystem, + "user": AgentPoolModeUser, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AgentPoolMode(input) + return &out, nil +} + +type AgentPoolType string + +const ( + AgentPoolTypeAvailabilitySet AgentPoolType = "AvailabilitySet" + AgentPoolTypeVirtualMachineScaleSets AgentPoolType = "VirtualMachineScaleSets" +) + +func PossibleValuesForAgentPoolType() []string { + return []string{ + string(AgentPoolTypeAvailabilitySet), + string(AgentPoolTypeVirtualMachineScaleSets), + } +} + +func parseAgentPoolType(input string) (*AgentPoolType, error) { + vals := map[string]AgentPoolType{ + "availabilityset": AgentPoolTypeAvailabilitySet, + "virtualmachinescalesets": AgentPoolTypeVirtualMachineScaleSets, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AgentPoolType(input) + return &out, nil +} + +type Code string + +const ( + CodeRunning Code = "Running" + CodeStopped Code = "Stopped" +) + +func PossibleValuesForCode() []string { + return []string{ + string(CodeRunning), + string(CodeStopped), + } +} + +func parseCode(input string) (*Code, error) { + vals := map[string]Code{ + "running": CodeRunning, + "stopped": CodeStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Code(input) + return &out, nil +} + +type GPUInstanceProfile string + +const ( + GPUInstanceProfileMIGFourg GPUInstanceProfile = "MIG4g" + GPUInstanceProfileMIGOneg GPUInstanceProfile = "MIG1g" + GPUInstanceProfileMIGSeveng GPUInstanceProfile = "MIG7g" + GPUInstanceProfileMIGThreeg GPUInstanceProfile = "MIG3g" + GPUInstanceProfileMIGTwog GPUInstanceProfile = "MIG2g" +) + +func PossibleValuesForGPUInstanceProfile() []string { + return []string{ + string(GPUInstanceProfileMIGFourg), + string(GPUInstanceProfileMIGOneg), + string(GPUInstanceProfileMIGSeveng), + string(GPUInstanceProfileMIGThreeg), + string(GPUInstanceProfileMIGTwog), + } +} + +func parseGPUInstanceProfile(input string) (*GPUInstanceProfile, error) { + vals := 
map[string]GPUInstanceProfile{ + "mig4g": GPUInstanceProfileMIGFourg, + "mig1g": GPUInstanceProfileMIGOneg, + "mig7g": GPUInstanceProfileMIGSeveng, + "mig3g": GPUInstanceProfileMIGThreeg, + "mig2g": GPUInstanceProfileMIGTwog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := GPUInstanceProfile(input) + return &out, nil +} + +type KubeletDiskType string + +const ( + KubeletDiskTypeOS KubeletDiskType = "OS" + KubeletDiskTypeTemporary KubeletDiskType = "Temporary" +) + +func PossibleValuesForKubeletDiskType() []string { + return []string{ + string(KubeletDiskTypeOS), + string(KubeletDiskTypeTemporary), + } +} + +func parseKubeletDiskType(input string) (*KubeletDiskType, error) { + vals := map[string]KubeletDiskType{ + "os": KubeletDiskTypeOS, + "temporary": KubeletDiskTypeTemporary, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KubeletDiskType(input) + return &out, nil +} + +type OSDiskType string + +const ( + OSDiskTypeEphemeral OSDiskType = "Ephemeral" + OSDiskTypeManaged OSDiskType = "Managed" +) + +func PossibleValuesForOSDiskType() []string { + return []string{ + string(OSDiskTypeEphemeral), + string(OSDiskTypeManaged), + } +} + +func parseOSDiskType(input string) (*OSDiskType, error) { + vals := map[string]OSDiskType{ + "ephemeral": OSDiskTypeEphemeral, + "managed": OSDiskTypeManaged, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSDiskType(input) + return &out, nil +} + +type OSSKU string + +const ( + OSSKUCBLMariner OSSKU = "CBLMariner" + OSSKUMariner OSSKU = "Mariner" + OSSKUUbuntu OSSKU = "Ubuntu" + OSSKUWindowsTwoZeroOneNine OSSKU = "Windows2019" + OSSKUWindowsTwoZeroTwoTwo OSSKU = "Windows2022" +) + +func PossibleValuesForOSSKU() []string { + return []string{ + string(OSSKUCBLMariner), + string(OSSKUMariner), + string(OSSKUUbuntu), + string(OSSKUWindowsTwoZeroOneNine), + string(OSSKUWindowsTwoZeroTwoTwo), + } +} + +func parseOSSKU(input string) (*OSSKU, error) { + vals := map[string]OSSKU{ + "cblmariner": OSSKUCBLMariner, + "mariner": OSSKUMariner, + "ubuntu": OSSKUUbuntu, + "windows2019": OSSKUWindowsTwoZeroOneNine, + "windows2022": OSSKUWindowsTwoZeroTwoTwo, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSSKU(input) + return &out, nil +} + +type OSType string + +const ( + OSTypeLinux OSType = "Linux" + OSTypeWindows OSType = "Windows" +) + +func PossibleValuesForOSType() []string { + return []string{ + string(OSTypeLinux), + string(OSTypeWindows), + } +} + +func parseOSType(input string) (*OSType, error) { + vals := map[string]OSType{ + "linux": OSTypeLinux, + "windows": OSTypeWindows, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSType(input) + return &out, nil +} + +type ScaleDownMode string + +const ( + ScaleDownModeDeallocate ScaleDownMode = "Deallocate" + ScaleDownModeDelete ScaleDownMode = "Delete" +) + +func PossibleValuesForScaleDownMode() []string { + return []string{ + string(ScaleDownModeDeallocate), + string(ScaleDownModeDelete), + } +} + +func parseScaleDownMode(input string) (*ScaleDownMode, error) { + vals := map[string]ScaleDownMode{ + "deallocate": 
ScaleDownModeDeallocate, + "delete": ScaleDownModeDelete, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScaleDownMode(input) + return &out, nil +} + +type ScaleSetEvictionPolicy string + +const ( + ScaleSetEvictionPolicyDeallocate ScaleSetEvictionPolicy = "Deallocate" + ScaleSetEvictionPolicyDelete ScaleSetEvictionPolicy = "Delete" +) + +func PossibleValuesForScaleSetEvictionPolicy() []string { + return []string{ + string(ScaleSetEvictionPolicyDeallocate), + string(ScaleSetEvictionPolicyDelete), + } +} + +func parseScaleSetEvictionPolicy(input string) (*ScaleSetEvictionPolicy, error) { + vals := map[string]ScaleSetEvictionPolicy{ + "deallocate": ScaleSetEvictionPolicyDeallocate, + "delete": ScaleSetEvictionPolicyDelete, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScaleSetEvictionPolicy(input) + return &out, nil +} + +type ScaleSetPriority string + +const ( + ScaleSetPriorityRegular ScaleSetPriority = "Regular" + ScaleSetPrioritySpot ScaleSetPriority = "Spot" +) + +func PossibleValuesForScaleSetPriority() []string { + return []string{ + string(ScaleSetPriorityRegular), + string(ScaleSetPrioritySpot), + } +} + +func parseScaleSetPriority(input string) (*ScaleSetPriority, error) { + vals := map[string]ScaleSetPriority{ + "regular": ScaleSetPriorityRegular, + "spot": ScaleSetPrioritySpot, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScaleSetPriority(input) + return &out, nil +} + +type WorkloadRuntime string + +const ( + WorkloadRuntimeOCIContainer WorkloadRuntime = "OCIContainer" + WorkloadRuntimeWasmWasi WorkloadRuntime = "WasmWasi" +) + +func PossibleValuesForWorkloadRuntime() []string { + return []string{ + string(WorkloadRuntimeOCIContainer), + string(WorkloadRuntimeWasmWasi), + } +} + +func parseWorkloadRuntime(input string) (*WorkloadRuntime, error) { + vals := map[string]WorkloadRuntime{ + "ocicontainer": WorkloadRuntimeOCIContainer, + "wasmwasi": WorkloadRuntimeWasmWasi, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := WorkloadRuntime(input) + return &out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/id_agentpool.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/id_agentpool.go new file mode 100644 index 000000000000..3044009866e5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/id_agentpool.go @@ -0,0 +1,137 @@ +package agentpools + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = AgentPoolId{} + +// AgentPoolId is a struct representing the Resource ID for a Agent Pool +type AgentPoolId struct { + SubscriptionId string + ResourceGroupName string + ResourceName string + AgentPoolName string +} + +// NewAgentPoolID returns a new AgentPoolId struct +func NewAgentPoolID(subscriptionId string, resourceGroupName string, resourceName string, agentPoolName string) AgentPoolId { + return AgentPoolId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + 
ResourceName: resourceName, + AgentPoolName: agentPoolName, + } +} + +// ParseAgentPoolID parses 'input' into a AgentPoolId +func ParseAgentPoolID(input string) (*AgentPoolId, error) { + parser := resourceids.NewParserFromResourceIdType(AgentPoolId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := AgentPoolId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) + } + + if id.AgentPoolName, ok = parsed.Parsed["agentPoolName"]; !ok { + return nil, fmt.Errorf("the segment 'agentPoolName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseAgentPoolIDInsensitively parses 'input' case-insensitively into a AgentPoolId +// note: this method should only be used for API response data and not user input +func ParseAgentPoolIDInsensitively(input string) (*AgentPoolId, error) { + parser := resourceids.NewParserFromResourceIdType(AgentPoolId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := AgentPoolId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) + } + + if id.AgentPoolName, ok = parsed.Parsed["agentPoolName"]; !ok { + return nil, fmt.Errorf("the segment 'agentPoolName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateAgentPoolID checks that 'input' can be parsed as a Agent Pool ID +func ValidateAgentPoolID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseAgentPoolID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Agent Pool ID +func (id AgentPoolId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s/agentPools/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ResourceName, id.AgentPoolName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Agent Pool ID +func (id AgentPoolId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", 
"example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("resourceName", "resourceValue"), + resourceids.StaticSegment("staticAgentPools", "agentPools", "agentPools"), + resourceids.UserSpecifiedSegment("agentPoolName", "agentPoolValue"), + } +} + +// String returns a human-readable description of this Agent Pool ID +func (id AgentPoolId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Resource Name: %q", id.ResourceName), + fmt.Sprintf("Agent Pool Name: %q", id.AgentPoolName), + } + return fmt.Sprintf("Agent Pool (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/id_managedcluster.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/id_managedcluster.go new file mode 100644 index 000000000000..340433c0fa8e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/id_managedcluster.go @@ -0,0 +1,124 @@ +package agentpools + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = ManagedClusterId{} + +// ManagedClusterId is a struct representing the Resource ID for a Managed Cluster +type ManagedClusterId struct { + SubscriptionId string + ResourceGroupName string + ResourceName string +} + +// NewManagedClusterID returns a new ManagedClusterId struct +func NewManagedClusterID(subscriptionId string, resourceGroupName string, resourceName string) ManagedClusterId { + return ManagedClusterId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ResourceName: resourceName, + } +} + +// ParseManagedClusterID parses 'input' into a ManagedClusterId +func ParseManagedClusterID(input string) (*ManagedClusterId, error) { + parser := resourceids.NewParserFromResourceIdType(ManagedClusterId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ManagedClusterId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseManagedClusterIDInsensitively parses 'input' case-insensitively into a ManagedClusterId +// note: this method should only be used for API response data and not user input +func ParseManagedClusterIDInsensitively(input string) (*ManagedClusterId, error) { + parser := resourceids.NewParserFromResourceIdType(ManagedClusterId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } 
+ + var ok bool + id := ManagedClusterId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateManagedClusterID checks that 'input' can be parsed as a Managed Cluster ID +func ValidateManagedClusterID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseManagedClusterID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Managed Cluster ID +func (id ManagedClusterId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ResourceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Managed Cluster ID +func (id ManagedClusterId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("resourceName", "resourceValue"), + } +} + +// String returns a human-readable description of this Managed Cluster ID +func (id ManagedClusterId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Resource Name: %q", id.ResourceName), + } + return fmt.Sprintf("Managed Cluster (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_abortlatestoperation_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_abortlatestoperation_autorest.go new file mode 100644 index 000000000000..5cb3e504f4bc --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_abortlatestoperation_autorest.go @@ -0,0 +1,67 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AbortLatestOperationOperationResponse struct { + HttpResponse *http.Response +} + +// AbortLatestOperation ... +func (c AgentPoolsClient) AbortLatestOperation(ctx context.Context, id AgentPoolId) (result AbortLatestOperationOperationResponse, err error) { + req, err := c.preparerForAbortLatestOperation(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "AbortLatestOperation", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "AbortLatestOperation", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForAbortLatestOperation(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "AbortLatestOperation", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForAbortLatestOperation prepares the AbortLatestOperation request. +func (c AgentPoolsClient) preparerForAbortLatestOperation(ctx context.Context, id AgentPoolId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/abort", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForAbortLatestOperation handles the response to the AbortLatestOperation request. The method always +// closes the http.Response Body. +func (c AgentPoolsClient) responderForAbortLatestOperation(resp *http.Response) (result AbortLatestOperationOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusNoContent), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_createorupdate_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_createorupdate_autorest.go new file mode 100644 index 000000000000..0106b67409a8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_createorupdate_autorest.go @@ -0,0 +1,79 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// CreateOrUpdate ... 
+func (c AgentPoolsClient) CreateOrUpdate(ctx context.Context, id AgentPoolId, input AgentPool) (result CreateOrUpdateOperationResponse, err error) { + req, err := c.preparerForCreateOrUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = c.senderForCreateOrUpdate(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "CreateOrUpdate", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c AgentPoolsClient) CreateOrUpdateThenPoll(ctx context.Context, id AgentPoolId, input AgentPool) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} + +// preparerForCreateOrUpdate prepares the CreateOrUpdate request. +func (c AgentPoolsClient) preparerForCreateOrUpdate(ctx context.Context, id AgentPoolId, input AgentPool) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForCreateOrUpdate sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (c AgentPoolsClient) senderForCreateOrUpdate(ctx context.Context, req *http.Request) (future CreateOrUpdateOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_delete_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_delete_autorest.go new file mode 100644 index 000000000000..4acc91111ca3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_delete_autorest.go @@ -0,0 +1,107 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DeleteOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +type DeleteOperationOptions struct { + IgnorePodDisruptionBudget *bool +} + +func DefaultDeleteOperationOptions() DeleteOperationOptions { + return DeleteOperationOptions{} +} + +func (o DeleteOperationOptions) toHeaders() map[string]interface{} { + out := make(map[string]interface{}) + + return out +} + +func (o DeleteOperationOptions) toQueryString() map[string]interface{} { + out := make(map[string]interface{}) + + if o.IgnorePodDisruptionBudget != nil { + out["ignore-pod-disruption-budget"] = *o.IgnorePodDisruptionBudget + } + + return out +} + +// Delete ... +func (c AgentPoolsClient) Delete(ctx context.Context, id AgentPoolId, options DeleteOperationOptions) (result DeleteOperationResponse, err error) { + req, err := c.preparerForDelete(ctx, id, options) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = c.senderForDelete(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "Delete", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c AgentPoolsClient) DeleteThenPoll(ctx context.Context, id AgentPoolId, options DeleteOperationOptions) error { + result, err := c.Delete(ctx, id, options) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} + +// preparerForDelete prepares the Delete request. +func (c AgentPoolsClient) preparerForDelete(ctx context.Context, id AgentPoolId, options DeleteOperationOptions) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + for k, v := range options.toQueryString() { + queryParameters[k] = autorest.Encode("query", v) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithHeaders(options.toHeaders()), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForDelete sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (c AgentPoolsClient) senderForDelete(ctx context.Context, req *http.Request) (future DeleteOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_get_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_get_autorest.go new file mode 100644 index 000000000000..989893ea89d6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_get_autorest.go @@ -0,0 +1,68 @@ +package agentpools + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + Model *AgentPool +} + +// Get ... +func (c AgentPoolsClient) Get(ctx context.Context, id AgentPoolId) (result GetOperationResponse, err error) { + req, err := c.preparerForGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "Get", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "Get", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "Get", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGet prepares the Get request. +func (c AgentPoolsClient) preparerForGet(ctx context.Context, id AgentPoolId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGet handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (c AgentPoolsClient) responderForGet(resp *http.Response) (result GetOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_getavailableagentpoolversions_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_getavailableagentpoolversions_autorest.go new file mode 100644 index 000000000000..9cdff78ed4c6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_getavailableagentpoolversions_autorest.go @@ -0,0 +1,69 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetAvailableAgentPoolVersionsOperationResponse struct { + HttpResponse *http.Response + Model *AgentPoolAvailableVersions +} + +// GetAvailableAgentPoolVersions ... +func (c AgentPoolsClient) GetAvailableAgentPoolVersions(ctx context.Context, id ManagedClusterId) (result GetAvailableAgentPoolVersionsOperationResponse, err error) { + req, err := c.preparerForGetAvailableAgentPoolVersions(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "GetAvailableAgentPoolVersions", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "GetAvailableAgentPoolVersions", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGetAvailableAgentPoolVersions(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "GetAvailableAgentPoolVersions", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGetAvailableAgentPoolVersions prepares the GetAvailableAgentPoolVersions request. +func (c AgentPoolsClient) preparerForGetAvailableAgentPoolVersions(ctx context.Context, id ManagedClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/availableAgentPoolVersions", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGetAvailableAgentPoolVersions handles the response to the GetAvailableAgentPoolVersions request. The method always +// closes the http.Response Body. 
+func (c AgentPoolsClient) responderForGetAvailableAgentPoolVersions(resp *http.Response) (result GetAvailableAgentPoolVersionsOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_getupgradeprofile_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_getupgradeprofile_autorest.go new file mode 100644 index 000000000000..0a94e7b1cd75 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_getupgradeprofile_autorest.go @@ -0,0 +1,69 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUpgradeProfileOperationResponse struct { + HttpResponse *http.Response + Model *AgentPoolUpgradeProfile +} + +// GetUpgradeProfile ... +func (c AgentPoolsClient) GetUpgradeProfile(ctx context.Context, id AgentPoolId) (result GetUpgradeProfileOperationResponse, err error) { + req, err := c.preparerForGetUpgradeProfile(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "GetUpgradeProfile", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "GetUpgradeProfile", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGetUpgradeProfile(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "GetUpgradeProfile", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGetUpgradeProfile prepares the GetUpgradeProfile request. +func (c AgentPoolsClient) preparerForGetUpgradeProfile(ctx context.Context, id AgentPoolId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/upgradeProfiles/default", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGetUpgradeProfile handles the response to the GetUpgradeProfile request. The method always +// closes the http.Response Body. 
+func (c AgentPoolsClient) responderForGetUpgradeProfile(resp *http.Response) (result GetUpgradeProfileOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_list_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_list_autorest.go new file mode 100644 index 000000000000..2dbc9943693c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_list_autorest.go @@ -0,0 +1,186 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListOperationResponse struct { + HttpResponse *http.Response + Model *[]AgentPool + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListOperationResponse, error) +} + +type ListCompleteResult struct { + Items []AgentPool +} + +func (r ListOperationResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListOperationResponse) LoadMore(ctx context.Context) (resp ListOperationResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +// List ... +func (c AgentPoolsClient) List(ctx context.Context, id ManagedClusterId) (resp ListOperationResponse, err error) { + req, err := c.preparerForList(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "List", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "List", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForList(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "List", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// preparerForList prepares the List request. +func (c AgentPoolsClient) preparerForList(ctx context.Context, id ManagedClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/agentPools", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListWithNextLink prepares the List request with the given nextLink token. 
+func (c AgentPoolsClient) preparerForListWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForList handles the response to the List request. The method always +// closes the http.Response Body. +func (c AgentPoolsClient) responderForList(resp *http.Response) (result ListOperationResponse, err error) { + type page struct { + Values []AgentPool `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListOperationResponse, err error) { + req, err := c.preparerForListWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "List", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "List", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForList(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "List", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} + +// ListComplete retrieves all of the results into a single object +func (c AgentPoolsClient) ListComplete(ctx context.Context, id ManagedClusterId) (ListCompleteResult, error) { + return c.ListCompleteMatchingPredicate(ctx, id, AgentPoolOperationPredicate{}) +} + +// ListCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c AgentPoolsClient) ListCompleteMatchingPredicate(ctx context.Context, id ManagedClusterId, predicate AgentPoolOperationPredicate) (resp ListCompleteResult, err error) { + items := make([]AgentPool, 0) + + page, err := c.List(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListCompleteResult{ + Items: items, + } + return out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_upgradenodeimageversion_autorest.go 
b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_upgradenodeimageversion_autorest.go new file mode 100644 index 000000000000..ab04f78312de --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/method_upgradenodeimageversion_autorest.go @@ -0,0 +1,78 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpgradeNodeImageVersionOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// UpgradeNodeImageVersion ... +func (c AgentPoolsClient) UpgradeNodeImageVersion(ctx context.Context, id AgentPoolId) (result UpgradeNodeImageVersionOperationResponse, err error) { + req, err := c.preparerForUpgradeNodeImageVersion(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "UpgradeNodeImageVersion", nil, "Failure preparing request") + return + } + + result, err = c.senderForUpgradeNodeImageVersion(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "agentpools.AgentPoolsClient", "UpgradeNodeImageVersion", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// UpgradeNodeImageVersionThenPoll performs UpgradeNodeImageVersion then polls until it's completed +func (c AgentPoolsClient) UpgradeNodeImageVersionThenPoll(ctx context.Context, id AgentPoolId) error { + result, err := c.UpgradeNodeImageVersion(ctx, id) + if err != nil { + return fmt.Errorf("performing UpgradeNodeImageVersion: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after UpgradeNodeImageVersion: %+v", err) + } + + return nil +} + +// preparerForUpgradeNodeImageVersion prepares the UpgradeNodeImageVersion request. +func (c AgentPoolsClient) preparerForUpgradeNodeImageVersion(ctx context.Context, id AgentPoolId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/upgradeNodeImageVersion", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForUpgradeNodeImageVersion sends the UpgradeNodeImageVersion request. The method will close the +// http.Response Body if it receives an error. 
+func (c AgentPoolsClient) senderForUpgradeNodeImageVersion(ctx context.Context, req *http.Request) (future UpgradeNodeImageVersionOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpool.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpool.go new file mode 100644 index 000000000000..c67ae33e826c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpool.go @@ -0,0 +1,11 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPool struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *ManagedClusterAgentPoolProfileProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolavailableversions.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolavailableversions.go new file mode 100644 index 000000000000..0b65a05c663d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolavailableversions.go @@ -0,0 +1,11 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolAvailableVersions struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties AgentPoolAvailableVersionsProperties `json:"properties"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolavailableversionsproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolavailableversionsproperties.go new file mode 100644 index 000000000000..a8369deaba74 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolavailableversionsproperties.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AgentPoolAvailableVersionsProperties struct { + AgentPoolVersions *[]AgentPoolAvailableVersionsPropertiesAgentPoolVersionsInlined `json:"agentPoolVersions,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolavailableversionspropertiesagentpoolversionsinlined.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolavailableversionspropertiesagentpoolversionsinlined.go new file mode 100644 index 000000000000..1631b3137d15 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolavailableversionspropertiesagentpoolversionsinlined.go @@ -0,0 +1,10 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolAvailableVersionsPropertiesAgentPoolVersionsInlined struct { + Default *bool `json:"default,omitempty"` + IsPreview *bool `json:"isPreview,omitempty"` + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolupgradeprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolupgradeprofile.go new file mode 100644 index 000000000000..23cc2927f28f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolupgradeprofile.go @@ -0,0 +1,11 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolUpgradeProfile struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties AgentPoolUpgradeProfileProperties `json:"properties"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolupgradeprofileproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolupgradeprofileproperties.go new file mode 100644 index 000000000000..5df00c967623 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolupgradeprofileproperties.go @@ -0,0 +1,11 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AgentPoolUpgradeProfileProperties struct { + KubernetesVersion string `json:"kubernetesVersion"` + LatestNodeImageVersion *string `json:"latestNodeImageVersion,omitempty"` + OsType OSType `json:"osType"` + Upgrades *[]AgentPoolUpgradeProfilePropertiesUpgradesInlined `json:"upgrades,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolupgradeprofilepropertiesupgradesinlined.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolupgradeprofilepropertiesupgradesinlined.go new file mode 100644 index 000000000000..b45442f6cf0f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolupgradeprofilepropertiesupgradesinlined.go @@ -0,0 +1,9 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolUpgradeProfilePropertiesUpgradesInlined struct { + IsPreview *bool `json:"isPreview,omitempty"` + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolupgradesettings.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolupgradesettings.go new file mode 100644 index 000000000000..4b7613a093f9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolupgradesettings.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolUpgradeSettings struct { + MaxSurge *string `json:"maxSurge,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolwindowsprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolwindowsprofile.go new file mode 100644 index 000000000000..d7ad07f7f69b --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_agentpoolwindowsprofile.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolWindowsProfile struct { + DisableOutboundNat *bool `json:"disableOutboundNat,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_creationdata.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_creationdata.go new file mode 100644 index 000000000000..88a8fe8123b2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_creationdata.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CreationData struct { + SourceResourceId *string `json:"sourceResourceId,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_kubeletconfig.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_kubeletconfig.go new file mode 100644 index 000000000000..7d56f79880c0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_kubeletconfig.go @@ -0,0 +1,18 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type KubeletConfig struct { + AllowedUnsafeSysctls *[]string `json:"allowedUnsafeSysctls,omitempty"` + ContainerLogMaxFiles *int64 `json:"containerLogMaxFiles,omitempty"` + ContainerLogMaxSizeMB *int64 `json:"containerLogMaxSizeMB,omitempty"` + CpuCfsQuota *bool `json:"cpuCfsQuota,omitempty"` + CpuCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty"` + CpuManagerPolicy *string `json:"cpuManagerPolicy,omitempty"` + FailSwapOn *bool `json:"failSwapOn,omitempty"` + ImageGcHighThreshold *int64 `json:"imageGcHighThreshold,omitempty"` + ImageGcLowThreshold *int64 `json:"imageGcLowThreshold,omitempty"` + PodMaxPids *int64 `json:"podMaxPids,omitempty"` + TopologyManagerPolicy *string `json:"topologyManagerPolicy,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_linuxosconfig.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_linuxosconfig.go new file mode 100644 index 000000000000..8256b210a852 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_linuxosconfig.go @@ -0,0 +1,11 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type LinuxOSConfig struct { + SwapFileSizeMB *int64 `json:"swapFileSizeMB,omitempty"` + Sysctls *SysctlConfig `json:"sysctls,omitempty"` + TransparentHugePageDefrag *string `json:"transparentHugePageDefrag,omitempty"` + TransparentHugePageEnabled *string `json:"transparentHugePageEnabled,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_managedclusteragentpoolprofileproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_managedclusteragentpoolprofileproperties.go new file mode 100644 index 000000000000..588c3025d551 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_managedclusteragentpoolprofileproperties.go @@ -0,0 +1,52 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterAgentPoolProfileProperties struct { + AvailabilityZones *[]string `json:"availabilityZones,omitempty"` + CapacityReservationGroupID *string `json:"capacityReservationGroupID,omitempty"` + Count *int64 `json:"count,omitempty"` + CreationData *CreationData `json:"creationData,omitempty"` + CurrentOrchestratorVersion *string `json:"currentOrchestratorVersion,omitempty"` + EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"` + EnableCustomCATrust *bool `json:"enableCustomCATrust,omitempty"` + EnableEncryptionAtHost *bool `json:"enableEncryptionAtHost,omitempty"` + EnableFIPS *bool `json:"enableFIPS,omitempty"` + EnableNodePublicIP *bool `json:"enableNodePublicIP,omitempty"` + EnableUltraSSD *bool `json:"enableUltraSSD,omitempty"` + GpuInstanceProfile *GPUInstanceProfile `json:"gpuInstanceProfile,omitempty"` + HostGroupID *string `json:"hostGroupID,omitempty"` + KubeletConfig *KubeletConfig `json:"kubeletConfig,omitempty"` + KubeletDiskType *KubeletDiskType `json:"kubeletDiskType,omitempty"` + LinuxOSConfig *LinuxOSConfig `json:"linuxOSConfig,omitempty"` + MaxCount *int64 `json:"maxCount,omitempty"` + MaxPods *int64 `json:"maxPods,omitempty"` + MessageOfTheDay *string `json:"messageOfTheDay,omitempty"` + MinCount *int64 `json:"minCount,omitempty"` + Mode *AgentPoolMode `json:"mode,omitempty"` + NodeImageVersion *string `json:"nodeImageVersion,omitempty"` + NodeLabels *map[string]string `json:"nodeLabels,omitempty"` + NodePublicIPPrefixID *string `json:"nodePublicIPPrefixID,omitempty"` + NodeTaints *[]string `json:"nodeTaints,omitempty"` + OrchestratorVersion *string `json:"orchestratorVersion,omitempty"` + OsDiskSizeGB *int64 `json:"osDiskSizeGB,omitempty"` + OsDiskType *OSDiskType `json:"osDiskType,omitempty"` + OsSKU *OSSKU `json:"osSKU,omitempty"` + OsType *OSType `json:"osType,omitempty"` + PodSubnetID *string `json:"podSubnetID,omitempty"` + PowerState *PowerState `json:"powerState,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + ProximityPlacementGroupID *string `json:"proximityPlacementGroupID,omitempty"` + ScaleDownMode *ScaleDownMode `json:"scaleDownMode,omitempty"` + ScaleSetEvictionPolicy *ScaleSetEvictionPolicy `json:"scaleSetEvictionPolicy,omitempty"` + ScaleSetPriority *ScaleSetPriority `json:"scaleSetPriority,omitempty"` + SpotMaxPrice *float64 `json:"spotMaxPrice,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *AgentPoolType `json:"type,omitempty"` + UpgradeSettings *AgentPoolUpgradeSettings `json:"upgradeSettings,omitempty"` + VmSize *string `json:"vmSize,omitempty"` + VnetSubnetID *string `json:"vnetSubnetID,omitempty"` + WindowsProfile *AgentPoolWindowsProfile `json:"windowsProfile,omitempty"` + WorkloadRuntime *WorkloadRuntime `json:"workloadRuntime,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_powerstate.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_powerstate.go new file mode 100644 index 000000000000..c5939d906726 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_powerstate.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type PowerState struct { + Code *Code `json:"code,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_sysctlconfig.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_sysctlconfig.go new file mode 100644 index 000000000000..5d3a341605f5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/model_sysctlconfig.go @@ -0,0 +1,35 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SysctlConfig struct { + FsAioMaxNr *int64 `json:"fsAioMaxNr,omitempty"` + FsFileMax *int64 `json:"fsFileMax,omitempty"` + FsInotifyMaxUserWatches *int64 `json:"fsInotifyMaxUserWatches,omitempty"` + FsNrOpen *int64 `json:"fsNrOpen,omitempty"` + KernelThreadsMax *int64 `json:"kernelThreadsMax,omitempty"` + NetCoreNetdevMaxBacklog *int64 `json:"netCoreNetdevMaxBacklog,omitempty"` + NetCoreOptmemMax *int64 `json:"netCoreOptmemMax,omitempty"` + NetCoreRmemDefault *int64 `json:"netCoreRmemDefault,omitempty"` + NetCoreRmemMax *int64 `json:"netCoreRmemMax,omitempty"` + NetCoreSomaxconn *int64 `json:"netCoreSomaxconn,omitempty"` + NetCoreWmemDefault *int64 `json:"netCoreWmemDefault,omitempty"` + NetCoreWmemMax *int64 `json:"netCoreWmemMax,omitempty"` + NetIPv4IPLocalPortRange *string `json:"netIpv4IpLocalPortRange,omitempty"` + NetIPv4NeighDefaultGcThresh1 *int64 `json:"netIpv4NeighDefaultGcThresh1,omitempty"` + NetIPv4NeighDefaultGcThresh2 *int64 `json:"netIpv4NeighDefaultGcThresh2,omitempty"` + NetIPv4NeighDefaultGcThresh3 *int64 `json:"netIpv4NeighDefaultGcThresh3,omitempty"` + NetIPv4TcpFinTimeout *int64 `json:"netIpv4TcpFinTimeout,omitempty"` + NetIPv4TcpKeepaliveProbes *int64 `json:"netIpv4TcpKeepaliveProbes,omitempty"` + NetIPv4TcpKeepaliveTime *int64 `json:"netIpv4TcpKeepaliveTime,omitempty"` + NetIPv4TcpMaxSynBacklog *int64 `json:"netIpv4TcpMaxSynBacklog,omitempty"` + NetIPv4TcpMaxTwBuckets *int64 `json:"netIpv4TcpMaxTwBuckets,omitempty"` + NetIPv4TcpTwReuse *bool `json:"netIpv4TcpTwReuse,omitempty"` + NetIPv4TcpkeepaliveIntvl *int64 `json:"netIpv4TcpkeepaliveIntvl,omitempty"` + NetNetfilterNfConntrackBuckets *int64 `json:"netNetfilterNfConntrackBuckets,omitempty"` + NetNetfilterNfConntrackMax *int64 `json:"netNetfilterNfConntrackMax,omitempty"` + VmMaxMapCount *int64 `json:"vmMaxMapCount,omitempty"` + VmSwappiness *int64 `json:"vmSwappiness,omitempty"` + VmVfsCachePressure *int64 `json:"vmVfsCachePressure,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/predicates.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/predicates.go new file mode 100644 index 000000000000..88177af52358 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/predicates.go @@ -0,0 +1,24 @@ +package agentpools + +type AgentPoolOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p AgentPoolOperationPredicate) Matches(input AgentPool) bool { + + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil && *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type 
== nil && *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/version.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/version.go new file mode 100644 index 000000000000..f5be7ab60aee --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools/version.go @@ -0,0 +1,12 @@ +package agentpools + +import "fmt" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2022-08-02-preview" + +func userAgent() string { + return fmt.Sprintf("hashicorp/go-azure-sdk/agentpools/%s", defaultApiVersion) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/README.md b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/README.md new file mode 100644 index 000000000000..63c1488921c0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/README.md @@ -0,0 +1,90 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations` Documentation + +The `maintenanceconfigurations` SDK allows for interaction with the Azure Resource Manager Service `containerservice` (API Version `2022-08-02-preview`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations" +``` + + +### Client Initialization + +```go +client := maintenanceconfigurations.NewMaintenanceConfigurationsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `MaintenanceConfigurationsClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := maintenanceconfigurations.NewMaintenanceConfigurationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue", "configValue") + +payload := maintenanceconfigurations.MaintenanceConfiguration{ + // ... 
+} + + +read, err := client.CreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `MaintenanceConfigurationsClient.Delete` + +```go +ctx := context.TODO() +id := maintenanceconfigurations.NewMaintenanceConfigurationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue", "configValue") + +read, err := client.Delete(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `MaintenanceConfigurationsClient.Get` + +```go +ctx := context.TODO() +id := maintenanceconfigurations.NewMaintenanceConfigurationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue", "configValue") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `MaintenanceConfigurationsClient.ListByManagedCluster` + +```go +ctx := context.TODO() +id := maintenanceconfigurations.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +// alternatively `client.ListByManagedCluster(ctx, id)` can be used to do batched pagination +items, err := client.ListByManagedClusterComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/client.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/client.go new file mode 100644 index 000000000000..c955dc01ae80 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/client.go @@ -0,0 +1,18 @@ +package maintenanceconfigurations + +import "github.com/Azure/go-autorest/autorest" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MaintenanceConfigurationsClient struct { + Client autorest.Client + baseUri string +} + +func NewMaintenanceConfigurationsClientWithBaseURI(endpoint string) MaintenanceConfigurationsClient { + return MaintenanceConfigurationsClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/constants.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/constants.go new file mode 100644 index 000000000000..7cc6e211d5c3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/constants.go @@ -0,0 +1,49 @@ +package maintenanceconfigurations + +import "strings" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type WeekDay string + +const ( + WeekDayFriday WeekDay = "Friday" + WeekDayMonday WeekDay = "Monday" + WeekDaySaturday WeekDay = "Saturday" + WeekDaySunday WeekDay = "Sunday" + WeekDayThursday WeekDay = "Thursday" + WeekDayTuesday WeekDay = "Tuesday" + WeekDayWednesday WeekDay = "Wednesday" +) + +func PossibleValuesForWeekDay() []string { + return []string{ + string(WeekDayFriday), + string(WeekDayMonday), + string(WeekDaySaturday), + string(WeekDaySunday), + string(WeekDayThursday), + string(WeekDayTuesday), + string(WeekDayWednesday), + } +} + +func parseWeekDay(input string) (*WeekDay, error) { + vals := map[string]WeekDay{ + "friday": WeekDayFriday, + "monday": WeekDayMonday, + "saturday": WeekDaySaturday, + "sunday": WeekDaySunday, + "thursday": WeekDayThursday, + "tuesday": WeekDayTuesday, + "wednesday": WeekDayWednesday, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := WeekDay(input) + return &out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/id_maintenanceconfiguration.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/id_maintenanceconfiguration.go new file mode 100644 index 000000000000..62a222b92b41 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/id_maintenanceconfiguration.go @@ -0,0 +1,137 @@ +package maintenanceconfigurations + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = MaintenanceConfigurationId{} + +// MaintenanceConfigurationId is a struct representing the Resource ID for a Maintenance Configuration +type MaintenanceConfigurationId struct { + SubscriptionId string + ResourceGroupName string + ResourceName string + ConfigName string +} + +// NewMaintenanceConfigurationID returns a new MaintenanceConfigurationId struct +func NewMaintenanceConfigurationID(subscriptionId string, resourceGroupName string, resourceName string, configName string) MaintenanceConfigurationId { + return MaintenanceConfigurationId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ResourceName: resourceName, + ConfigName: configName, + } +} + +// ParseMaintenanceConfigurationID parses 'input' into a MaintenanceConfigurationId +func ParseMaintenanceConfigurationID(input string) (*MaintenanceConfigurationId, error) { + parser := resourceids.NewParserFromResourceIdType(MaintenanceConfigurationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := MaintenanceConfigurationId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) + } + + if id.ConfigName, ok = parsed.Parsed["configName"]; !ok { + return nil, fmt.Errorf("the segment 'configName' was not found in the resource 
id %q", input) + } + + return &id, nil +} + +// ParseMaintenanceConfigurationIDInsensitively parses 'input' case-insensitively into a MaintenanceConfigurationId +// note: this method should only be used for API response data and not user input +func ParseMaintenanceConfigurationIDInsensitively(input string) (*MaintenanceConfigurationId, error) { + parser := resourceids.NewParserFromResourceIdType(MaintenanceConfigurationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := MaintenanceConfigurationId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) + } + + if id.ConfigName, ok = parsed.Parsed["configName"]; !ok { + return nil, fmt.Errorf("the segment 'configName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateMaintenanceConfigurationID checks that 'input' can be parsed as a Maintenance Configuration ID +func ValidateMaintenanceConfigurationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseMaintenanceConfigurationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Maintenance Configuration ID +func (id MaintenanceConfigurationId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s/maintenanceConfigurations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ResourceName, id.ConfigName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Maintenance Configuration ID +func (id MaintenanceConfigurationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("resourceName", "resourceValue"), + resourceids.StaticSegment("staticMaintenanceConfigurations", "maintenanceConfigurations", "maintenanceConfigurations"), + resourceids.UserSpecifiedSegment("configName", "configValue"), + } +} + +// String returns a human-readable description of this Maintenance Configuration ID +func (id MaintenanceConfigurationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + 
fmt.Sprintf("Resource Name: %q", id.ResourceName), + fmt.Sprintf("Config Name: %q", id.ConfigName), + } + return fmt.Sprintf("Maintenance Configuration (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/id_managedcluster.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/id_managedcluster.go new file mode 100644 index 000000000000..7830f95a390c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/id_managedcluster.go @@ -0,0 +1,124 @@ +package maintenanceconfigurations + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = ManagedClusterId{} + +// ManagedClusterId is a struct representing the Resource ID for a Managed Cluster +type ManagedClusterId struct { + SubscriptionId string + ResourceGroupName string + ResourceName string +} + +// NewManagedClusterID returns a new ManagedClusterId struct +func NewManagedClusterID(subscriptionId string, resourceGroupName string, resourceName string) ManagedClusterId { + return ManagedClusterId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ResourceName: resourceName, + } +} + +// ParseManagedClusterID parses 'input' into a ManagedClusterId +func ParseManagedClusterID(input string) (*ManagedClusterId, error) { + parser := resourceids.NewParserFromResourceIdType(ManagedClusterId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ManagedClusterId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseManagedClusterIDInsensitively parses 'input' case-insensitively into a ManagedClusterId +// note: this method should only be used for API response data and not user input +func ParseManagedClusterIDInsensitively(input string) (*ManagedClusterId, error) { + parser := resourceids.NewParserFromResourceIdType(ManagedClusterId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ManagedClusterId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateManagedClusterID checks that 'input' can be parsed as a Managed Cluster ID +func ValidateManagedClusterID(input interface{}, key string) 
(warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseManagedClusterID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Managed Cluster ID +func (id ManagedClusterId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ResourceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Managed Cluster ID +func (id ManagedClusterId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("resourceName", "resourceValue"), + } +} + +// String returns a human-readable description of this Managed Cluster ID +func (id ManagedClusterId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Resource Name: %q", id.ResourceName), + } + return fmt.Sprintf("Managed Cluster (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/method_createorupdate_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/method_createorupdate_autorest.go new file mode 100644 index 000000000000..57eb1a03ef4b --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/method_createorupdate_autorest.go @@ -0,0 +1,69 @@ +package maintenanceconfigurations + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + HttpResponse *http.Response + Model *MaintenanceConfiguration +} + +// CreateOrUpdate ... 
+func (c MaintenanceConfigurationsClient) CreateOrUpdate(ctx context.Context, id MaintenanceConfigurationId, input MaintenanceConfiguration) (result CreateOrUpdateOperationResponse, err error) { + req, err := c.preparerForCreateOrUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "CreateOrUpdate", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForCreateOrUpdate(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "CreateOrUpdate", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForCreateOrUpdate prepares the CreateOrUpdate request. +func (c MaintenanceConfigurationsClient) preparerForCreateOrUpdate(ctx context.Context, id MaintenanceConfigurationId, input MaintenanceConfiguration) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForCreateOrUpdate handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (c MaintenanceConfigurationsClient) responderForCreateOrUpdate(resp *http.Response) (result CreateOrUpdateOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/method_delete_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/method_delete_autorest.go new file mode 100644 index 000000000000..27bc6b8c8559 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/method_delete_autorest.go @@ -0,0 +1,66 @@ +package maintenanceconfigurations + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + HttpResponse *http.Response +} + +// Delete ... 
+func (c MaintenanceConfigurationsClient) Delete(ctx context.Context, id MaintenanceConfigurationId) (result DeleteOperationResponse, err error) { + req, err := c.preparerForDelete(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "Delete", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "Delete", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForDelete(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "Delete", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForDelete prepares the Delete request. +func (c MaintenanceConfigurationsClient) preparerForDelete(ctx context.Context, id MaintenanceConfigurationId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForDelete handles the response to the Delete request. The method always +// closes the http.Response Body. +func (c MaintenanceConfigurationsClient) responderForDelete(resp *http.Response) (result DeleteOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/method_get_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/method_get_autorest.go new file mode 100644 index 000000000000..57a6eb55e01d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/method_get_autorest.go @@ -0,0 +1,68 @@ +package maintenanceconfigurations + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + Model *MaintenanceConfiguration +} + +// Get ... 
+func (c MaintenanceConfigurationsClient) Get(ctx context.Context, id MaintenanceConfigurationId) (result GetOperationResponse, err error) { + req, err := c.preparerForGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "Get", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "Get", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "Get", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGet prepares the Get request. +func (c MaintenanceConfigurationsClient) preparerForGet(ctx context.Context, id MaintenanceConfigurationId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGet handles the response to the Get request. The method always +// closes the http.Response Body. +func (c MaintenanceConfigurationsClient) responderForGet(resp *http.Response) (result GetOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/method_listbymanagedcluster_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/method_listbymanagedcluster_autorest.go new file mode 100644 index 000000000000..5fa46cf3abbe --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/method_listbymanagedcluster_autorest.go @@ -0,0 +1,186 @@ +package maintenanceconfigurations + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListByManagedClusterOperationResponse struct { + HttpResponse *http.Response + Model *[]MaintenanceConfiguration + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListByManagedClusterOperationResponse, error) +} + +type ListByManagedClusterCompleteResult struct { + Items []MaintenanceConfiguration +} + +func (r ListByManagedClusterOperationResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListByManagedClusterOperationResponse) LoadMore(ctx context.Context) (resp ListByManagedClusterOperationResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +// ListByManagedCluster ... +func (c MaintenanceConfigurationsClient) ListByManagedCluster(ctx context.Context, id ManagedClusterId) (resp ListByManagedClusterOperationResponse, err error) { + req, err := c.preparerForListByManagedCluster(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "ListByManagedCluster", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "ListByManagedCluster", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForListByManagedCluster(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "ListByManagedCluster", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// preparerForListByManagedCluster prepares the ListByManagedCluster request. +func (c MaintenanceConfigurationsClient) preparerForListByManagedCluster(ctx context.Context, id ManagedClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/maintenanceConfigurations", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListByManagedClusterWithNextLink prepares the ListByManagedCluster request with the given nextLink token. +func (c MaintenanceConfigurationsClient) preparerForListByManagedClusterWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListByManagedCluster handles the response to the ListByManagedCluster request. The method always +// closes the http.Response Body. 
+func (c MaintenanceConfigurationsClient) responderForListByManagedCluster(resp *http.Response) (result ListByManagedClusterOperationResponse, err error) { + type page struct { + Values []MaintenanceConfiguration `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListByManagedClusterOperationResponse, err error) { + req, err := c.preparerForListByManagedClusterWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "ListByManagedCluster", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "ListByManagedCluster", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListByManagedCluster(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "maintenanceconfigurations.MaintenanceConfigurationsClient", "ListByManagedCluster", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} + +// ListByManagedClusterComplete retrieves all of the results into a single object +func (c MaintenanceConfigurationsClient) ListByManagedClusterComplete(ctx context.Context, id ManagedClusterId) (ListByManagedClusterCompleteResult, error) { + return c.ListByManagedClusterCompleteMatchingPredicate(ctx, id, MaintenanceConfigurationOperationPredicate{}) +} + +// ListByManagedClusterCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c MaintenanceConfigurationsClient) ListByManagedClusterCompleteMatchingPredicate(ctx context.Context, id ManagedClusterId, predicate MaintenanceConfigurationOperationPredicate) (resp ListByManagedClusterCompleteResult, err error) { + items := make([]MaintenanceConfiguration, 0) + + page, err := c.ListByManagedCluster(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListByManagedClusterCompleteResult{ + Items: items, + } + return out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/model_maintenanceconfiguration.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/model_maintenanceconfiguration.go new file mode 100644 index 000000000000..9066e53bff37 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/model_maintenanceconfiguration.go @@ -0,0 +1,16 @@ +package 
maintenanceconfigurations + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MaintenanceConfiguration struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *MaintenanceConfigurationProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/model_maintenanceconfigurationproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/model_maintenanceconfigurationproperties.go new file mode 100644 index 000000000000..f51d69177104 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/model_maintenanceconfigurationproperties.go @@ -0,0 +1,9 @@ +package maintenanceconfigurations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MaintenanceConfigurationProperties struct { + NotAllowedTime *[]TimeSpan `json:"notAllowedTime,omitempty"` + TimeInWeek *[]TimeInWeek `json:"timeInWeek,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/model_timeinweek.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/model_timeinweek.go new file mode 100644 index 000000000000..cba259f76fce --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/model_timeinweek.go @@ -0,0 +1,9 @@ +package maintenanceconfigurations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TimeInWeek struct { + Day *WeekDay `json:"day,omitempty"` + HourSlots *[]int64 `json:"hourSlots,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/model_timespan.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/model_timespan.go new file mode 100644 index 000000000000..7bbc8c5ba728 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/model_timespan.go @@ -0,0 +1,39 @@ +package maintenanceconfigurations + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
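The model types above, together with the `TimeSpan` time helpers defined below, compose into a maintenance window payload roughly as follows. This is only an illustrative sketch: the `"Sunday"` value is assumed to correspond to one of the `WeekDay` constants generated elsewhere in this package, and the dates and hour slots are placeholders.

```go
// Permit maintenance on Sundays during hour slots 2-5, and block 2023-01-01 entirely.
day := maintenanceconfigurations.WeekDay("Sunday") // assumed to match a generated WeekDay constant
hourSlots := []int64{2, 3, 4, 5}

blocked := maintenanceconfigurations.TimeSpan{}
blocked.SetStartAsTime(time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC))
blocked.SetEndAsTime(time.Date(2023, 1, 2, 0, 0, 0, 0, time.UTC))

props := maintenanceconfigurations.MaintenanceConfigurationProperties{
	TimeInWeek: &[]maintenanceconfigurations.TimeInWeek{
		{Day: &day, HourSlots: &hourSlots},
	},
	NotAllowedTime: &[]maintenanceconfigurations.TimeSpan{blocked},
}
_ = props // typically assigned to MaintenanceConfiguration.Properties before being sent to the API
```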
+ +type TimeSpan struct { + End *string `json:"end,omitempty"` + Start *string `json:"start,omitempty"` +} + +func (o *TimeSpan) GetEndAsTime() (*time.Time, error) { + if o.End == nil { + return nil, nil + } + return dates.ParseAsFormat(o.End, "2006-01-02T15:04:05Z07:00") +} + +func (o *TimeSpan) SetEndAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.End = &formatted +} + +func (o *TimeSpan) GetStartAsTime() (*time.Time, error) { + if o.Start == nil { + return nil, nil + } + return dates.ParseAsFormat(o.Start, "2006-01-02T15:04:05Z07:00") +} + +func (o *TimeSpan) SetStartAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.Start = &formatted +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/predicates.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/predicates.go new file mode 100644 index 000000000000..80dc7478f4c6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/predicates.go @@ -0,0 +1,24 @@ +package maintenanceconfigurations + +type MaintenanceConfigurationOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p MaintenanceConfigurationOperationPredicate) Matches(input MaintenanceConfiguration) bool { + + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil && *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil && *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/version.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/version.go new file mode 100644 index 000000000000..dc6a97525403 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations/version.go @@ -0,0 +1,12 @@ +package maintenanceconfigurations + +import "fmt" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2022-08-02-preview" + +func userAgent() string { + return fmt.Sprintf("hashicorp/go-azure-sdk/maintenanceconfigurations/%s", defaultApiVersion) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/README.md b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/README.md new file mode 100644 index 000000000000..a72d4f0fec31 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/README.md @@ -0,0 +1,360 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters` Documentation + +The `managedclusters` SDK allows for interaction with the Azure Resource Manager Service `containerservice` (API Version `2022-08-02-preview`). 
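For completeness, since the `maintenanceconfigurations` package vendored above ships without a README of its own, a minimal sketch of consuming its paginated `ListByManagedCluster` operation follows. It assumes that package exposes the same client-construction and `NewManagedClusterID` helpers as the `managedclusters` package documented below, and that `authorizer` is an existing autorest authorizer.

```go
client := maintenanceconfigurations.NewMaintenanceConfigurationsClientWithBaseURI("https://management.azure.com")
client.Client.Authorizer = authorizer

ctx := context.TODO()
id := maintenanceconfigurations.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue")

// ListByManagedClusterComplete follows nextLink internally via HasMore/LoadMore,
// flattening every page into a single result.
items, err := client.ListByManagedClusterComplete(ctx, id)
if err != nil {
	// handle the error
}
for _, item := range items.Items {
	// each item is a MaintenanceConfiguration; Name and Properties are pointers and may be nil
}
```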
+ +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters" +``` + + +### Client Initialization + +```go +client := managedclusters.NewManagedClustersClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `ManagedClustersClient.AbortLatestOperation` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +read, err := client.AbortLatestOperation(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +payload := managedclusters.ManagedCluster{ + // ... +} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.Delete` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +if err := client.DeleteThenPoll(ctx, id, managedclusters.DefaultDeleteOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.Get` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.GetAccessProfile` + +```go +ctx := context.TODO() +id := managedclusters.NewAccessProfileID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue", "roleValue") + +read, err := client.GetAccessProfile(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.GetCommandResult` + +```go +ctx := context.TODO() +id := managedclusters.NewCommandResultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue", "commandIdValue") + +read, err := client.GetCommandResult(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.GetOSOptions` + +```go +ctx := context.TODO() +id := managedclusters.NewLocationID("12345678-1234-9876-4563-123456789012", "locationValue") + +read, err := client.GetOSOptions(ctx, id, managedclusters.DefaultGetOSOptionsOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.GetUpgradeProfile` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +read, err 
:= client.GetUpgradeProfile(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.List` + +```go +ctx := context.TODO() +id := managedclusters.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.List(ctx, id)` can be used to do batched pagination +items, err := client.ListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ManagedClustersClient.ListByResourceGroup` + +```go +ctx := context.TODO() +id := managedclusters.NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + +// alternatively `client.ListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ManagedClustersClient.ListClusterAdminCredentials` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +read, err := client.ListClusterAdminCredentials(ctx, id, managedclusters.DefaultListClusterAdminCredentialsOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.ListClusterMonitoringUserCredentials` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +read, err := client.ListClusterMonitoringUserCredentials(ctx, id, managedclusters.DefaultListClusterMonitoringUserCredentialsOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.ListClusterUserCredentials` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +read, err := client.ListClusterUserCredentials(ctx, id, managedclusters.DefaultListClusterUserCredentialsOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.ListOutboundNetworkDependenciesEndpoints` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +// alternatively `client.ListOutboundNetworkDependenciesEndpoints(ctx, id)` can be used to do batched pagination +items, err := client.ListOutboundNetworkDependenciesEndpointsComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ManagedClustersClient.ResetAADProfile` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +payload := managedclusters.ManagedClusterAADProfile{ + // ... 
+} + + +if err := client.ResetAADProfileThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.ResetServicePrincipalProfile` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +payload := managedclusters.ManagedClusterServicePrincipalProfile{ + // ... +} + + +if err := client.ResetServicePrincipalProfileThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.RotateClusterCertificates` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +if err := client.RotateClusterCertificatesThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.RotateServiceAccountSigningKeys` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +if err := client.RotateServiceAccountSigningKeysThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.RunCommand` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +payload := managedclusters.RunCommandRequest{ + // ... +} + + +if err := client.RunCommandThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.Start` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +if err := client.StartThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.Stop` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +if err := client.StopThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.UpdateTags` + +```go +ctx := context.TODO() +id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "resourceValue") + +payload := managedclusters.TagsObject{ + // ... +} + + +if err := client.UpdateTagsThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/client.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/client.go new file mode 100644 index 000000000000..df94ecc4199e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/client.go @@ -0,0 +1,18 @@ +package managedclusters + +import "github.com/Azure/go-autorest/autorest" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClustersClient struct { + Client autorest.Client + baseUri string +} + +func NewManagedClustersClientWithBaseURI(endpoint string) ManagedClustersClient { + return ManagedClustersClient{ + Client: autorest.NewClientWithUserAgent(userAgent()), + baseUri: endpoint, + } +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/constants.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/constants.go new file mode 100644 index 000000000000..c84d7cc20bb7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/constants.go @@ -0,0 +1,1012 @@ +package managedclusters + +import "strings" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolMode string + +const ( + AgentPoolModeSystem AgentPoolMode = "System" + AgentPoolModeUser AgentPoolMode = "User" +) + +func PossibleValuesForAgentPoolMode() []string { + return []string{ + string(AgentPoolModeSystem), + string(AgentPoolModeUser), + } +} + +func parseAgentPoolMode(input string) (*AgentPoolMode, error) { + vals := map[string]AgentPoolMode{ + "system": AgentPoolModeSystem, + "user": AgentPoolModeUser, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AgentPoolMode(input) + return &out, nil +} + +type AgentPoolType string + +const ( + AgentPoolTypeAvailabilitySet AgentPoolType = "AvailabilitySet" + AgentPoolTypeVirtualMachineScaleSets AgentPoolType = "VirtualMachineScaleSets" +) + +func PossibleValuesForAgentPoolType() []string { + return []string{ + string(AgentPoolTypeAvailabilitySet), + string(AgentPoolTypeVirtualMachineScaleSets), + } +} + +func parseAgentPoolType(input string) (*AgentPoolType, error) { + vals := map[string]AgentPoolType{ + "availabilityset": AgentPoolTypeAvailabilitySet, + "virtualmachinescalesets": AgentPoolTypeVirtualMachineScaleSets, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AgentPoolType(input) + return &out, nil +} + +type BackendPoolType string + +const ( + BackendPoolTypeNodeIP BackendPoolType = "NodeIP" + BackendPoolTypeNodeIPConfiguration BackendPoolType = "NodeIPConfiguration" +) + +func PossibleValuesForBackendPoolType() []string { + return []string{ + string(BackendPoolTypeNodeIP), + string(BackendPoolTypeNodeIPConfiguration), + } +} + +func parseBackendPoolType(input string) (*BackendPoolType, error) { + vals := map[string]BackendPoolType{ + "nodeip": BackendPoolTypeNodeIP, + "nodeipconfiguration": BackendPoolTypeNodeIPConfiguration, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackendPoolType(input) + return &out, nil +} + +type Code string + +const ( + CodeRunning Code = "Running" + CodeStopped Code = "Stopped" +) + +func PossibleValuesForCode() []string { + return []string{ + string(CodeRunning), + string(CodeStopped), + } +} + +func parseCode(input string) (*Code, error) { + vals := map[string]Code{ + "running": CodeRunning, + "stopped": CodeStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an 
undefined value and best-effort it + out := Code(input) + return &out, nil +} + +type ControlledValues string + +const ( + ControlledValuesRequestsAndLimits ControlledValues = "RequestsAndLimits" + ControlledValuesRequestsOnly ControlledValues = "RequestsOnly" +) + +func PossibleValuesForControlledValues() []string { + return []string{ + string(ControlledValuesRequestsAndLimits), + string(ControlledValuesRequestsOnly), + } +} + +func parseControlledValues(input string) (*ControlledValues, error) { + vals := map[string]ControlledValues{ + "requestsandlimits": ControlledValuesRequestsAndLimits, + "requestsonly": ControlledValuesRequestsOnly, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ControlledValues(input) + return &out, nil +} + +type Expander string + +const ( + ExpanderLeastNegativewaste Expander = "least-waste" + ExpanderMostNegativepods Expander = "most-pods" + ExpanderPriority Expander = "priority" + ExpanderRandom Expander = "random" +) + +func PossibleValuesForExpander() []string { + return []string{ + string(ExpanderLeastNegativewaste), + string(ExpanderMostNegativepods), + string(ExpanderPriority), + string(ExpanderRandom), + } +} + +func parseExpander(input string) (*Expander, error) { + vals := map[string]Expander{ + "least-waste": ExpanderLeastNegativewaste, + "most-pods": ExpanderMostNegativepods, + "priority": ExpanderPriority, + "random": ExpanderRandom, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Expander(input) + return &out, nil +} + +type Format string + +const ( + FormatAzure Format = "azure" + FormatExec Format = "exec" +) + +func PossibleValuesForFormat() []string { + return []string{ + string(FormatAzure), + string(FormatExec), + } +} + +func parseFormat(input string) (*Format, error) { + vals := map[string]Format{ + "azure": FormatAzure, + "exec": FormatExec, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Format(input) + return &out, nil +} + +type GPUInstanceProfile string + +const ( + GPUInstanceProfileMIGFourg GPUInstanceProfile = "MIG4g" + GPUInstanceProfileMIGOneg GPUInstanceProfile = "MIG1g" + GPUInstanceProfileMIGSeveng GPUInstanceProfile = "MIG7g" + GPUInstanceProfileMIGThreeg GPUInstanceProfile = "MIG3g" + GPUInstanceProfileMIGTwog GPUInstanceProfile = "MIG2g" +) + +func PossibleValuesForGPUInstanceProfile() []string { + return []string{ + string(GPUInstanceProfileMIGFourg), + string(GPUInstanceProfileMIGOneg), + string(GPUInstanceProfileMIGSeveng), + string(GPUInstanceProfileMIGThreeg), + string(GPUInstanceProfileMIGTwog), + } +} + +func parseGPUInstanceProfile(input string) (*GPUInstanceProfile, error) { + vals := map[string]GPUInstanceProfile{ + "mig4g": GPUInstanceProfileMIGFourg, + "mig1g": GPUInstanceProfileMIGOneg, + "mig7g": GPUInstanceProfileMIGSeveng, + "mig3g": GPUInstanceProfileMIGThreeg, + "mig2g": GPUInstanceProfileMIGTwog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := GPUInstanceProfile(input) + return &out, nil +} + +type IPFamily string + +const ( + IPFamilyIPvFour IPFamily = "IPv4" + IPFamilyIPvSix IPFamily = "IPv6" +) + +func PossibleValuesForIPFamily() []string { + return []string{ + string(IPFamilyIPvFour), + 
string(IPFamilyIPvSix), + } +} + +func parseIPFamily(input string) (*IPFamily, error) { + vals := map[string]IPFamily{ + "ipv4": IPFamilyIPvFour, + "ipv6": IPFamilyIPvSix, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := IPFamily(input) + return &out, nil +} + +type IPvsScheduler string + +const ( + IPvsSchedulerLeastConnection IPvsScheduler = "LeastConnection" + IPvsSchedulerRoundRobin IPvsScheduler = "RoundRobin" +) + +func PossibleValuesForIPvsScheduler() []string { + return []string{ + string(IPvsSchedulerLeastConnection), + string(IPvsSchedulerRoundRobin), + } +} + +func parseIPvsScheduler(input string) (*IPvsScheduler, error) { + vals := map[string]IPvsScheduler{ + "leastconnection": IPvsSchedulerLeastConnection, + "roundrobin": IPvsSchedulerRoundRobin, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := IPvsScheduler(input) + return &out, nil +} + +type KeyVaultNetworkAccessTypes string + +const ( + KeyVaultNetworkAccessTypesPrivate KeyVaultNetworkAccessTypes = "Private" + KeyVaultNetworkAccessTypesPublic KeyVaultNetworkAccessTypes = "Public" +) + +func PossibleValuesForKeyVaultNetworkAccessTypes() []string { + return []string{ + string(KeyVaultNetworkAccessTypesPrivate), + string(KeyVaultNetworkAccessTypesPublic), + } +} + +func parseKeyVaultNetworkAccessTypes(input string) (*KeyVaultNetworkAccessTypes, error) { + vals := map[string]KeyVaultNetworkAccessTypes{ + "private": KeyVaultNetworkAccessTypesPrivate, + "public": KeyVaultNetworkAccessTypesPublic, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KeyVaultNetworkAccessTypes(input) + return &out, nil +} + +type KubeletDiskType string + +const ( + KubeletDiskTypeOS KubeletDiskType = "OS" + KubeletDiskTypeTemporary KubeletDiskType = "Temporary" +) + +func PossibleValuesForKubeletDiskType() []string { + return []string{ + string(KubeletDiskTypeOS), + string(KubeletDiskTypeTemporary), + } +} + +func parseKubeletDiskType(input string) (*KubeletDiskType, error) { + vals := map[string]KubeletDiskType{ + "os": KubeletDiskTypeOS, + "temporary": KubeletDiskTypeTemporary, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KubeletDiskType(input) + return &out, nil +} + +type Level string + +const ( + LevelEnforcement Level = "Enforcement" + LevelOff Level = "Off" + LevelWarning Level = "Warning" +) + +func PossibleValuesForLevel() []string { + return []string{ + string(LevelEnforcement), + string(LevelOff), + string(LevelWarning), + } +} + +func parseLevel(input string) (*Level, error) { + vals := map[string]Level{ + "enforcement": LevelEnforcement, + "off": LevelOff, + "warning": LevelWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Level(input) + return &out, nil +} + +type LicenseType string + +const ( + LicenseTypeNone LicenseType = "None" + LicenseTypeWindowsServer LicenseType = "Windows_Server" +) + +func PossibleValuesForLicenseType() []string { + return []string{ + string(LicenseTypeNone), + string(LicenseTypeWindowsServer), + } +} + +func parseLicenseType(input string) (*LicenseType, error) { + vals := map[string]LicenseType{ + 
"none": LicenseTypeNone, + "windows_server": LicenseTypeWindowsServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LicenseType(input) + return &out, nil +} + +type LoadBalancerSku string + +const ( + LoadBalancerSkuBasic LoadBalancerSku = "basic" + LoadBalancerSkuStandard LoadBalancerSku = "standard" +) + +func PossibleValuesForLoadBalancerSku() []string { + return []string{ + string(LoadBalancerSkuBasic), + string(LoadBalancerSkuStandard), + } +} + +func parseLoadBalancerSku(input string) (*LoadBalancerSku, error) { + vals := map[string]LoadBalancerSku{ + "basic": LoadBalancerSkuBasic, + "standard": LoadBalancerSkuStandard, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LoadBalancerSku(input) + return &out, nil +} + +type ManagedClusterPodIdentityProvisioningState string + +const ( + ManagedClusterPodIdentityProvisioningStateAssigned ManagedClusterPodIdentityProvisioningState = "Assigned" + ManagedClusterPodIdentityProvisioningStateDeleting ManagedClusterPodIdentityProvisioningState = "Deleting" + ManagedClusterPodIdentityProvisioningStateFailed ManagedClusterPodIdentityProvisioningState = "Failed" + ManagedClusterPodIdentityProvisioningStateUpdating ManagedClusterPodIdentityProvisioningState = "Updating" +) + +func PossibleValuesForManagedClusterPodIdentityProvisioningState() []string { + return []string{ + string(ManagedClusterPodIdentityProvisioningStateAssigned), + string(ManagedClusterPodIdentityProvisioningStateDeleting), + string(ManagedClusterPodIdentityProvisioningStateFailed), + string(ManagedClusterPodIdentityProvisioningStateUpdating), + } +} + +func parseManagedClusterPodIdentityProvisioningState(input string) (*ManagedClusterPodIdentityProvisioningState, error) { + vals := map[string]ManagedClusterPodIdentityProvisioningState{ + "assigned": ManagedClusterPodIdentityProvisioningStateAssigned, + "deleting": ManagedClusterPodIdentityProvisioningStateDeleting, + "failed": ManagedClusterPodIdentityProvisioningStateFailed, + "updating": ManagedClusterPodIdentityProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ManagedClusterPodIdentityProvisioningState(input) + return &out, nil +} + +type ManagedClusterSKUName string + +const ( + ManagedClusterSKUNameBasic ManagedClusterSKUName = "Basic" +) + +func PossibleValuesForManagedClusterSKUName() []string { + return []string{ + string(ManagedClusterSKUNameBasic), + } +} + +func parseManagedClusterSKUName(input string) (*ManagedClusterSKUName, error) { + vals := map[string]ManagedClusterSKUName{ + "basic": ManagedClusterSKUNameBasic, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ManagedClusterSKUName(input) + return &out, nil +} + +type ManagedClusterSKUTier string + +const ( + ManagedClusterSKUTierFree ManagedClusterSKUTier = "Free" + ManagedClusterSKUTierPaid ManagedClusterSKUTier = "Paid" +) + +func PossibleValuesForManagedClusterSKUTier() []string { + return []string{ + string(ManagedClusterSKUTierFree), + string(ManagedClusterSKUTierPaid), + } +} + +func parseManagedClusterSKUTier(input string) (*ManagedClusterSKUTier, error) { + vals := map[string]ManagedClusterSKUTier{ + "free": 
ManagedClusterSKUTierFree, + "paid": ManagedClusterSKUTierPaid, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ManagedClusterSKUTier(input) + return &out, nil +} + +type Mode string + +const ( + ModeIPTABLES Mode = "IPTABLES" + ModeIPVS Mode = "IPVS" +) + +func PossibleValuesForMode() []string { + return []string{ + string(ModeIPTABLES), + string(ModeIPVS), + } +} + +func parseMode(input string) (*Mode, error) { + vals := map[string]Mode{ + "iptables": ModeIPTABLES, + "ipvs": ModeIPVS, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Mode(input) + return &out, nil +} + +type NetworkMode string + +const ( + NetworkModeBridge NetworkMode = "bridge" + NetworkModeTransparent NetworkMode = "transparent" +) + +func PossibleValuesForNetworkMode() []string { + return []string{ + string(NetworkModeBridge), + string(NetworkModeTransparent), + } +} + +func parseNetworkMode(input string) (*NetworkMode, error) { + vals := map[string]NetworkMode{ + "bridge": NetworkModeBridge, + "transparent": NetworkModeTransparent, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NetworkMode(input) + return &out, nil +} + +type NetworkPlugin string + +const ( + NetworkPluginAzure NetworkPlugin = "azure" + NetworkPluginKubenet NetworkPlugin = "kubenet" + NetworkPluginNone NetworkPlugin = "none" +) + +func PossibleValuesForNetworkPlugin() []string { + return []string{ + string(NetworkPluginAzure), + string(NetworkPluginKubenet), + string(NetworkPluginNone), + } +} + +func parseNetworkPlugin(input string) (*NetworkPlugin, error) { + vals := map[string]NetworkPlugin{ + "azure": NetworkPluginAzure, + "kubenet": NetworkPluginKubenet, + "none": NetworkPluginNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NetworkPlugin(input) + return &out, nil +} + +type NetworkPluginMode string + +const ( + NetworkPluginModeOverlay NetworkPluginMode = "Overlay" +) + +func PossibleValuesForNetworkPluginMode() []string { + return []string{ + string(NetworkPluginModeOverlay), + } +} + +func parseNetworkPluginMode(input string) (*NetworkPluginMode, error) { + vals := map[string]NetworkPluginMode{ + "overlay": NetworkPluginModeOverlay, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NetworkPluginMode(input) + return &out, nil +} + +type NetworkPolicy string + +const ( + NetworkPolicyAzure NetworkPolicy = "azure" + NetworkPolicyCalico NetworkPolicy = "calico" +) + +func PossibleValuesForNetworkPolicy() []string { + return []string{ + string(NetworkPolicyAzure), + string(NetworkPolicyCalico), + } +} + +func parseNetworkPolicy(input string) (*NetworkPolicy, error) { + vals := map[string]NetworkPolicy{ + "azure": NetworkPolicyAzure, + "calico": NetworkPolicyCalico, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NetworkPolicy(input) + return &out, nil +} + +type OSDiskType string + +const ( + OSDiskTypeEphemeral OSDiskType = "Ephemeral" + OSDiskTypeManaged OSDiskType = "Managed" +) + +func PossibleValuesForOSDiskType() []string { + return 
[]string{ + string(OSDiskTypeEphemeral), + string(OSDiskTypeManaged), + } +} + +func parseOSDiskType(input string) (*OSDiskType, error) { + vals := map[string]OSDiskType{ + "ephemeral": OSDiskTypeEphemeral, + "managed": OSDiskTypeManaged, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSDiskType(input) + return &out, nil +} + +type OSSKU string + +const ( + OSSKUCBLMariner OSSKU = "CBLMariner" + OSSKUMariner OSSKU = "Mariner" + OSSKUUbuntu OSSKU = "Ubuntu" + OSSKUWindowsTwoZeroOneNine OSSKU = "Windows2019" + OSSKUWindowsTwoZeroTwoTwo OSSKU = "Windows2022" +) + +func PossibleValuesForOSSKU() []string { + return []string{ + string(OSSKUCBLMariner), + string(OSSKUMariner), + string(OSSKUUbuntu), + string(OSSKUWindowsTwoZeroOneNine), + string(OSSKUWindowsTwoZeroTwoTwo), + } +} + +func parseOSSKU(input string) (*OSSKU, error) { + vals := map[string]OSSKU{ + "cblmariner": OSSKUCBLMariner, + "mariner": OSSKUMariner, + "ubuntu": OSSKUUbuntu, + "windows2019": OSSKUWindowsTwoZeroOneNine, + "windows2022": OSSKUWindowsTwoZeroTwoTwo, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSSKU(input) + return &out, nil +} + +type OSType string + +const ( + OSTypeLinux OSType = "Linux" + OSTypeWindows OSType = "Windows" +) + +func PossibleValuesForOSType() []string { + return []string{ + string(OSTypeLinux), + string(OSTypeWindows), + } +} + +func parseOSType(input string) (*OSType, error) { + vals := map[string]OSType{ + "linux": OSTypeLinux, + "windows": OSTypeWindows, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSType(input) + return &out, nil +} + +type OutboundType string + +const ( + OutboundTypeLoadBalancer OutboundType = "loadBalancer" + OutboundTypeManagedNATGateway OutboundType = "managedNATGateway" + OutboundTypeUserAssignedNATGateway OutboundType = "userAssignedNATGateway" + OutboundTypeUserDefinedRouting OutboundType = "userDefinedRouting" +) + +func PossibleValuesForOutboundType() []string { + return []string{ + string(OutboundTypeLoadBalancer), + string(OutboundTypeManagedNATGateway), + string(OutboundTypeUserAssignedNATGateway), + string(OutboundTypeUserDefinedRouting), + } +} + +func parseOutboundType(input string) (*OutboundType, error) { + vals := map[string]OutboundType{ + "loadbalancer": OutboundTypeLoadBalancer, + "managednatgateway": OutboundTypeManagedNATGateway, + "userassignednatgateway": OutboundTypeUserAssignedNATGateway, + "userdefinedrouting": OutboundTypeUserDefinedRouting, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OutboundType(input) + return &out, nil +} + +type PublicNetworkAccess string + +const ( + PublicNetworkAccessDisabled PublicNetworkAccess = "Disabled" + PublicNetworkAccessEnabled PublicNetworkAccess = "Enabled" + PublicNetworkAccessSecuredByPerimeter PublicNetworkAccess = "SecuredByPerimeter" +) + +func PossibleValuesForPublicNetworkAccess() []string { + return []string{ + string(PublicNetworkAccessDisabled), + string(PublicNetworkAccessEnabled), + string(PublicNetworkAccessSecuredByPerimeter), + } +} + +func parsePublicNetworkAccess(input string) (*PublicNetworkAccess, error) { + vals := map[string]PublicNetworkAccess{ + "disabled": 
PublicNetworkAccessDisabled, + "enabled": PublicNetworkAccessEnabled, + "securedbyperimeter": PublicNetworkAccessSecuredByPerimeter, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := PublicNetworkAccess(input) + return &out, nil +} + +type ScaleDownMode string + +const ( + ScaleDownModeDeallocate ScaleDownMode = "Deallocate" + ScaleDownModeDelete ScaleDownMode = "Delete" +) + +func PossibleValuesForScaleDownMode() []string { + return []string{ + string(ScaleDownModeDeallocate), + string(ScaleDownModeDelete), + } +} + +func parseScaleDownMode(input string) (*ScaleDownMode, error) { + vals := map[string]ScaleDownMode{ + "deallocate": ScaleDownModeDeallocate, + "delete": ScaleDownModeDelete, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScaleDownMode(input) + return &out, nil +} + +type ScaleSetEvictionPolicy string + +const ( + ScaleSetEvictionPolicyDeallocate ScaleSetEvictionPolicy = "Deallocate" + ScaleSetEvictionPolicyDelete ScaleSetEvictionPolicy = "Delete" +) + +func PossibleValuesForScaleSetEvictionPolicy() []string { + return []string{ + string(ScaleSetEvictionPolicyDeallocate), + string(ScaleSetEvictionPolicyDelete), + } +} + +func parseScaleSetEvictionPolicy(input string) (*ScaleSetEvictionPolicy, error) { + vals := map[string]ScaleSetEvictionPolicy{ + "deallocate": ScaleSetEvictionPolicyDeallocate, + "delete": ScaleSetEvictionPolicyDelete, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScaleSetEvictionPolicy(input) + return &out, nil +} + +type ScaleSetPriority string + +const ( + ScaleSetPriorityRegular ScaleSetPriority = "Regular" + ScaleSetPrioritySpot ScaleSetPriority = "Spot" +) + +func PossibleValuesForScaleSetPriority() []string { + return []string{ + string(ScaleSetPriorityRegular), + string(ScaleSetPrioritySpot), + } +} + +func parseScaleSetPriority(input string) (*ScaleSetPriority, error) { + vals := map[string]ScaleSetPriority{ + "regular": ScaleSetPriorityRegular, + "spot": ScaleSetPrioritySpot, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScaleSetPriority(input) + return &out, nil +} + +type UpdateMode string + +const ( + UpdateModeAuto UpdateMode = "Auto" + UpdateModeInitial UpdateMode = "Initial" + UpdateModeOff UpdateMode = "Off" + UpdateModeRecreate UpdateMode = "Recreate" +) + +func PossibleValuesForUpdateMode() []string { + return []string{ + string(UpdateModeAuto), + string(UpdateModeInitial), + string(UpdateModeOff), + string(UpdateModeRecreate), + } +} + +func parseUpdateMode(input string) (*UpdateMode, error) { + vals := map[string]UpdateMode{ + "auto": UpdateModeAuto, + "initial": UpdateModeInitial, + "off": UpdateModeOff, + "recreate": UpdateModeRecreate, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UpdateMode(input) + return &out, nil +} + +type UpgradeChannel string + +const ( + UpgradeChannelNodeNegativeimage UpgradeChannel = "node-image" + UpgradeChannelNone UpgradeChannel = "none" + UpgradeChannelPatch UpgradeChannel = "patch" + UpgradeChannelRapid UpgradeChannel = "rapid" + UpgradeChannelStable UpgradeChannel = "stable" +) + +func 
PossibleValuesForUpgradeChannel() []string { + return []string{ + string(UpgradeChannelNodeNegativeimage), + string(UpgradeChannelNone), + string(UpgradeChannelPatch), + string(UpgradeChannelRapid), + string(UpgradeChannelStable), + } +} + +func parseUpgradeChannel(input string) (*UpgradeChannel, error) { + vals := map[string]UpgradeChannel{ + "node-image": UpgradeChannelNodeNegativeimage, + "none": UpgradeChannelNone, + "patch": UpgradeChannelPatch, + "rapid": UpgradeChannelRapid, + "stable": UpgradeChannelStable, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UpgradeChannel(input) + return &out, nil +} + +type WorkloadRuntime string + +const ( + WorkloadRuntimeOCIContainer WorkloadRuntime = "OCIContainer" + WorkloadRuntimeWasmWasi WorkloadRuntime = "WasmWasi" +) + +func PossibleValuesForWorkloadRuntime() []string { + return []string{ + string(WorkloadRuntimeOCIContainer), + string(WorkloadRuntimeWasmWasi), + } +} + +func parseWorkloadRuntime(input string) (*WorkloadRuntime, error) { + vals := map[string]WorkloadRuntime{ + "ocicontainer": WorkloadRuntimeOCIContainer, + "wasmwasi": WorkloadRuntimeWasmWasi, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := WorkloadRuntime(input) + return &out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/id_accessprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/id_accessprofile.go new file mode 100644 index 000000000000..f0566b57711f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/id_accessprofile.go @@ -0,0 +1,137 @@ +package managedclusters + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = AccessProfileId{} + +// AccessProfileId is a struct representing the Resource ID for a Access Profile +type AccessProfileId struct { + SubscriptionId string + ResourceGroupName string + ResourceName string + RoleName string +} + +// NewAccessProfileID returns a new AccessProfileId struct +func NewAccessProfileID(subscriptionId string, resourceGroupName string, resourceName string, roleName string) AccessProfileId { + return AccessProfileId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ResourceName: resourceName, + RoleName: roleName, + } +} + +// ParseAccessProfileID parses 'input' into a AccessProfileId +func ParseAccessProfileID(input string) (*AccessProfileId, error) { + parser := resourceids.NewParserFromResourceIdType(AccessProfileId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := AccessProfileId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) 
+ } + + if id.RoleName, ok = parsed.Parsed["roleName"]; !ok { + return nil, fmt.Errorf("the segment 'roleName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseAccessProfileIDInsensitively parses 'input' case-insensitively into a AccessProfileId +// note: this method should only be used for API response data and not user input +func ParseAccessProfileIDInsensitively(input string) (*AccessProfileId, error) { + parser := resourceids.NewParserFromResourceIdType(AccessProfileId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := AccessProfileId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) + } + + if id.RoleName, ok = parsed.Parsed["roleName"]; !ok { + return nil, fmt.Errorf("the segment 'roleName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateAccessProfileID checks that 'input' can be parsed as a Access Profile ID +func ValidateAccessProfileID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseAccessProfileID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Access Profile ID +func (id AccessProfileId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s/accessProfiles/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ResourceName, id.RoleName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Access Profile ID +func (id AccessProfileId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("resourceName", "resourceValue"), + resourceids.StaticSegment("staticAccessProfiles", "accessProfiles", "accessProfiles"), + resourceids.UserSpecifiedSegment("roleName", "roleValue"), + } +} + +// String returns a human-readable description of this Access Profile ID +func (id AccessProfileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Resource Name: %q", id.ResourceName), + fmt.Sprintf("Role Name: %q", id.RoleName), + } + 
return fmt.Sprintf("Access Profile (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/id_commandresult.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/id_commandresult.go new file mode 100644 index 000000000000..c70b4c10ed9c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/id_commandresult.go @@ -0,0 +1,137 @@ +package managedclusters + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = CommandResultId{} + +// CommandResultId is a struct representing the Resource ID for a Command Result +type CommandResultId struct { + SubscriptionId string + ResourceGroupName string + ResourceName string + CommandId string +} + +// NewCommandResultID returns a new CommandResultId struct +func NewCommandResultID(subscriptionId string, resourceGroupName string, resourceName string, commandId string) CommandResultId { + return CommandResultId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ResourceName: resourceName, + CommandId: commandId, + } +} + +// ParseCommandResultID parses 'input' into a CommandResultId +func ParseCommandResultID(input string) (*CommandResultId, error) { + parser := resourceids.NewParserFromResourceIdType(CommandResultId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := CommandResultId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) + } + + if id.CommandId, ok = parsed.Parsed["commandId"]; !ok { + return nil, fmt.Errorf("the segment 'commandId' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseCommandResultIDInsensitively parses 'input' case-insensitively into a CommandResultId +// note: this method should only be used for API response data and not user input +func ParseCommandResultIDInsensitively(input string) (*CommandResultId, error) { + parser := resourceids.NewParserFromResourceIdType(CommandResultId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := CommandResultId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) + } + + if id.CommandId, ok = parsed.Parsed["commandId"]; !ok { + return nil, fmt.Errorf("the segment 'commandId' was not found in the 
resource id %q", input) + } + + return &id, nil +} + +// ValidateCommandResultID checks that 'input' can be parsed as a Command Result ID +func ValidateCommandResultID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseCommandResultID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Command Result ID +func (id CommandResultId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s/commandResults/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ResourceName, id.CommandId) +} + +// Segments returns a slice of Resource ID Segments which comprise this Command Result ID +func (id CommandResultId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("resourceName", "resourceValue"), + resourceids.StaticSegment("staticCommandResults", "commandResults", "commandResults"), + resourceids.UserSpecifiedSegment("commandId", "commandIdValue"), + } +} + +// String returns a human-readable description of this Command Result ID +func (id CommandResultId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Resource Name: %q", id.ResourceName), + fmt.Sprintf("Command: %q", id.CommandId), + } + return fmt.Sprintf("Command Result (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/id_location.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/id_location.go new file mode 100644 index 000000000000..63ae042c8a87 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/id_location.go @@ -0,0 +1,111 @@ +package managedclusters + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +var _ resourceids.ResourceId = LocationId{} + +// LocationId is a struct representing the Resource ID for a Location +type LocationId struct { + SubscriptionId string + Location string +} + +// NewLocationID returns a new LocationId struct +func NewLocationID(subscriptionId string, location string) LocationId { + return LocationId{ + SubscriptionId: subscriptionId, + Location: location, + } +} + +// ParseLocationID parses 'input' into a LocationId +func ParseLocationID(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(LocationId{}) + parsed, err := parser.Parse(input, false) + if err != 
nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := LocationId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.Location, ok = parsed.Parsed["location"]; !ok { + return nil, fmt.Errorf("the segment 'location' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseLocationIDInsensitively parses 'input' case-insensitively into a LocationId +// note: this method should only be used for API response data and not user input +func ParseLocationIDInsensitively(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(LocationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := LocationId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.Location, ok = parsed.Parsed["location"]; !ok { + return nil, fmt.Errorf("the segment 'location' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateLocationID checks that 'input' can be parsed as a Location ID +func ValidateLocationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseLocationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Location ID +func (id LocationId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.ContainerService/locations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.Location) +} + +// Segments returns a slice of Resource ID Segments which comprise this Location ID +func (id LocationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticLocations", "locations", "locations"), + resourceids.UserSpecifiedSegment("location", "locationValue"), + } +} + +// String returns a human-readable description of this Location ID +func (id LocationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location: %q", id.Location), + } + return fmt.Sprintf("Location (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/id_managedcluster.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/id_managedcluster.go new file mode 100644 index 000000000000..524f6fc5fe70 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/id_managedcluster.go @@ -0,0 +1,124 @@ +package managedclusters + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + 
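A quick round-trip of the Managed Cluster ID helpers defined below (`NewManagedClusterID`, `ID` and `ParseManagedClusterID`), using placeholder values:

```go
id := managedclusters.NewManagedClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "example-cluster")

raw := id.ID()
// raw is "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/example-cluster"

parsed, err := managedclusters.ParseManagedClusterID(raw)
if err != nil {
	// handle the error
}
// parsed.ResourceName is "example-cluster"; ParseManagedClusterIDInsensitively is reserved for API response data
_ = parsed
```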
+var _ resourceids.ResourceId = ManagedClusterId{} + +// ManagedClusterId is a struct representing the Resource ID for a Managed Cluster +type ManagedClusterId struct { + SubscriptionId string + ResourceGroupName string + ResourceName string +} + +// NewManagedClusterID returns a new ManagedClusterId struct +func NewManagedClusterID(subscriptionId string, resourceGroupName string, resourceName string) ManagedClusterId { + return ManagedClusterId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ResourceName: resourceName, + } +} + +// ParseManagedClusterID parses 'input' into a ManagedClusterId +func ParseManagedClusterID(input string) (*ManagedClusterId, error) { + parser := resourceids.NewParserFromResourceIdType(ManagedClusterId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ManagedClusterId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ParseManagedClusterIDInsensitively parses 'input' case-insensitively into a ManagedClusterId +// note: this method should only be used for API response data and not user input +func ParseManagedClusterIDInsensitively(input string) (*ManagedClusterId, error) { + parser := resourceids.NewParserFromResourceIdType(ManagedClusterId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + var ok bool + id := ManagedClusterId{} + + if id.SubscriptionId, ok = parsed.Parsed["subscriptionId"]; !ok { + return nil, fmt.Errorf("the segment 'subscriptionId' was not found in the resource id %q", input) + } + + if id.ResourceGroupName, ok = parsed.Parsed["resourceGroupName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceGroupName' was not found in the resource id %q", input) + } + + if id.ResourceName, ok = parsed.Parsed["resourceName"]; !ok { + return nil, fmt.Errorf("the segment 'resourceName' was not found in the resource id %q", input) + } + + return &id, nil +} + +// ValidateManagedClusterID checks that 'input' can be parsed as a Managed Cluster ID +func ValidateManagedClusterID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseManagedClusterID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Managed Cluster ID +func (id ManagedClusterId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ResourceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Managed Cluster ID +func (id ManagedClusterId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + 
resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("resourceName", "resourceValue"), + } +} + +// String returns a human-readable description of this Managed Cluster ID +func (id ManagedClusterId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Resource Name: %q", id.ResourceName), + } + return fmt.Sprintf("Managed Cluster (%s)", strings.Join(components, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_abortlatestoperation_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_abortlatestoperation_autorest.go new file mode 100644 index 000000000000..f68d76fa2e01 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_abortlatestoperation_autorest.go @@ -0,0 +1,67 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AbortLatestOperationOperationResponse struct { + HttpResponse *http.Response +} + +// AbortLatestOperation ... +func (c ManagedClustersClient) AbortLatestOperation(ctx context.Context, id ManagedClusterId) (result AbortLatestOperationOperationResponse, err error) { + req, err := c.preparerForAbortLatestOperation(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "AbortLatestOperation", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "AbortLatestOperation", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForAbortLatestOperation(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "AbortLatestOperation", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForAbortLatestOperation prepares the AbortLatestOperation request. 
+func (c ManagedClustersClient) preparerForAbortLatestOperation(ctx context.Context, id ManagedClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/abort", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForAbortLatestOperation handles the response to the AbortLatestOperation request. The method always +// closes the http.Response Body. +func (c ManagedClustersClient) responderForAbortLatestOperation(resp *http.Response) (result AbortLatestOperationOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusNoContent), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_createorupdate_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_createorupdate_autorest.go new file mode 100644 index 000000000000..3bf60ea59122 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_createorupdate_autorest.go @@ -0,0 +1,79 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// CreateOrUpdate ... +func (c ManagedClustersClient) CreateOrUpdate(ctx context.Context, id ManagedClusterId, input ManagedCluster) (result CreateOrUpdateOperationResponse, err error) { + req, err := c.preparerForCreateOrUpdate(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = c.senderForCreateOrUpdate(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "CreateOrUpdate", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c ManagedClustersClient) CreateOrUpdateThenPoll(ctx context.Context, id ManagedClusterId, input ManagedCluster) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} + +// preparerForCreateOrUpdate prepares the CreateOrUpdate request. 
+func (c ManagedClustersClient) preparerForCreateOrUpdate(ctx context.Context, id ManagedClusterId, input ManagedCluster) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForCreateOrUpdate sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (c ManagedClustersClient) senderForCreateOrUpdate(ctx context.Context, req *http.Request) (future CreateOrUpdateOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_delete_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_delete_autorest.go new file mode 100644 index 000000000000..0255e77f2611 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_delete_autorest.go @@ -0,0 +1,107 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +type DeleteOperationOptions struct { + IgnorePodDisruptionBudget *bool +} + +func DefaultDeleteOperationOptions() DeleteOperationOptions { + return DeleteOperationOptions{} +} + +func (o DeleteOperationOptions) toHeaders() map[string]interface{} { + out := make(map[string]interface{}) + + return out +} + +func (o DeleteOperationOptions) toQueryString() map[string]interface{} { + out := make(map[string]interface{}) + + if o.IgnorePodDisruptionBudget != nil { + out["ignore-pod-disruption-budget"] = *o.IgnorePodDisruptionBudget + } + + return out +} + +// Delete ... 
+func (c ManagedClustersClient) Delete(ctx context.Context, id ManagedClusterId, options DeleteOperationOptions) (result DeleteOperationResponse, err error) { + req, err := c.preparerForDelete(ctx, id, options) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = c.senderForDelete(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "Delete", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c ManagedClustersClient) DeleteThenPoll(ctx context.Context, id ManagedClusterId, options DeleteOperationOptions) error { + result, err := c.Delete(ctx, id, options) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} + +// preparerForDelete prepares the Delete request. +func (c ManagedClustersClient) preparerForDelete(ctx context.Context, id ManagedClusterId, options DeleteOperationOptions) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + for k, v := range options.toQueryString() { + queryParameters[k] = autorest.Encode("query", v) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsDelete(), + autorest.WithBaseURL(c.baseUri), + autorest.WithHeaders(options.toHeaders()), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForDelete sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (c ManagedClustersClient) senderForDelete(ctx context.Context, req *http.Request) (future DeleteOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_get_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_get_autorest.go new file mode 100644 index 000000000000..c5e08ae898de --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_get_autorest.go @@ -0,0 +1,68 @@ +package managedclusters + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + Model *ManagedCluster +} + +// Get ... 
+func (c ManagedClustersClient) Get(ctx context.Context, id ManagedClusterId) (result GetOperationResponse, err error) { + req, err := c.preparerForGet(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "Get", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "Get", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGet(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "Get", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGet prepares the Get request. +func (c ManagedClustersClient) preparerForGet(ctx context.Context, id ManagedClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGet handles the response to the Get request. The method always +// closes the http.Response Body. +func (c ManagedClustersClient) responderForGet(resp *http.Response) (result GetOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_getaccessprofile_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_getaccessprofile_autorest.go new file mode 100644 index 000000000000..82b7df70d16e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_getaccessprofile_autorest.go @@ -0,0 +1,69 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetAccessProfileOperationResponse struct { + HttpResponse *http.Response + Model *ManagedClusterAccessProfile +} + +// GetAccessProfile ... 
+func (c ManagedClustersClient) GetAccessProfile(ctx context.Context, id AccessProfileId) (result GetAccessProfileOperationResponse, err error) { + req, err := c.preparerForGetAccessProfile(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "GetAccessProfile", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "GetAccessProfile", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGetAccessProfile(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "GetAccessProfile", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGetAccessProfile prepares the GetAccessProfile request. +func (c ManagedClustersClient) preparerForGetAccessProfile(ctx context.Context, id AccessProfileId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/listCredential", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGetAccessProfile handles the response to the GetAccessProfile request. The method always +// closes the http.Response Body. +func (c ManagedClustersClient) responderForGetAccessProfile(resp *http.Response) (result GetAccessProfileOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_getcommandresult_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_getcommandresult_autorest.go new file mode 100644 index 000000000000..b5d9f095c1eb --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_getcommandresult_autorest.go @@ -0,0 +1,68 @@ +package managedclusters + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetCommandResultOperationResponse struct { + HttpResponse *http.Response + Model *RunCommandResult +} + +// GetCommandResult ... 
+func (c ManagedClustersClient) GetCommandResult(ctx context.Context, id CommandResultId) (result GetCommandResultOperationResponse, err error) { + req, err := c.preparerForGetCommandResult(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "GetCommandResult", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "GetCommandResult", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGetCommandResult(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "GetCommandResult", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGetCommandResult prepares the GetCommandResult request. +func (c ManagedClustersClient) preparerForGetCommandResult(ctx context.Context, id CommandResultId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGetCommandResult handles the response to the GetCommandResult request. The method always +// closes the http.Response Body. +func (c ManagedClustersClient) responderForGetCommandResult(resp *http.Response) (result GetCommandResultOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_getosoptions_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_getosoptions_autorest.go new file mode 100644 index 000000000000..59f01e6d53f8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_getosoptions_autorest.go @@ -0,0 +1,98 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOSOptionsOperationResponse struct { + HttpResponse *http.Response + Model *OSOptionProfile +} + +type GetOSOptionsOperationOptions struct { + ResourceType *string +} + +func DefaultGetOSOptionsOperationOptions() GetOSOptionsOperationOptions { + return GetOSOptionsOperationOptions{} +} + +func (o GetOSOptionsOperationOptions) toHeaders() map[string]interface{} { + out := make(map[string]interface{}) + + return out +} + +func (o GetOSOptionsOperationOptions) toQueryString() map[string]interface{} { + out := make(map[string]interface{}) + + if o.ResourceType != nil { + out["resource-type"] = *o.ResourceType + } + + return out +} + +// GetOSOptions ... 
+func (c ManagedClustersClient) GetOSOptions(ctx context.Context, id LocationId, options GetOSOptionsOperationOptions) (result GetOSOptionsOperationResponse, err error) { + req, err := c.preparerForGetOSOptions(ctx, id, options) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "GetOSOptions", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "GetOSOptions", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGetOSOptions(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "GetOSOptions", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGetOSOptions prepares the GetOSOptions request. +func (c ManagedClustersClient) preparerForGetOSOptions(ctx context.Context, id LocationId, options GetOSOptionsOperationOptions) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + for k, v := range options.toQueryString() { + queryParameters[k] = autorest.Encode("query", v) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithHeaders(options.toHeaders()), + autorest.WithPath(fmt.Sprintf("%s/osOptions/default", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGetOSOptions handles the response to the GetOSOptions request. The method always +// closes the http.Response Body. +func (c ManagedClustersClient) responderForGetOSOptions(resp *http.Response) (result GetOSOptionsOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_getupgradeprofile_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_getupgradeprofile_autorest.go new file mode 100644 index 000000000000..cb70f49aa028 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_getupgradeprofile_autorest.go @@ -0,0 +1,69 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUpgradeProfileOperationResponse struct { + HttpResponse *http.Response + Model *ManagedClusterUpgradeProfile +} + +// GetUpgradeProfile ... 
+func (c ManagedClustersClient) GetUpgradeProfile(ctx context.Context, id ManagedClusterId) (result GetUpgradeProfileOperationResponse, err error) { + req, err := c.preparerForGetUpgradeProfile(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "GetUpgradeProfile", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "GetUpgradeProfile", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForGetUpgradeProfile(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "GetUpgradeProfile", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForGetUpgradeProfile prepares the GetUpgradeProfile request. +func (c ManagedClustersClient) preparerForGetUpgradeProfile(ctx context.Context, id ManagedClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/upgradeProfiles/default", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForGetUpgradeProfile handles the response to the GetUpgradeProfile request. The method always +// closes the http.Response Body. +func (c ManagedClustersClient) responderForGetUpgradeProfile(resp *http.Response) (result GetUpgradeProfileOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_list_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_list_autorest.go new file mode 100644 index 000000000000..871409d1963c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_list_autorest.go @@ -0,0 +1,187 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListOperationResponse struct { + HttpResponse *http.Response + Model *[]ManagedCluster + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListOperationResponse, error) +} + +type ListCompleteResult struct { + Items []ManagedCluster +} + +func (r ListOperationResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListOperationResponse) LoadMore(ctx context.Context) (resp ListOperationResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +// List ... 
+func (c ManagedClustersClient) List(ctx context.Context, id commonids.SubscriptionId) (resp ListOperationResponse, err error) { + req, err := c.preparerForList(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "List", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "List", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForList(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "List", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// preparerForList prepares the List request. +func (c ManagedClustersClient) preparerForList(ctx context.Context, id commonids.SubscriptionId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/providers/Microsoft.ContainerService/managedClusters", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListWithNextLink prepares the List request with the given nextLink token. +func (c ManagedClustersClient) preparerForListWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForList handles the response to the List request. The method always +// closes the http.Response Body. 
+func (c ManagedClustersClient) responderForList(resp *http.Response) (result ListOperationResponse, err error) { + type page struct { + Values []ManagedCluster `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListOperationResponse, err error) { + req, err := c.preparerForListWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "List", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "List", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForList(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "List", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} + +// ListComplete retrieves all of the results into a single object +func (c ManagedClustersClient) ListComplete(ctx context.Context, id commonids.SubscriptionId) (ListCompleteResult, error) { + return c.ListCompleteMatchingPredicate(ctx, id, ManagedClusterOperationPredicate{}) +} + +// ListCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c ManagedClustersClient) ListCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate ManagedClusterOperationPredicate) (resp ListCompleteResult, err error) { + items := make([]ManagedCluster, 0) + + page, err := c.List(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListCompleteResult{ + Items: items, + } + return out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listbyresourcegroup_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listbyresourcegroup_autorest.go new file mode 100644 index 000000000000..bc961d43758e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listbyresourcegroup_autorest.go @@ -0,0 +1,187 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + Model *[]ManagedCluster + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListByResourceGroupOperationResponse, error) +} + +type ListByResourceGroupCompleteResult struct { + Items []ManagedCluster +} + +func (r ListByResourceGroupOperationResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListByResourceGroupOperationResponse) LoadMore(ctx context.Context) (resp ListByResourceGroupOperationResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +// ListByResourceGroup ... +func (c ManagedClustersClient) ListByResourceGroup(ctx context.Context, id commonids.ResourceGroupId) (resp ListByResourceGroupOperationResponse, err error) { + req, err := c.preparerForListByResourceGroup(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListByResourceGroup", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForListByResourceGroup(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListByResourceGroup", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// preparerForListByResourceGroup prepares the ListByResourceGroup request. +func (c ManagedClustersClient) preparerForListByResourceGroup(ctx context.Context, id commonids.ResourceGroupId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/providers/Microsoft.ContainerService/managedClusters", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListByResourceGroupWithNextLink prepares the ListByResourceGroup request with the given nextLink token. +func (c ManagedClustersClient) preparerForListByResourceGroupWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListByResourceGroup handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. 
+func (c ManagedClustersClient) responderForListByResourceGroup(resp *http.Response) (result ListByResourceGroupOperationResponse, err error) { + type page struct { + Values []ManagedCluster `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListByResourceGroupOperationResponse, err error) { + req, err := c.preparerForListByResourceGroupWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListByResourceGroup", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListByResourceGroup(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListByResourceGroup", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} + +// ListByResourceGroupComplete retrieves all of the results into a single object +func (c ManagedClustersClient) ListByResourceGroupComplete(ctx context.Context, id commonids.ResourceGroupId) (ListByResourceGroupCompleteResult, error) { + return c.ListByResourceGroupCompleteMatchingPredicate(ctx, id, ManagedClusterOperationPredicate{}) +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c ManagedClustersClient) ListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id commonids.ResourceGroupId, predicate ManagedClusterOperationPredicate) (resp ListByResourceGroupCompleteResult, err error) { + items := make([]ManagedCluster, 0) + + page, err := c.ListByResourceGroup(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListByResourceGroupCompleteResult{ + Items: items, + } + return out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listclusteradmincredentials_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listclusteradmincredentials_autorest.go new file mode 100644 index 000000000000..e14b90ceb4e2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listclusteradmincredentials_autorest.go @@ -0,0 +1,98 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + 
"github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListClusterAdminCredentialsOperationResponse struct { + HttpResponse *http.Response + Model *CredentialResults +} + +type ListClusterAdminCredentialsOperationOptions struct { + ServerFqdn *string +} + +func DefaultListClusterAdminCredentialsOperationOptions() ListClusterAdminCredentialsOperationOptions { + return ListClusterAdminCredentialsOperationOptions{} +} + +func (o ListClusterAdminCredentialsOperationOptions) toHeaders() map[string]interface{} { + out := make(map[string]interface{}) + + return out +} + +func (o ListClusterAdminCredentialsOperationOptions) toQueryString() map[string]interface{} { + out := make(map[string]interface{}) + + if o.ServerFqdn != nil { + out["server-fqdn"] = *o.ServerFqdn + } + + return out +} + +// ListClusterAdminCredentials ... +func (c ManagedClustersClient) ListClusterAdminCredentials(ctx context.Context, id ManagedClusterId, options ListClusterAdminCredentialsOperationOptions) (result ListClusterAdminCredentialsOperationResponse, err error) { + req, err := c.preparerForListClusterAdminCredentials(ctx, id, options) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListClusterAdminCredentials", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListClusterAdminCredentials", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListClusterAdminCredentials(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListClusterAdminCredentials", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForListClusterAdminCredentials prepares the ListClusterAdminCredentials request. +func (c ManagedClustersClient) preparerForListClusterAdminCredentials(ctx context.Context, id ManagedClusterId, options ListClusterAdminCredentialsOperationOptions) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + for k, v := range options.toQueryString() { + queryParameters[k] = autorest.Encode("query", v) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithHeaders(options.toHeaders()), + autorest.WithPath(fmt.Sprintf("%s/listClusterAdminCredential", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListClusterAdminCredentials handles the response to the ListClusterAdminCredentials request. The method always +// closes the http.Response Body. 
+func (c ManagedClustersClient) responderForListClusterAdminCredentials(resp *http.Response) (result ListClusterAdminCredentialsOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listclustermonitoringusercredentials_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listclustermonitoringusercredentials_autorest.go new file mode 100644 index 000000000000..6065e73fd64c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listclustermonitoringusercredentials_autorest.go @@ -0,0 +1,98 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListClusterMonitoringUserCredentialsOperationResponse struct { + HttpResponse *http.Response + Model *CredentialResults +} + +type ListClusterMonitoringUserCredentialsOperationOptions struct { + ServerFqdn *string +} + +func DefaultListClusterMonitoringUserCredentialsOperationOptions() ListClusterMonitoringUserCredentialsOperationOptions { + return ListClusterMonitoringUserCredentialsOperationOptions{} +} + +func (o ListClusterMonitoringUserCredentialsOperationOptions) toHeaders() map[string]interface{} { + out := make(map[string]interface{}) + + return out +} + +func (o ListClusterMonitoringUserCredentialsOperationOptions) toQueryString() map[string]interface{} { + out := make(map[string]interface{}) + + if o.ServerFqdn != nil { + out["server-fqdn"] = *o.ServerFqdn + } + + return out +} + +// ListClusterMonitoringUserCredentials ... +func (c ManagedClustersClient) ListClusterMonitoringUserCredentials(ctx context.Context, id ManagedClusterId, options ListClusterMonitoringUserCredentialsOperationOptions) (result ListClusterMonitoringUserCredentialsOperationResponse, err error) { + req, err := c.preparerForListClusterMonitoringUserCredentials(ctx, id, options) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListClusterMonitoringUserCredentials", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListClusterMonitoringUserCredentials", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListClusterMonitoringUserCredentials(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListClusterMonitoringUserCredentials", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForListClusterMonitoringUserCredentials prepares the ListClusterMonitoringUserCredentials request. 
+func (c ManagedClustersClient) preparerForListClusterMonitoringUserCredentials(ctx context.Context, id ManagedClusterId, options ListClusterMonitoringUserCredentialsOperationOptions) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + for k, v := range options.toQueryString() { + queryParameters[k] = autorest.Encode("query", v) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithHeaders(options.toHeaders()), + autorest.WithPath(fmt.Sprintf("%s/listClusterMonitoringUserCredential", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListClusterMonitoringUserCredentials handles the response to the ListClusterMonitoringUserCredentials request. The method always +// closes the http.Response Body. +func (c ManagedClustersClient) responderForListClusterMonitoringUserCredentials(resp *http.Response) (result ListClusterMonitoringUserCredentialsOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listclusterusercredentials_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listclusterusercredentials_autorest.go new file mode 100644 index 000000000000..2cd263f9b47e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listclusterusercredentials_autorest.go @@ -0,0 +1,103 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListClusterUserCredentialsOperationResponse struct { + HttpResponse *http.Response + Model *CredentialResults +} + +type ListClusterUserCredentialsOperationOptions struct { + Format *Format + ServerFqdn *string +} + +func DefaultListClusterUserCredentialsOperationOptions() ListClusterUserCredentialsOperationOptions { + return ListClusterUserCredentialsOperationOptions{} +} + +func (o ListClusterUserCredentialsOperationOptions) toHeaders() map[string]interface{} { + out := make(map[string]interface{}) + + return out +} + +func (o ListClusterUserCredentialsOperationOptions) toQueryString() map[string]interface{} { + out := make(map[string]interface{}) + + if o.Format != nil { + out["format"] = *o.Format + } + + if o.ServerFqdn != nil { + out["server-fqdn"] = *o.ServerFqdn + } + + return out +} + +// ListClusterUserCredentials ... 
+func (c ManagedClustersClient) ListClusterUserCredentials(ctx context.Context, id ManagedClusterId, options ListClusterUserCredentialsOperationOptions) (result ListClusterUserCredentialsOperationResponse, err error) { + req, err := c.preparerForListClusterUserCredentials(ctx, id, options) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListClusterUserCredentials", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListClusterUserCredentials", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListClusterUserCredentials(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListClusterUserCredentials", result.HttpResponse, "Failure responding to request") + return + } + + return +} + +// preparerForListClusterUserCredentials prepares the ListClusterUserCredentials request. +func (c ManagedClustersClient) preparerForListClusterUserCredentials(ctx context.Context, id ManagedClusterId, options ListClusterUserCredentialsOperationOptions) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + for k, v := range options.toQueryString() { + queryParameters[k] = autorest.Encode("query", v) + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithHeaders(options.toHeaders()), + autorest.WithPath(fmt.Sprintf("%s/listClusterUserCredential", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListClusterUserCredentials handles the response to the ListClusterUserCredentials request. The method always +// closes the http.Response Body. +func (c ManagedClustersClient) responderForListClusterUserCredentials(resp *http.Response) (result ListClusterUserCredentialsOperationResponse, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Model), + autorest.ByClosing()) + result.HttpResponse = resp + + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listoutboundnetworkdependenciesendpoints_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listoutboundnetworkdependenciesendpoints_autorest.go new file mode 100644 index 000000000000..cdc548f003d0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_listoutboundnetworkdependenciesendpoints_autorest.go @@ -0,0 +1,186 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListOutboundNetworkDependenciesEndpointsOperationResponse struct { + HttpResponse *http.Response + Model *[]OutboundEnvironmentEndpoint + + nextLink *string + nextPageFunc func(ctx context.Context, nextLink string) (ListOutboundNetworkDependenciesEndpointsOperationResponse, error) +} + +type ListOutboundNetworkDependenciesEndpointsCompleteResult struct { + Items []OutboundEnvironmentEndpoint +} + +func (r ListOutboundNetworkDependenciesEndpointsOperationResponse) HasMore() bool { + return r.nextLink != nil +} + +func (r ListOutboundNetworkDependenciesEndpointsOperationResponse) LoadMore(ctx context.Context) (resp ListOutboundNetworkDependenciesEndpointsOperationResponse, err error) { + if !r.HasMore() { + err = fmt.Errorf("no more pages returned") + return + } + return r.nextPageFunc(ctx, *r.nextLink) +} + +// ListOutboundNetworkDependenciesEndpoints ... +func (c ManagedClustersClient) ListOutboundNetworkDependenciesEndpoints(ctx context.Context, id ManagedClusterId) (resp ListOutboundNetworkDependenciesEndpointsOperationResponse, err error) { + req, err := c.preparerForListOutboundNetworkDependenciesEndpoints(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListOutboundNetworkDependenciesEndpoints", nil, "Failure preparing request") + return + } + + resp.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListOutboundNetworkDependenciesEndpoints", resp.HttpResponse, "Failure sending request") + return + } + + resp, err = c.responderForListOutboundNetworkDependenciesEndpoints(resp.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListOutboundNetworkDependenciesEndpoints", resp.HttpResponse, "Failure responding to request") + return + } + return +} + +// preparerForListOutboundNetworkDependenciesEndpoints prepares the ListOutboundNetworkDependenciesEndpoints request. +func (c ManagedClustersClient) preparerForListOutboundNetworkDependenciesEndpoints(ctx context.Context, id ManagedClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/outboundNetworkDependenciesEndpoints", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// preparerForListOutboundNetworkDependenciesEndpointsWithNextLink prepares the ListOutboundNetworkDependenciesEndpoints request with the given nextLink token. 
+func (c ManagedClustersClient) preparerForListOutboundNetworkDependenciesEndpointsWithNextLink(ctx context.Context, nextLink string) (*http.Request, error) { + uri, err := url.Parse(nextLink) + if err != nil { + return nil, fmt.Errorf("parsing nextLink %q: %+v", nextLink, err) + } + queryParameters := map[string]interface{}{} + for k, v := range uri.Query() { + if len(v) == 0 { + continue + } + val := v[0] + val = autorest.Encode("query", val) + queryParameters[k] = val + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsGet(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(uri.Path), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// responderForListOutboundNetworkDependenciesEndpoints handles the response to the ListOutboundNetworkDependenciesEndpoints request. The method always +// closes the http.Response Body. +func (c ManagedClustersClient) responderForListOutboundNetworkDependenciesEndpoints(resp *http.Response) (result ListOutboundNetworkDependenciesEndpointsOperationResponse, err error) { + type page struct { + Values []OutboundEnvironmentEndpoint `json:"value"` + NextLink *string `json:"nextLink"` + } + var respObj page + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&respObj), + autorest.ByClosing()) + result.HttpResponse = resp + result.Model = &respObj.Values + result.nextLink = respObj.NextLink + if respObj.NextLink != nil { + result.nextPageFunc = func(ctx context.Context, nextLink string) (result ListOutboundNetworkDependenciesEndpointsOperationResponse, err error) { + req, err := c.preparerForListOutboundNetworkDependenciesEndpointsWithNextLink(ctx, nextLink) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListOutboundNetworkDependenciesEndpoints", nil, "Failure preparing request") + return + } + + result.HttpResponse, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListOutboundNetworkDependenciesEndpoints", result.HttpResponse, "Failure sending request") + return + } + + result, err = c.responderForListOutboundNetworkDependenciesEndpoints(result.HttpResponse) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ListOutboundNetworkDependenciesEndpoints", result.HttpResponse, "Failure responding to request") + return + } + + return + } + } + return +} + +// ListOutboundNetworkDependenciesEndpointsComplete retrieves all of the results into a single object +func (c ManagedClustersClient) ListOutboundNetworkDependenciesEndpointsComplete(ctx context.Context, id ManagedClusterId) (ListOutboundNetworkDependenciesEndpointsCompleteResult, error) { + return c.ListOutboundNetworkDependenciesEndpointsCompleteMatchingPredicate(ctx, id, OutboundEnvironmentEndpointOperationPredicate{}) +} + +// ListOutboundNetworkDependenciesEndpointsCompleteMatchingPredicate retrieves all of the results and then applied the predicate +func (c ManagedClustersClient) ListOutboundNetworkDependenciesEndpointsCompleteMatchingPredicate(ctx context.Context, id ManagedClusterId, predicate OutboundEnvironmentEndpointOperationPredicate) (resp ListOutboundNetworkDependenciesEndpointsCompleteResult, err error) { + items := make([]OutboundEnvironmentEndpoint, 0) + + page, err := 
c.ListOutboundNetworkDependenciesEndpoints(ctx, id) + if err != nil { + err = fmt.Errorf("loading the initial page: %+v", err) + return + } + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + for page.HasMore() { + page, err = page.LoadMore(ctx) + if err != nil { + err = fmt.Errorf("loading the next page: %+v", err) + return + } + + if page.Model != nil { + for _, v := range *page.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + } + + out := ListOutboundNetworkDependenciesEndpointsCompleteResult{ + Items: items, + } + return out, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_resetaadprofile_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_resetaadprofile_autorest.go new file mode 100644 index 000000000000..8c3bf221bffb --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_resetaadprofile_autorest.go @@ -0,0 +1,79 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResetAADProfileOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// ResetAADProfile ... +func (c ManagedClustersClient) ResetAADProfile(ctx context.Context, id ManagedClusterId, input ManagedClusterAADProfile) (result ResetAADProfileOperationResponse, err error) { + req, err := c.preparerForResetAADProfile(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ResetAADProfile", nil, "Failure preparing request") + return + } + + result, err = c.senderForResetAADProfile(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ResetAADProfile", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// ResetAADProfileThenPoll performs ResetAADProfile then polls until it's completed +func (c ManagedClustersClient) ResetAADProfileThenPoll(ctx context.Context, id ManagedClusterId, input ManagedClusterAADProfile) error { + result, err := c.ResetAADProfile(ctx, id, input) + if err != nil { + return fmt.Errorf("performing ResetAADProfile: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after ResetAADProfile: %+v", err) + } + + return nil +} + +// preparerForResetAADProfile prepares the ResetAADProfile request. 
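For context, a hedged sketch (not part of the vendored code) of consuming the pager above manually with HasMore/LoadMore instead of the Complete helper; the configured client and ManagedClusterId are assumed to come from the surrounding provider code.

package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters"
)

// countOutboundEndpoints walks every page of outbound network dependency
// endpoints and returns the total number of items across all pages.
func countOutboundEndpoints(ctx context.Context, client managedclusters.ManagedClustersClient, id managedclusters.ManagedClusterId) (int, error) {
	total := 0
	page, err := client.ListOutboundNetworkDependenciesEndpoints(ctx, id)
	if err != nil {
		return 0, fmt.Errorf("listing outbound network dependencies endpoints: %+v", err)
	}
	for {
		if page.Model != nil {
			total += len(*page.Model)
		}
		if !page.HasMore() {
			break
		}
		page, err = page.LoadMore(ctx)
		if err != nil {
			return 0, fmt.Errorf("loading next page: %+v", err)
		}
	}
	return total, nil
}

The ListOutboundNetworkDependenciesEndpointsComplete helper above performs the same walk internally and returns every endpoint in a single ListOutboundNetworkDependenciesEndpointsCompleteResult.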
+func (c ManagedClustersClient) preparerForResetAADProfile(ctx context.Context, id ManagedClusterId, input ManagedClusterAADProfile) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/resetAADProfile", id.ID())), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForResetAADProfile sends the ResetAADProfile request. The method will close the +// http.Response Body if it receives an error. +func (c ManagedClustersClient) senderForResetAADProfile(ctx context.Context, req *http.Request) (future ResetAADProfileOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_resetserviceprincipalprofile_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_resetserviceprincipalprofile_autorest.go new file mode 100644 index 000000000000..7830267d6163 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_resetserviceprincipalprofile_autorest.go @@ -0,0 +1,79 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResetServicePrincipalProfileOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// ResetServicePrincipalProfile ... 
+func (c ManagedClustersClient) ResetServicePrincipalProfile(ctx context.Context, id ManagedClusterId, input ManagedClusterServicePrincipalProfile) (result ResetServicePrincipalProfileOperationResponse, err error) { + req, err := c.preparerForResetServicePrincipalProfile(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ResetServicePrincipalProfile", nil, "Failure preparing request") + return + } + + result, err = c.senderForResetServicePrincipalProfile(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "ResetServicePrincipalProfile", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// ResetServicePrincipalProfileThenPoll performs ResetServicePrincipalProfile then polls until it's completed +func (c ManagedClustersClient) ResetServicePrincipalProfileThenPoll(ctx context.Context, id ManagedClusterId, input ManagedClusterServicePrincipalProfile) error { + result, err := c.ResetServicePrincipalProfile(ctx, id, input) + if err != nil { + return fmt.Errorf("performing ResetServicePrincipalProfile: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after ResetServicePrincipalProfile: %+v", err) + } + + return nil +} + +// preparerForResetServicePrincipalProfile prepares the ResetServicePrincipalProfile request. +func (c ManagedClustersClient) preparerForResetServicePrincipalProfile(ctx context.Context, id ManagedClusterId, input ManagedClusterServicePrincipalProfile) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/resetServicePrincipalProfile", id.ID())), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForResetServicePrincipalProfile sends the ResetServicePrincipalProfile request. The method will close the +// http.Response Body if it receives an error. +func (c ManagedClustersClient) senderForResetServicePrincipalProfile(ctx context.Context, req *http.Request) (future ResetServicePrincipalProfileOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_rotateclustercertificates_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_rotateclustercertificates_autorest.go new file mode 100644 index 000000000000..83b54e351193 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_rotateclustercertificates_autorest.go @@ -0,0 +1,78 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type RotateClusterCertificatesOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// RotateClusterCertificates ... +func (c ManagedClustersClient) RotateClusterCertificates(ctx context.Context, id ManagedClusterId) (result RotateClusterCertificatesOperationResponse, err error) { + req, err := c.preparerForRotateClusterCertificates(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "RotateClusterCertificates", nil, "Failure preparing request") + return + } + + result, err = c.senderForRotateClusterCertificates(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "RotateClusterCertificates", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// RotateClusterCertificatesThenPoll performs RotateClusterCertificates then polls until it's completed +func (c ManagedClustersClient) RotateClusterCertificatesThenPoll(ctx context.Context, id ManagedClusterId) error { + result, err := c.RotateClusterCertificates(ctx, id) + if err != nil { + return fmt.Errorf("performing RotateClusterCertificates: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after RotateClusterCertificates: %+v", err) + } + + return nil +} + +// preparerForRotateClusterCertificates prepares the RotateClusterCertificates request. +func (c ManagedClustersClient) preparerForRotateClusterCertificates(ctx context.Context, id ManagedClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/rotateClusterCertificates", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForRotateClusterCertificates sends the RotateClusterCertificates request. The method will close the +// http.Response Body if it receives an error. +func (c ManagedClustersClient) senderForRotateClusterCertificates(ctx context.Context, req *http.Request) (future RotateClusterCertificatesOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_rotateserviceaccountsigningkeys_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_rotateserviceaccountsigningkeys_autorest.go new file mode 100644 index 000000000000..f5bf5cc8da57 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_rotateserviceaccountsigningkeys_autorest.go @@ -0,0 +1,78 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type RotateServiceAccountSigningKeysOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// RotateServiceAccountSigningKeys ... +func (c ManagedClustersClient) RotateServiceAccountSigningKeys(ctx context.Context, id ManagedClusterId) (result RotateServiceAccountSigningKeysOperationResponse, err error) { + req, err := c.preparerForRotateServiceAccountSigningKeys(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "RotateServiceAccountSigningKeys", nil, "Failure preparing request") + return + } + + result, err = c.senderForRotateServiceAccountSigningKeys(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "RotateServiceAccountSigningKeys", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// RotateServiceAccountSigningKeysThenPoll performs RotateServiceAccountSigningKeys then polls until it's completed +func (c ManagedClustersClient) RotateServiceAccountSigningKeysThenPoll(ctx context.Context, id ManagedClusterId) error { + result, err := c.RotateServiceAccountSigningKeys(ctx, id) + if err != nil { + return fmt.Errorf("performing RotateServiceAccountSigningKeys: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after RotateServiceAccountSigningKeys: %+v", err) + } + + return nil +} + +// preparerForRotateServiceAccountSigningKeys prepares the RotateServiceAccountSigningKeys request. +func (c ManagedClustersClient) preparerForRotateServiceAccountSigningKeys(ctx context.Context, id ManagedClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/rotateServiceAccountSigningKeys", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForRotateServiceAccountSigningKeys sends the RotateServiceAccountSigningKeys request. The method will close the +// http.Response Body if it receives an error. +func (c ManagedClustersClient) senderForRotateServiceAccountSigningKeys(ctx context.Context, req *http.Request) (future RotateServiceAccountSigningKeysOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_runcommand_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_runcommand_autorest.go new file mode 100644 index 000000000000..4f3eecf77593 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_runcommand_autorest.go @@ -0,0 +1,79 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RunCommandOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// RunCommand ... +func (c ManagedClustersClient) RunCommand(ctx context.Context, id ManagedClusterId, input RunCommandRequest) (result RunCommandOperationResponse, err error) { + req, err := c.preparerForRunCommand(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "RunCommand", nil, "Failure preparing request") + return + } + + result, err = c.senderForRunCommand(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "RunCommand", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// RunCommandThenPoll performs RunCommand then polls until it's completed +func (c ManagedClustersClient) RunCommandThenPoll(ctx context.Context, id ManagedClusterId, input RunCommandRequest) error { + result, err := c.RunCommand(ctx, id, input) + if err != nil { + return fmt.Errorf("performing RunCommand: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after RunCommand: %+v", err) + } + + return nil +} + +// preparerForRunCommand prepares the RunCommand request. +func (c ManagedClustersClient) preparerForRunCommand(ctx context.Context, id ManagedClusterId, input RunCommandRequest) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/runCommand", id.ID())), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForRunCommand sends the RunCommand request. The method will close the +// http.Response Body if it receives an error. +func (c ManagedClustersClient) senderForRunCommand(ctx context.Context, req *http.Request) (future RunCommandOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_start_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_start_autorest.go new file mode 100644 index 000000000000..e58f11e31c8a --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_start_autorest.go @@ -0,0 +1,78 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type StartOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// Start ... 
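A hedged sketch of driving the runCommand endpoint end to end via the RunCommandThenPoll wrapper shown above. RunCommandRequest is declared elsewhere in this package; the Command field used below is an assumption based on the service's runCommand payload, so treat this as illustrative rather than definitive.

package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters"
)

// runKubectlGetPods issues a kubectl command through the cluster's runCommand
// endpoint and blocks until the long-running operation completes.
func runKubectlGetPods(ctx context.Context, client managedclusters.ManagedClustersClient, id managedclusters.ManagedClusterId) error {
	payload := managedclusters.RunCommandRequest{
		// Command is assumed to be the required field carrying the command text.
		Command: "kubectl get pods -A",
	}
	if err := client.RunCommandThenPoll(ctx, id, payload); err != nil {
		return fmt.Errorf("running command on %s: %+v", id.ID(), err)
	}
	return nil
}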
+func (c ManagedClustersClient) Start(ctx context.Context, id ManagedClusterId) (result StartOperationResponse, err error) { + req, err := c.preparerForStart(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "Start", nil, "Failure preparing request") + return + } + + result, err = c.senderForStart(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "Start", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// StartThenPoll performs Start then polls until it's completed +func (c ManagedClustersClient) StartThenPoll(ctx context.Context, id ManagedClusterId) error { + result, err := c.Start(ctx, id) + if err != nil { + return fmt.Errorf("performing Start: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after Start: %+v", err) + } + + return nil +} + +// preparerForStart prepares the Start request. +func (c ManagedClustersClient) preparerForStart(ctx context.Context, id ManagedClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/start", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForStart sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (c ManagedClustersClient) senderForStart(ctx context.Context, req *http.Request) (future StartOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_stop_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_stop_autorest.go new file mode 100644 index 000000000000..1324f9e0ad44 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_stop_autorest.go @@ -0,0 +1,78 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type StopOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// Stop ... 
+func (c ManagedClustersClient) Stop(ctx context.Context, id ManagedClusterId) (result StopOperationResponse, err error) { + req, err := c.preparerForStop(ctx, id) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "Stop", nil, "Failure preparing request") + return + } + + result, err = c.senderForStop(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "Stop", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// StopThenPoll performs Stop then polls until it's completed +func (c ManagedClustersClient) StopThenPoll(ctx context.Context, id ManagedClusterId) error { + result, err := c.Stop(ctx, id) + if err != nil { + return fmt.Errorf("performing Stop: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after Stop: %+v", err) + } + + return nil +} + +// preparerForStop prepares the Stop request. +func (c ManagedClustersClient) preparerForStop(ctx context.Context, id ManagedClusterId) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(fmt.Sprintf("%s/stop", id.ID())), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForStop sends the Stop request. The method will close the +// http.Response Body if it receives an error. +func (c ManagedClustersClient) senderForStop(ctx context.Context, req *http.Request) (future StopOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_updatetags_autorest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_updatetags_autorest.go new file mode 100644 index 000000000000..8786b846f79d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/method_updatetags_autorest.go @@ -0,0 +1,79 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/polling" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpdateTagsOperationResponse struct { + Poller polling.LongRunningPoller + HttpResponse *http.Response +} + +// UpdateTags ... 
+func (c ManagedClustersClient) UpdateTags(ctx context.Context, id ManagedClusterId, input TagsObject) (result UpdateTagsOperationResponse, err error) { + req, err := c.preparerForUpdateTags(ctx, id, input) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "UpdateTags", nil, "Failure preparing request") + return + } + + result, err = c.senderForUpdateTags(ctx, req) + if err != nil { + err = autorest.NewErrorWithError(err, "managedclusters.ManagedClustersClient", "UpdateTags", result.HttpResponse, "Failure sending request") + return + } + + return +} + +// UpdateTagsThenPoll performs UpdateTags then polls until it's completed +func (c ManagedClustersClient) UpdateTagsThenPoll(ctx context.Context, id ManagedClusterId, input TagsObject) error { + result, err := c.UpdateTags(ctx, id, input) + if err != nil { + return fmt.Errorf("performing UpdateTags: %+v", err) + } + + if err := result.Poller.PollUntilDone(); err != nil { + return fmt.Errorf("polling after UpdateTags: %+v", err) + } + + return nil +} + +// preparerForUpdateTags prepares the UpdateTags request. +func (c ManagedClustersClient) preparerForUpdateTags(ctx context.Context, id ManagedClusterId, input TagsObject) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": defaultApiVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(c.baseUri), + autorest.WithPath(id.ID()), + autorest.WithJSON(input), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// senderForUpdateTags sends the UpdateTags request. The method will close the +// http.Response Body if it receives an error. +func (c ManagedClustersClient) senderForUpdateTags(ctx context.Context, req *http.Request) (future UpdateTagsOperationResponse, err error) { + var resp *http.Response + resp, err = c.Client.Send(req, azure.DoRetryWithRegistration(c.Client)) + if err != nil { + return + } + + future.Poller, err = polling.NewPollerFromResponse(ctx, resp, c.Client, req.Method) + return +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_accessprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_accessprofile.go new file mode 100644 index 000000000000..c13b64ab25d2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_accessprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
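The Start, Stop, RotateClusterCertificates and UpdateTags operations above all follow the same long-running-operation pattern: the plain method returns a response carrying a Poller, while the *ThenPoll wrapper blocks until the operation finishes. A minimal sketch (illustrative only) that restarts a cluster using the wrappers, assuming a configured client and a parsed ManagedClusterId:

package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters"
)

// restartCluster stops a managed cluster and then starts it again, letting
// each *ThenPoll wrapper finish its long-running operation before moving on.
func restartCluster(ctx context.Context, client managedclusters.ManagedClustersClient, id managedclusters.ManagedClusterId) error {
	if err := client.StopThenPoll(ctx, id); err != nil {
		return fmt.Errorf("stopping %s: %+v", id.ID(), err)
	}
	if err := client.StartThenPoll(ctx, id); err != nil {
		return fmt.Errorf("starting %s: %+v", id.ID(), err)
	}
	return nil
}

Callers that need custom polling behaviour can instead call Stop or Start directly and drive result.Poller.PollUntilDone() themselves.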
+ +type AccessProfile struct { + KubeConfig *string `json:"kubeConfig,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_agentpoolupgradesettings.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_agentpoolupgradesettings.go new file mode 100644 index 000000000000..461cb4361b0b --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_agentpoolupgradesettings.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolUpgradeSettings struct { + MaxSurge *string `json:"maxSurge,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_agentpoolwindowsprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_agentpoolwindowsprofile.go new file mode 100644 index 000000000000..5558f04ed005 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_agentpoolwindowsprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolWindowsProfile struct { + DisableOutboundNat *bool `json:"disableOutboundNat,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_azurekeyvaultkms.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_azurekeyvaultkms.go new file mode 100644 index 000000000000..57c4e7848f34 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_azurekeyvaultkms.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AzureKeyVaultKms struct { + Enabled *bool `json:"enabled,omitempty"` + KeyId *string `json:"keyId,omitempty"` + KeyVaultNetworkAccess *KeyVaultNetworkAccessTypes `json:"keyVaultNetworkAccess,omitempty"` + KeyVaultResourceId *string `json:"keyVaultResourceId,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_commandresultproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_commandresultproperties.go new file mode 100644 index 000000000000..745a87ae4ea0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_commandresultproperties.go @@ -0,0 +1,43 @@ +package managedclusters + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CommandResultProperties struct { + ExitCode *int64 `json:"exitCode,omitempty"` + FinishedAt *string `json:"finishedAt,omitempty"` + Logs *string `json:"logs,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + Reason *string `json:"reason,omitempty"` + StartedAt *string `json:"startedAt,omitempty"` +} + +func (o *CommandResultProperties) GetFinishedAtAsTime() (*time.Time, error) { + if o.FinishedAt == nil { + return nil, nil + } + return dates.ParseAsFormat(o.FinishedAt, "2006-01-02T15:04:05Z07:00") +} + +func (o *CommandResultProperties) SetFinishedAtAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.FinishedAt = &formatted +} + +func (o *CommandResultProperties) GetStartedAtAsTime() (*time.Time, error) { + if o.StartedAt == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedAt, "2006-01-02T15:04:05Z07:00") +} + +func (o *CommandResultProperties) SetStartedAtAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedAt = &formatted +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicelinuxprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicelinuxprofile.go new file mode 100644 index 000000000000..a3abf14adeb3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicelinuxprofile.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ContainerServiceLinuxProfile struct { + AdminUsername string `json:"adminUsername"` + Ssh ContainerServiceSshConfiguration `json:"ssh"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicenetworkprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicenetworkprofile.go new file mode 100644 index 000000000000..9e69baba5e33 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicenetworkprofile.go @@ -0,0 +1,23 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
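The generated models store timestamps as RFC3339 strings and expose Get/Set helpers built on the go-azure-helpers dates package. A small illustrative helper (not part of the vendored code), assuming a populated CommandResultProperties value:

package example

import (
	"fmt"
	"time"

	"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters"
)

// commandDuration parses the startedAt/finishedAt timestamps via the generated
// helpers and returns how long the command ran for.
func commandDuration(props managedclusters.CommandResultProperties) (time.Duration, error) {
	started, err := props.GetStartedAtAsTime()
	if err != nil {
		return 0, fmt.Errorf("parsing startedAt: %+v", err)
	}
	finished, err := props.GetFinishedAtAsTime()
	if err != nil {
		return 0, fmt.Errorf("parsing finishedAt: %+v", err)
	}
	if started == nil || finished == nil {
		return 0, fmt.Errorf("startedAt or finishedAt was not set")
	}
	return finished.Sub(*started), nil
}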
+ +type ContainerServiceNetworkProfile struct { + DnsServiceIP *string `json:"dnsServiceIP,omitempty"` + DockerBridgeCidr *string `json:"dockerBridgeCidr,omitempty"` + IPFamilies *[]IPFamily `json:"ipFamilies,omitempty"` + KubeProxyConfig *ContainerServiceNetworkProfileKubeProxyConfig `json:"kubeProxyConfig,omitempty"` + LoadBalancerProfile *ManagedClusterLoadBalancerProfile `json:"loadBalancerProfile,omitempty"` + LoadBalancerSku *LoadBalancerSku `json:"loadBalancerSku,omitempty"` + NatGatewayProfile *ManagedClusterNATGatewayProfile `json:"natGatewayProfile,omitempty"` + NetworkMode *NetworkMode `json:"networkMode,omitempty"` + NetworkPlugin *NetworkPlugin `json:"networkPlugin,omitempty"` + NetworkPluginMode *NetworkPluginMode `json:"networkPluginMode,omitempty"` + NetworkPolicy *NetworkPolicy `json:"networkPolicy,omitempty"` + OutboundType *OutboundType `json:"outboundType,omitempty"` + PodCidr *string `json:"podCidr,omitempty"` + PodCidrs *[]string `json:"podCidrs,omitempty"` + ServiceCidr *string `json:"serviceCidr,omitempty"` + ServiceCidrs *[]string `json:"serviceCidrs,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicenetworkprofilekubeproxyconfig.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicenetworkprofilekubeproxyconfig.go new file mode 100644 index 000000000000..5be387f7c407 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicenetworkprofilekubeproxyconfig.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ContainerServiceNetworkProfileKubeProxyConfig struct { + Enabled *bool `json:"enabled,omitempty"` + IPvsConfig *ContainerServiceNetworkProfileKubeProxyConfigIPvsConfig `json:"ipvsConfig,omitempty"` + Mode *Mode `json:"mode,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicenetworkprofilekubeproxyconfigipvsconfig.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicenetworkprofilekubeproxyconfigipvsconfig.go new file mode 100644 index 000000000000..96dc155e9d5c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicenetworkprofilekubeproxyconfigipvsconfig.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ContainerServiceNetworkProfileKubeProxyConfigIPvsConfig struct { + Scheduler *IPvsScheduler `json:"scheduler,omitempty"` + TcpFinTimeoutSeconds *int64 `json:"tcpFinTimeoutSeconds,omitempty"` + TcpTimeoutSeconds *int64 `json:"tcpTimeoutSeconds,omitempty"` + UdpTimeoutSeconds *int64 `json:"udpTimeoutSeconds,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicesshconfiguration.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicesshconfiguration.go new file mode 100644 index 000000000000..4bf259bd15d5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicesshconfiguration.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ContainerServiceSshConfiguration struct { + PublicKeys []ContainerServiceSshPublicKey `json:"publicKeys"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicesshpublickey.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicesshpublickey.go new file mode 100644 index 000000000000..7796cdf6a902 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_containerservicesshpublickey.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ContainerServiceSshPublicKey struct { + KeyData string `json:"keyData"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_creationdata.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_creationdata.go new file mode 100644 index 000000000000..f2f19907006a --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_creationdata.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreationData struct { + SourceResourceId *string `json:"sourceResourceId,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_credentialresult.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_credentialresult.go new file mode 100644 index 000000000000..7002f56084eb --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_credentialresult.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CredentialResult struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_credentialresults.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_credentialresults.go new file mode 100644 index 000000000000..c572432888e5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_credentialresults.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CredentialResults struct { + Kubeconfigs *[]CredentialResult `json:"kubeconfigs,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_endpointdependency.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_endpointdependency.go new file mode 100644 index 000000000000..ae8639b529bb --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_endpointdependency.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type EndpointDependency struct { + DomainName *string `json:"domainName,omitempty"` + EndpointDetails *[]EndpointDetail `json:"endpointDetails,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_endpointdetail.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_endpointdetail.go new file mode 100644 index 000000000000..de73a2e9168f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_endpointdetail.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type EndpointDetail struct { + Description *string `json:"description,omitempty"` + IPAddress *string `json:"ipAddress,omitempty"` + Port *int64 `json:"port,omitempty"` + Protocol *string `json:"protocol,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_guardrailsprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_guardrailsprofile.go new file mode 100644 index 000000000000..6875f54cbbe5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_guardrailsprofile.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GuardrailsProfile struct { + ExcludedNamespaces *[]string `json:"excludedNamespaces,omitempty"` + Level Level `json:"level"` + SystemExcludedNamespaces *[]string `json:"systemExcludedNamespaces,omitempty"` + Version string `json:"version"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_kubeletconfig.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_kubeletconfig.go new file mode 100644 index 000000000000..0f4036b4e7c1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_kubeletconfig.go @@ -0,0 +1,18 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type KubeletConfig struct { + AllowedUnsafeSysctls *[]string `json:"allowedUnsafeSysctls,omitempty"` + ContainerLogMaxFiles *int64 `json:"containerLogMaxFiles,omitempty"` + ContainerLogMaxSizeMB *int64 `json:"containerLogMaxSizeMB,omitempty"` + CpuCfsQuota *bool `json:"cpuCfsQuota,omitempty"` + CpuCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty"` + CpuManagerPolicy *string `json:"cpuManagerPolicy,omitempty"` + FailSwapOn *bool `json:"failSwapOn,omitempty"` + ImageGcHighThreshold *int64 `json:"imageGcHighThreshold,omitempty"` + ImageGcLowThreshold *int64 `json:"imageGcLowThreshold,omitempty"` + PodMaxPids *int64 `json:"podMaxPids,omitempty"` + TopologyManagerPolicy *string `json:"topologyManagerPolicy,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_linuxosconfig.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_linuxosconfig.go new file mode 100644 index 000000000000..431f6faa2aa6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_linuxosconfig.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type LinuxOSConfig struct { + SwapFileSizeMB *int64 `json:"swapFileSizeMB,omitempty"` + Sysctls *SysctlConfig `json:"sysctls,omitempty"` + TransparentHugePageDefrag *string `json:"transparentHugePageDefrag,omitempty"` + TransparentHugePageEnabled *string `json:"transparentHugePageEnabled,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedcluster.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedcluster.go new file mode 100644 index 000000000000..96f028c8024d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedcluster.go @@ -0,0 +1,23 @@ +package managedclusters + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/edgezones" + "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ManagedCluster struct { + ExtendedLocation *edgezones.Model `json:"extendedLocation,omitempty"` + Id *string `json:"id,omitempty"` + Identity *identity.SystemOrUserAssignedMap `json:"identity,omitempty"` + Location string `json:"location"` + Name *string `json:"name,omitempty"` + Properties *ManagedClusterProperties `json:"properties,omitempty"` + Sku *ManagedClusterSKU `json:"sku,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteraadprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteraadprofile.go new file mode 100644 index 000000000000..271addb1fd4d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteraadprofile.go @@ -0,0 +1,14 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterAADProfile struct { + AdminGroupObjectIDs *[]string `json:"adminGroupObjectIDs,omitempty"` + ClientAppID *string `json:"clientAppID,omitempty"` + EnableAzureRBAC *bool `json:"enableAzureRBAC,omitempty"` + Managed *bool `json:"managed,omitempty"` + ServerAppID *string `json:"serverAppID,omitempty"` + ServerAppSecret *string `json:"serverAppSecret,omitempty"` + TenantID *string `json:"tenantID,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteraccessprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteraccessprofile.go new file mode 100644 index 000000000000..e248b2a253b1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteraccessprofile.go @@ -0,0 +1,18 @@ +package managedclusters + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterAccessProfile struct { + Id *string `json:"id,omitempty"` + Location string `json:"location"` + Name *string `json:"name,omitempty"` + Properties *AccessProfile `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteraddonprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteraddonprofile.go new file mode 100644 index 000000000000..7dc1f865d9df --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteraddonprofile.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterAddonProfile struct { + Config *map[string]string `json:"config,omitempty"` + Enabled bool `json:"enabled"` + Identity *UserAssignedIdentity `json:"identity,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteragentpoolprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteragentpoolprofile.go new file mode 100644 index 000000000000..f52123b5f215 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteragentpoolprofile.go @@ -0,0 +1,53 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
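ManagedClusterAddonProfile in this API version models Enabled as a plain bool and Config as an optional *map[string]string. A small illustrative program (the "exampleSetting" key is made up) showing the resulting JSON shape:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters"
)

func main() {
	// "exampleSetting" is a hypothetical config key used purely for illustration.
	cfg := map[string]string{"exampleSetting": "exampleValue"}
	profile := managedclusters.ManagedClusterAddonProfile{
		Enabled: true,
		Config:  &cfg,
	}
	b, err := json.Marshal(profile)
	if err != nil {
		panic(err)
	}
	// Prints: {"config":{"exampleSetting":"exampleValue"},"enabled":true}
	// (Identity is nil and therefore omitted thanks to the omitempty tag.)
	fmt.Println(string(b))
}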
+ +type ManagedClusterAgentPoolProfile struct { + AvailabilityZones *[]string `json:"availabilityZones,omitempty"` + CapacityReservationGroupID *string `json:"capacityReservationGroupID,omitempty"` + Count *int64 `json:"count,omitempty"` + CreationData *CreationData `json:"creationData,omitempty"` + CurrentOrchestratorVersion *string `json:"currentOrchestratorVersion,omitempty"` + EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"` + EnableCustomCATrust *bool `json:"enableCustomCATrust,omitempty"` + EnableEncryptionAtHost *bool `json:"enableEncryptionAtHost,omitempty"` + EnableFIPS *bool `json:"enableFIPS,omitempty"` + EnableNodePublicIP *bool `json:"enableNodePublicIP,omitempty"` + EnableUltraSSD *bool `json:"enableUltraSSD,omitempty"` + GpuInstanceProfile *GPUInstanceProfile `json:"gpuInstanceProfile,omitempty"` + HostGroupID *string `json:"hostGroupID,omitempty"` + KubeletConfig *KubeletConfig `json:"kubeletConfig,omitempty"` + KubeletDiskType *KubeletDiskType `json:"kubeletDiskType,omitempty"` + LinuxOSConfig *LinuxOSConfig `json:"linuxOSConfig,omitempty"` + MaxCount *int64 `json:"maxCount,omitempty"` + MaxPods *int64 `json:"maxPods,omitempty"` + MessageOfTheDay *string `json:"messageOfTheDay,omitempty"` + MinCount *int64 `json:"minCount,omitempty"` + Mode *AgentPoolMode `json:"mode,omitempty"` + Name string `json:"name"` + NodeImageVersion *string `json:"nodeImageVersion,omitempty"` + NodeLabels *map[string]string `json:"nodeLabels,omitempty"` + NodePublicIPPrefixID *string `json:"nodePublicIPPrefixID,omitempty"` + NodeTaints *[]string `json:"nodeTaints,omitempty"` + OrchestratorVersion *string `json:"orchestratorVersion,omitempty"` + OsDiskSizeGB *int64 `json:"osDiskSizeGB,omitempty"` + OsDiskType *OSDiskType `json:"osDiskType,omitempty"` + OsSKU *OSSKU `json:"osSKU,omitempty"` + OsType *OSType `json:"osType,omitempty"` + PodSubnetID *string `json:"podSubnetID,omitempty"` + PowerState *PowerState `json:"powerState,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + ProximityPlacementGroupID *string `json:"proximityPlacementGroupID,omitempty"` + ScaleDownMode *ScaleDownMode `json:"scaleDownMode,omitempty"` + ScaleSetEvictionPolicy *ScaleSetEvictionPolicy `json:"scaleSetEvictionPolicy,omitempty"` + ScaleSetPriority *ScaleSetPriority `json:"scaleSetPriority,omitempty"` + SpotMaxPrice *float64 `json:"spotMaxPrice,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *AgentPoolType `json:"type,omitempty"` + UpgradeSettings *AgentPoolUpgradeSettings `json:"upgradeSettings,omitempty"` + VmSize *string `json:"vmSize,omitempty"` + VnetSubnetID *string `json:"vnetSubnetID,omitempty"` + WindowsProfile *AgentPoolWindowsProfile `json:"windowsProfile,omitempty"` + WorkloadRuntime *WorkloadRuntime `json:"workloadRuntime,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterapiserveraccessprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterapiserveraccessprofile.go new file mode 100644 index 000000000000..69b53ef85a7d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterapiserveraccessprofile.go @@ -0,0 +1,14 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ManagedClusterAPIServerAccessProfile struct { + AuthorizedIPRanges *[]string `json:"authorizedIPRanges,omitempty"` + DisableRunCommand *bool `json:"disableRunCommand,omitempty"` + EnablePrivateCluster *bool `json:"enablePrivateCluster,omitempty"` + EnablePrivateClusterPublicFQDN *bool `json:"enablePrivateClusterPublicFQDN,omitempty"` + EnableVnetIntegration *bool `json:"enableVnetIntegration,omitempty"` + PrivateDNSZone *string `json:"privateDNSZone,omitempty"` + SubnetId *string `json:"subnetId,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterautoupgradeprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterautoupgradeprofile.go new file mode 100644 index 000000000000..9c3598b8b394 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterautoupgradeprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterAutoUpgradeProfile struct { + UpgradeChannel *UpgradeChannel `json:"upgradeChannel,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterazuremonitorprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterazuremonitorprofile.go new file mode 100644 index 000000000000..8a18140e45a8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterazuremonitorprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterAzureMonitorProfile struct { + Metrics *ManagedClusterAzureMonitorProfileMetrics `json:"metrics,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterazuremonitorprofilekubestatemetrics.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterazuremonitorprofilekubestatemetrics.go new file mode 100644 index 000000000000..ef0fb9066e28 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterazuremonitorprofilekubestatemetrics.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterAzureMonitorProfileKubeStateMetrics struct { + MetricAnnotationsAllowList *string `json:"metricAnnotationsAllowList,omitempty"` + MetricLabelsAllowlist *string `json:"metricLabelsAllowlist,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterazuremonitorprofilemetrics.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterazuremonitorprofilemetrics.go new file mode 100644 index 000000000000..5c173441517a --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterazuremonitorprofilemetrics.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterAzureMonitorProfileMetrics struct { + Enabled bool `json:"enabled"` + KubeStateMetrics *ManagedClusterAzureMonitorProfileKubeStateMetrics `json:"kubeStateMetrics,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterhttpproxyconfig.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterhttpproxyconfig.go new file mode 100644 index 000000000000..b9f467853876 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterhttpproxyconfig.go @@ -0,0 +1,12 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterHTTPProxyConfig struct { + EffectiveNoProxy *[]string `json:"effectiveNoProxy,omitempty"` + HTTPProxy *string `json:"httpProxy,omitempty"` + HTTPSProxy *string `json:"httpsProxy,omitempty"` + NoProxy *[]string `json:"noProxy,omitempty"` + TrustedCa *string `json:"trustedCa,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteringressprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteringressprofile.go new file mode 100644 index 000000000000..ef54c8e80692 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteringressprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterIngressProfile struct { + WebAppRouting *ManagedClusterIngressProfileWebAppRouting `json:"webAppRouting,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteringressprofilewebapprouting.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteringressprofilewebapprouting.go new file mode 100644 index 000000000000..32d89b359714 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteringressprofilewebapprouting.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterIngressProfileWebAppRouting struct { + DnsZoneResourceId *string `json:"dnsZoneResourceId,omitempty"` + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterloadbalancerprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterloadbalancerprofile.go new file mode 100644 index 000000000000..0d2207851bd1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterloadbalancerprofile.go @@ -0,0 +1,15 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterLoadBalancerProfile struct { + AllocatedOutboundPorts *int64 `json:"allocatedOutboundPorts,omitempty"` + BackendPoolType *BackendPoolType `json:"backendPoolType,omitempty"` + EffectiveOutboundIPs *[]ResourceReference `json:"effectiveOutboundIPs,omitempty"` + EnableMultipleStandardLoadBalancers *bool `json:"enableMultipleStandardLoadBalancers,omitempty"` + IdleTimeoutInMinutes *int64 `json:"idleTimeoutInMinutes,omitempty"` + ManagedOutboundIPs *ManagedClusterLoadBalancerProfileManagedOutboundIPs `json:"managedOutboundIPs,omitempty"` + OutboundIPPrefixes *ManagedClusterLoadBalancerProfileOutboundIPPrefixes `json:"outboundIPPrefixes,omitempty"` + OutboundIPs *ManagedClusterLoadBalancerProfileOutboundIPs `json:"outboundIPs,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterloadbalancerprofilemanagedoutboundips.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterloadbalancerprofilemanagedoutboundips.go new file mode 100644 index 000000000000..b8d24680fed6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterloadbalancerprofilemanagedoutboundips.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterLoadBalancerProfileManagedOutboundIPs struct { + Count *int64 `json:"count,omitempty"` + CountIPv6 *int64 `json:"countIPv6,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterloadbalancerprofileoutboundipprefixes.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterloadbalancerprofileoutboundipprefixes.go new file mode 100644 index 000000000000..76893c76d85f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterloadbalancerprofileoutboundipprefixes.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterLoadBalancerProfileOutboundIPPrefixes struct { + PublicIPPrefixes *[]ResourceReference `json:"publicIPPrefixes,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterloadbalancerprofileoutboundips.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterloadbalancerprofileoutboundips.go new file mode 100644 index 000000000000..f0a60f5817e8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterloadbalancerprofileoutboundips.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterLoadBalancerProfileOutboundIPs struct { + PublicIPs *[]ResourceReference `json:"publicIPs,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustermanagedoutboundipprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustermanagedoutboundipprofile.go new file mode 100644 index 000000000000..ec079b6cdf43 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustermanagedoutboundipprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterManagedOutboundIPProfile struct { + Count *int64 `json:"count,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusternatgatewayprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusternatgatewayprofile.go new file mode 100644 index 000000000000..a7e37699c62a --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusternatgatewayprofile.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterNATGatewayProfile struct { + EffectiveOutboundIPs *[]ResourceReference `json:"effectiveOutboundIPs,omitempty"` + IdleTimeoutInMinutes *int64 `json:"idleTimeoutInMinutes,omitempty"` + ManagedOutboundIPProfile *ManagedClusterManagedOutboundIPProfile `json:"managedOutboundIPProfile,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteroidcissuerprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteroidcissuerprofile.go new file mode 100644 index 000000000000..b5d3be88f7dc --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusteroidcissuerprofile.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterOIDCIssuerProfile struct { + Enabled *bool `json:"enabled,omitempty"` + IssuerURL *string `json:"issuerURL,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentity.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentity.go new file mode 100644 index 000000000000..3bfb67996454 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentity.go @@ -0,0 +1,13 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterPodIdentity struct { + BindingSelector *string `json:"bindingSelector,omitempty"` + Identity UserAssignedIdentity `json:"identity"` + Name string `json:"name"` + Namespace string `json:"namespace"` + ProvisioningInfo *ManagedClusterPodIdentityProvisioningInfo `json:"provisioningInfo,omitempty"` + ProvisioningState *ManagedClusterPodIdentityProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityexception.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityexception.go new file mode 100644 index 000000000000..28a710f9b5cc --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityexception.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterPodIdentityException struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + PodLabels map[string]string `json:"podLabels"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityprofile.go new file mode 100644 index 000000000000..e32db30028af --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityprofile.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterPodIdentityProfile struct { + AllowNetworkPluginKubenet *bool `json:"allowNetworkPluginKubenet,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + UserAssignedIdentities *[]ManagedClusterPodIdentity `json:"userAssignedIdentities,omitempty"` + UserAssignedIdentityExceptions *[]ManagedClusterPodIdentityException `json:"userAssignedIdentityExceptions,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityprovisioningerror.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityprovisioningerror.go new file mode 100644 index 000000000000..1fb66d9034f6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityprovisioningerror.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterPodIdentityProvisioningError struct { + Error *ManagedClusterPodIdentityProvisioningErrorBody `json:"error,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityprovisioningerrorbody.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityprovisioningerrorbody.go new file mode 100644 index 000000000000..db4466f80bfe --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityprovisioningerrorbody.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterPodIdentityProvisioningErrorBody struct { + Code *string `json:"code,omitempty"` + Details *[]ManagedClusterPodIdentityProvisioningErrorBody `json:"details,omitempty"` + Message *string `json:"message,omitempty"` + Target *string `json:"target,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityprovisioninginfo.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityprovisioninginfo.go new file mode 100644 index 000000000000..60464e39c822 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpodidentityprovisioninginfo.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterPodIdentityProvisioningInfo struct { + Error *ManagedClusterPodIdentityProvisioningError `json:"error,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpoolupgradeprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpoolupgradeprofile.go new file mode 100644 index 000000000000..7bafa5df9722 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpoolupgradeprofile.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterPoolUpgradeProfile struct { + KubernetesVersion string `json:"kubernetesVersion"` + Name *string `json:"name,omitempty"` + OsType OSType `json:"osType"` + Upgrades *[]ManagedClusterPoolUpgradeProfileUpgradesInlined `json:"upgrades,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpoolupgradeprofileupgradesinlined.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpoolupgradeprofileupgradesinlined.go new file mode 100644 index 000000000000..ccc02dd99577 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpoolupgradeprofileupgradesinlined.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterPoolUpgradeProfileUpgradesInlined struct { + IsPreview *bool `json:"isPreview,omitempty"` + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterproperties.go new file mode 100644 index 000000000000..b69cdd6fddb5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterproperties.go @@ -0,0 +1,46 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterProperties struct { + AadProfile *ManagedClusterAADProfile `json:"aadProfile,omitempty"` + AddonProfiles *map[string]ManagedClusterAddonProfile `json:"addonProfiles,omitempty"` + AgentPoolProfiles *[]ManagedClusterAgentPoolProfile `json:"agentPoolProfiles,omitempty"` + ApiServerAccessProfile *ManagedClusterAPIServerAccessProfile `json:"apiServerAccessProfile,omitempty"` + AutoScalerProfile *ManagedClusterPropertiesAutoScalerProfile `json:"autoScalerProfile,omitempty"` + AutoUpgradeProfile *ManagedClusterAutoUpgradeProfile `json:"autoUpgradeProfile,omitempty"` + AzureMonitorProfile *ManagedClusterAzureMonitorProfile `json:"azureMonitorProfile,omitempty"` + AzurePortalFQDN *string `json:"azurePortalFQDN,omitempty"` + CreationData *CreationData `json:"creationData,omitempty"` + CurrentKubernetesVersion *string `json:"currentKubernetesVersion,omitempty"` + DisableLocalAccounts *bool `json:"disableLocalAccounts,omitempty"` + DiskEncryptionSetID *string `json:"diskEncryptionSetID,omitempty"` + DnsPrefix *string `json:"dnsPrefix,omitempty"` + EnableNamespaceResources *bool `json:"enableNamespaceResources,omitempty"` + EnablePodSecurityPolicy *bool `json:"enablePodSecurityPolicy,omitempty"` + EnableRBAC *bool `json:"enableRBAC,omitempty"` + Fqdn *string `json:"fqdn,omitempty"` + FqdnSubdomain *string `json:"fqdnSubdomain,omitempty"` + GuardrailsProfile *GuardrailsProfile `json:"guardrailsProfile,omitempty"` + HTTPProxyConfig *ManagedClusterHTTPProxyConfig `json:"httpProxyConfig,omitempty"` + IdentityProfile *map[string]UserAssignedIdentity `json:"identityProfile,omitempty"` + IngressProfile *ManagedClusterIngressProfile `json:"ingressProfile,omitempty"` + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` + LinuxProfile *ContainerServiceLinuxProfile `json:"linuxProfile,omitempty"` + MaxAgentPools *int64 `json:"maxAgentPools,omitempty"` + NetworkProfile *ContainerServiceNetworkProfile `json:"networkProfile,omitempty"` + NodeResourceGroup *string `json:"nodeResourceGroup,omitempty"` + OidcIssuerProfile *ManagedClusterOIDCIssuerProfile `json:"oidcIssuerProfile,omitempty"` + PodIdentityProfile *ManagedClusterPodIdentityProfile `json:"podIdentityProfile,omitempty"` + PowerState *PowerState `json:"powerState,omitempty"` + PrivateFQDN *string `json:"privateFQDN,omitempty"` + PrivateLinkResources *[]PrivateLinkResource `json:"privateLinkResources,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + PublicNetworkAccess *PublicNetworkAccess `json:"publicNetworkAccess,omitempty"` + SecurityProfile *ManagedClusterSecurityProfile `json:"securityProfile,omitempty"` + 
ServicePrincipalProfile *ManagedClusterServicePrincipalProfile `json:"servicePrincipalProfile,omitempty"` + StorageProfile *ManagedClusterStorageProfile `json:"storageProfile,omitempty"` + WindowsProfile *ManagedClusterWindowsProfile `json:"windowsProfile,omitempty"` + WorkloadAutoScalerProfile *ManagedClusterWorkloadAutoScalerProfile `json:"workloadAutoScalerProfile,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpropertiesautoscalerprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpropertiesautoscalerprofile.go new file mode 100644 index 000000000000..17fe396efa81 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterpropertiesautoscalerprofile.go @@ -0,0 +1,24 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterPropertiesAutoScalerProfile struct { + BalanceSimilarNodeGroups *string `json:"balance-similar-node-groups,omitempty"` + Expander *Expander `json:"expander,omitempty"` + MaxEmptyBulkDelete *string `json:"max-empty-bulk-delete,omitempty"` + MaxGracefulTerminationSec *string `json:"max-graceful-termination-sec,omitempty"` + MaxNodeProvisionTime *string `json:"max-node-provision-time,omitempty"` + MaxTotalUnreadyPercentage *string `json:"max-total-unready-percentage,omitempty"` + NewPodScaleUpDelay *string `json:"new-pod-scale-up-delay,omitempty"` + OkTotalUnreadyCount *string `json:"ok-total-unready-count,omitempty"` + ScaleDownDelayAfterAdd *string `json:"scale-down-delay-after-add,omitempty"` + ScaleDownDelayAfterDelete *string `json:"scale-down-delay-after-delete,omitempty"` + ScaleDownDelayAfterFailure *string `json:"scale-down-delay-after-failure,omitempty"` + ScaleDownUnneededTime *string `json:"scale-down-unneeded-time,omitempty"` + ScaleDownUnreadyTime *string `json:"scale-down-unready-time,omitempty"` + ScaleDownUtilizationThreshold *string `json:"scale-down-utilization-threshold,omitempty"` + ScanInterval *string `json:"scan-interval,omitempty"` + SkipNodesWithLocalStorage *string `json:"skip-nodes-with-local-storage,omitempty"` + SkipNodesWithSystemPods *string `json:"skip-nodes-with-system-pods,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofile.go new file mode 100644 index 000000000000..63a2cc3ae029 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofile.go @@ -0,0 +1,12 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
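Illustrative aside (not part of the vendored diff): the auto-scaler profile above models every setting as an optional *string with kebab-case JSON keys, even the numeric and duration style values. A minimal Go sketch, assuming the vendored managedclusters package as shown in this diff, of how that shape serializes:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters"
)

func main() {
	scanInterval := "10s"
	maxGraceful := "600"

	// All auto-scaler settings are strings in this API version; unset
	// pointer fields are dropped by the omitempty tags.
	profile := managedclusters.ManagedClusterPropertiesAutoScalerProfile{
		MaxGracefulTerminationSec: &maxGraceful,
		ScanInterval:              &scanInterval,
	}

	out, _ := json.Marshal(profile)
	fmt.Println(string(out)) // {"max-graceful-termination-sec":"600","scan-interval":"10s"}
}

The example values ("10s", "600") are placeholders, not values taken from this change.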
+ +type ManagedClusterSecurityProfile struct { + AzureKeyVaultKms *AzureKeyVaultKms `json:"azureKeyVaultKms,omitempty"` + Defender *ManagedClusterSecurityProfileDefender `json:"defender,omitempty"` + ImageCleaner *ManagedClusterSecurityProfileImageCleaner `json:"imageCleaner,omitempty"` + NodeRestriction *ManagedClusterSecurityProfileNodeRestriction `json:"nodeRestriction,omitempty"` + WorkloadIdentity *ManagedClusterSecurityProfileWorkloadIdentity `json:"workloadIdentity,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofiledefender.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofiledefender.go new file mode 100644 index 000000000000..c63debde097a --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofiledefender.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterSecurityProfileDefender struct { + LogAnalyticsWorkspaceResourceId *string `json:"logAnalyticsWorkspaceResourceId,omitempty"` + SecurityMonitoring *ManagedClusterSecurityProfileDefenderSecurityMonitoring `json:"securityMonitoring,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofiledefendersecuritymonitoring.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofiledefendersecuritymonitoring.go new file mode 100644 index 000000000000..de84c795fecc --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofiledefendersecuritymonitoring.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterSecurityProfileDefenderSecurityMonitoring struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofileimagecleaner.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofileimagecleaner.go new file mode 100644 index 000000000000..eb3e2ea70073 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofileimagecleaner.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterSecurityProfileImageCleaner struct { + Enabled *bool `json:"enabled,omitempty"` + IntervalHours *int64 `json:"intervalHours,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofilenoderestriction.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofilenoderestriction.go new file mode 100644 index 000000000000..6730a946379f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofilenoderestriction.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterSecurityProfileNodeRestriction struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofileworkloadidentity.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofileworkloadidentity.go new file mode 100644 index 000000000000..84f0312767bb --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersecurityprofileworkloadidentity.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterSecurityProfileWorkloadIdentity struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterserviceprincipalprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterserviceprincipalprofile.go new file mode 100644 index 000000000000..2a03beecdcc7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterserviceprincipalprofile.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterServicePrincipalProfile struct { + ClientId string `json:"clientId"` + Secret *string `json:"secret,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersku.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersku.go new file mode 100644 index 000000000000..a4dcf9c50d3d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclustersku.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterSKU struct { + Name *ManagedClusterSKUName `json:"name,omitempty"` + Tier *ManagedClusterSKUTier `json:"tier,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofile.go new file mode 100644 index 000000000000..11073ae103dc --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofile.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterStorageProfile struct { + BlobCSIDriver *ManagedClusterStorageProfileBlobCSIDriver `json:"blobCSIDriver,omitempty"` + DiskCSIDriver *ManagedClusterStorageProfileDiskCSIDriver `json:"diskCSIDriver,omitempty"` + FileCSIDriver *ManagedClusterStorageProfileFileCSIDriver `json:"fileCSIDriver,omitempty"` + SnapshotController *ManagedClusterStorageProfileSnapshotController `json:"snapshotController,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofileblobcsidriver.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofileblobcsidriver.go new file mode 100644 index 000000000000..dc0e951be6b5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofileblobcsidriver.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterStorageProfileBlobCSIDriver struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofilediskcsidriver.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofilediskcsidriver.go new file mode 100644 index 000000000000..5a12e13453f0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofilediskcsidriver.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterStorageProfileDiskCSIDriver struct { + Enabled *bool `json:"enabled,omitempty"` + Version *string `json:"version,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofilefilecsidriver.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofilefilecsidriver.go new file mode 100644 index 000000000000..986ece9d9332 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofilefilecsidriver.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterStorageProfileFileCSIDriver struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofilesnapshotcontroller.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofilesnapshotcontroller.go new file mode 100644 index 000000000000..d46ee10b6835 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterstorageprofilesnapshotcontroller.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterStorageProfileSnapshotController struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterupgradeprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterupgradeprofile.go new file mode 100644 index 000000000000..86274cdb14a4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterupgradeprofile.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterUpgradeProfile struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties ManagedClusterUpgradeProfileProperties `json:"properties"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterupgradeprofileproperties.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterupgradeprofileproperties.go new file mode 100644 index 000000000000..4db4b1267606 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterupgradeprofileproperties.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterUpgradeProfileProperties struct { + AgentPoolProfiles []ManagedClusterPoolUpgradeProfile `json:"agentPoolProfiles"` + ControlPlaneProfile ManagedClusterPoolUpgradeProfile `json:"controlPlaneProfile"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterwindowsprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterwindowsprofile.go new file mode 100644 index 000000000000..8e3cffdace86 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterwindowsprofile.go @@ -0,0 +1,12 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterWindowsProfile struct { + AdminPassword *string `json:"adminPassword,omitempty"` + AdminUsername string `json:"adminUsername"` + EnableCSIProxy *bool `json:"enableCSIProxy,omitempty"` + GmsaProfile *WindowsGmsaProfile `json:"gmsaProfile,omitempty"` + LicenseType *LicenseType `json:"licenseType,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterworkloadautoscalerprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterworkloadautoscalerprofile.go new file mode 100644 index 000000000000..891a44a0084d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterworkloadautoscalerprofile.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterWorkloadAutoScalerProfile struct { + Keda *ManagedClusterWorkloadAutoScalerProfileKeda `json:"keda,omitempty"` + VerticalPodAutoscaler *ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler `json:"verticalPodAutoscaler,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterworkloadautoscalerprofilekeda.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterworkloadautoscalerprofilekeda.go new file mode 100644 index 000000000000..7115a06bfb3e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterworkloadautoscalerprofilekeda.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterWorkloadAutoScalerProfileKeda struct { + Enabled bool `json:"enabled"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterworkloadautoscalerprofileverticalpodautoscaler.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterworkloadautoscalerprofileverticalpodautoscaler.go new file mode 100644 index 000000000000..de1c2badc46e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_managedclusterworkloadautoscalerprofileverticalpodautoscaler.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler struct { + ControlledValues ControlledValues `json:"controlledValues"` + Enabled bool `json:"enabled"` + UpdateMode UpdateMode `json:"updateMode"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_osoptionprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_osoptionprofile.go new file mode 100644 index 000000000000..27b0bbc0c20c --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_osoptionprofile.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type OSOptionProfile struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties OSOptionPropertyList `json:"properties"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_osoptionproperty.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_osoptionproperty.go new file mode 100644 index 000000000000..4c8a976ae2d2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_osoptionproperty.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type OSOptionProperty struct { + EnableFipsImage bool `json:"enable-fips-image"` + OsType string `json:"os-type"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_osoptionpropertylist.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_osoptionpropertylist.go new file mode 100644 index 000000000000..5c126250b2cd --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_osoptionpropertylist.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type OSOptionPropertyList struct { + OsOptionPropertyList []OSOptionProperty `json:"osOptionPropertyList"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_outboundenvironmentendpoint.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_outboundenvironmentendpoint.go new file mode 100644 index 000000000000..a4f0d234bc80 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_outboundenvironmentendpoint.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type OutboundEnvironmentEndpoint struct { + Category *string `json:"category,omitempty"` + Endpoints *[]EndpointDependency `json:"endpoints,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_powerstate.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_powerstate.go new file mode 100644 index 000000000000..d5abeb15b66a --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_powerstate.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PowerState struct { + Code *Code `json:"code,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_privatelinkresource.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_privatelinkresource.go new file mode 100644 index 000000000000..14d827574be5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_privatelinkresource.go @@ -0,0 +1,13 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PrivateLinkResource struct { + GroupId *string `json:"groupId,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + PrivateLinkServiceID *string `json:"privateLinkServiceID,omitempty"` + RequiredMembers *[]string `json:"requiredMembers,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_resourcereference.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_resourcereference.go new file mode 100644 index 000000000000..dbb05bb4711d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_resourcereference.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ResourceReference struct { + Id *string `json:"id,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_runcommandrequest.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_runcommandrequest.go new file mode 100644 index 000000000000..caa9ae3d0cdc --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_runcommandrequest.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RunCommandRequest struct { + ClusterToken *string `json:"clusterToken,omitempty"` + Command string `json:"command"` + Context *string `json:"context,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_runcommandresult.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_runcommandresult.go new file mode 100644 index 000000000000..59cfc374de3d --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_runcommandresult.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RunCommandResult struct { + Id *string `json:"id,omitempty"` + Properties *CommandResultProperties `json:"properties,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_sysctlconfig.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_sysctlconfig.go new file mode 100644 index 000000000000..b354a63d4540 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_sysctlconfig.go @@ -0,0 +1,35 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SysctlConfig struct { + FsAioMaxNr *int64 `json:"fsAioMaxNr,omitempty"` + FsFileMax *int64 `json:"fsFileMax,omitempty"` + FsInotifyMaxUserWatches *int64 `json:"fsInotifyMaxUserWatches,omitempty"` + FsNrOpen *int64 `json:"fsNrOpen,omitempty"` + KernelThreadsMax *int64 `json:"kernelThreadsMax,omitempty"` + NetCoreNetdevMaxBacklog *int64 `json:"netCoreNetdevMaxBacklog,omitempty"` + NetCoreOptmemMax *int64 `json:"netCoreOptmemMax,omitempty"` + NetCoreRmemDefault *int64 `json:"netCoreRmemDefault,omitempty"` + NetCoreRmemMax *int64 `json:"netCoreRmemMax,omitempty"` + NetCoreSomaxconn *int64 `json:"netCoreSomaxconn,omitempty"` + NetCoreWmemDefault *int64 `json:"netCoreWmemDefault,omitempty"` + NetCoreWmemMax *int64 `json:"netCoreWmemMax,omitempty"` + NetIPv4IPLocalPortRange *string `json:"netIpv4IpLocalPortRange,omitempty"` + NetIPv4NeighDefaultGcThresh1 *int64 `json:"netIpv4NeighDefaultGcThresh1,omitempty"` + NetIPv4NeighDefaultGcThresh2 *int64 `json:"netIpv4NeighDefaultGcThresh2,omitempty"` + NetIPv4NeighDefaultGcThresh3 *int64 `json:"netIpv4NeighDefaultGcThresh3,omitempty"` + NetIPv4TcpFinTimeout *int64 `json:"netIpv4TcpFinTimeout,omitempty"` + NetIPv4TcpKeepaliveProbes *int64 `json:"netIpv4TcpKeepaliveProbes,omitempty"` + NetIPv4TcpKeepaliveTime *int64 `json:"netIpv4TcpKeepaliveTime,omitempty"` + NetIPv4TcpMaxSynBacklog *int64 `json:"netIpv4TcpMaxSynBacklog,omitempty"` + NetIPv4TcpMaxTwBuckets *int64 `json:"netIpv4TcpMaxTwBuckets,omitempty"` + NetIPv4TcpTwReuse *bool `json:"netIpv4TcpTwReuse,omitempty"` + NetIPv4TcpkeepaliveIntvl *int64 `json:"netIpv4TcpkeepaliveIntvl,omitempty"` + NetNetfilterNfConntrackBuckets *int64 `json:"netNetfilterNfConntrackBuckets,omitempty"` + NetNetfilterNfConntrackMax *int64 `json:"netNetfilterNfConntrackMax,omitempty"` + VmMaxMapCount *int64 `json:"vmMaxMapCount,omitempty"` + VmSwappiness *int64 `json:"vmSwappiness,omitempty"` + VmVfsCachePressure *int64 `json:"vmVfsCachePressure,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_tagsobject.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_tagsobject.go new file mode 100644 index 000000000000..69f1454e3b11 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_tagsobject.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TagsObject struct { + Tags *map[string]string `json:"tags,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_userassignedidentity.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_userassignedidentity.go new file mode 100644 index 000000000000..b1eecd7085aa --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_userassignedidentity.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type UserAssignedIdentity struct { + ClientId *string `json:"clientId,omitempty"` + ObjectId *string `json:"objectId,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_windowsgmsaprofile.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_windowsgmsaprofile.go new file mode 100644 index 000000000000..742f96764d45 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/model_windowsgmsaprofile.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type WindowsGmsaProfile struct { + DnsServer *string `json:"dnsServer,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + RootDomainName *string `json:"rootDomainName,omitempty"` +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/predicates.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/predicates.go new file mode 100644 index 000000000000..7400b76ca311 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/predicates.go @@ -0,0 +1,42 @@ +package managedclusters + +type ManagedClusterOperationPredicate struct { + Id *string + Location *string + Name *string + Type *string +} + +func (p ManagedClusterOperationPredicate) Matches(input ManagedCluster) bool { + + if p.Id != nil && (input.Id == nil && *p.Id != *input.Id) { + return false + } + + if p.Location != nil && *p.Location != input.Location { + return false + } + + if p.Name != nil && (input.Name == nil && *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil && *p.Type != *input.Type) { + return false + } + + return true +} + +type OutboundEnvironmentEndpointOperationPredicate struct { + Category *string +} + +func (p OutboundEnvironmentEndpointOperationPredicate) Matches(input OutboundEnvironmentEndpoint) bool { + + if p.Category != nil && (input.Category == nil && *p.Category != *input.Category) { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/version.go b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/version.go new file mode 100644 index 000000000000..572d7cec2aa8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters/version.go @@ -0,0 +1,12 @@ +package managedclusters + +import "fmt" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2022-08-02-preview" + +func userAgent() string { + return fmt.Sprintf("hashicorp/go-azure-sdk/managedclusters/%s", defaultApiVersion) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 6aa327e3849d..35901b1850b9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -45,7 +45,6 @@ github.com/Azure/azure-sdk-for-go/services/preview/automation/mgmt/2020-01-13-pr github.com/Azure/azure-sdk-for-go/services/preview/blueprint/mgmt/2018-11-01-preview/blueprint github.com/Azure/azure-sdk-for-go/services/preview/botservice/mgmt/2021-05-01-preview/botservice github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/mgmt/2021-08-01-preview/containerregistry -github.com/Azure/azure-sdk-for-go/services/preview/containerservice/mgmt/2022-03-02-preview/containerservice github.com/Azure/azure-sdk-for-go/services/preview/customproviders/mgmt/2018-09-01-preview/customproviders github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2021-07-01-preview/insights github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2021-09-01-preview/insights @@ -213,6 +212,9 @@ github.com/hashicorp/go-azure-sdk/resource-manager/compute/2022-03-02/disks github.com/hashicorp/go-azure-sdk/resource-manager/confidentialledger/2022-05-13/confidentialledger github.com/hashicorp/go-azure-sdk/resource-manager/consumption/2019-10-01/budgets github.com/hashicorp/go-azure-sdk/resource-manager/containerinstance/2021-10-01/containerinstance +github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/agentpools +github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/maintenanceconfigurations +github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters github.com/hashicorp/go-azure-sdk/resource-manager/cosmosdb/2022-05-15/cosmosdb github.com/hashicorp/go-azure-sdk/resource-manager/cosmosdb/2022-05-15/managedcassandras github.com/hashicorp/go-azure-sdk/resource-manager/cosmosdb/2022-05-15/sqldedicatedgateway
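
A minimal usage sketch (not part of the vendored files above) of the required-versus-optional field convention these generated models follow: required properties such as RunCommandRequest.Command are plain values, while optional properties (ClusterToken, Context, and similar pointer fields on PowerState, TagsObject, etc.) are pointers dropped from the JSON payload when nil. The token value is a hypothetical placeholder.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters"
)

func main() {
	// Optional fields are pointers; leaving them nil omits them from the
	// serialized payload via the `omitempty` tags shown in the diff.
	token := "hypothetical-cluster-token"

	req := managedclusters.RunCommandRequest{
		Command:      "kubectl get pods -A", // required: plain string, always serialized
		ClusterToken: &token,                // optional: *string, omitted when nil
		// Context is left nil and therefore not sent.
	}

	payload, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
	// {"clusterToken":"hypothetical-cluster-token","command":"kubectl get pods -A"}
}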
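
Likewise a sketch, not taken from the diff, of how the generated operation predicates in predicates.go might be exercised for client-side filtering. It assumes ManagedCluster exposes Name as *string and Location as string, which is what the Matches implementation above dereferences; the cluster name and location are hypothetical.

package main

import (
	"fmt"

	"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2022-08-02-preview/managedclusters"
)

func main() {
	name := "example-aks-cluster"

	// Unset predicate fields are ignored by Matches, so this filters on the
	// cluster name alone.
	predicate := managedclusters.ManagedClusterOperationPredicate{
		Name: &name,
	}

	// A cluster value shaped like one returned from a List operation; only
	// the fields consulted by Matches are populated here.
	cluster := managedclusters.ManagedCluster{
		Name:     &name,
		Location: "westeurope",
	}

	if predicate.Matches(cluster) {
		fmt.Printf("cluster %q matches the predicate\n", *cluster.Name)
	}
}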