From 6d66962c9c86742204805f270e8b6e9e2ff992d1 Mon Sep 17 00:00:00 2001 From: toniiiik <32296236+toniiiik@users.noreply.github.com> Date: Tue, 16 Jan 2024 11:07:21 +0100 Subject: [PATCH] ADD support for PodIdentityProvider config in azure pipeline trigger (#4867) Co-authored-by: Zbynek Roubalik --- CHANGELOG.md | 1 + pkg/scalers/azure_pipelines_scaler.go | 158 ++++++--- pkg/scalers/azure_pipelines_scaler_test.go | 16 +- .../azure_pipelines_aad_wi_test.go | 317 ++++++++++++++++++ 4 files changed, 448 insertions(+), 44 deletions(-) create mode 100644 tests/scalers/azure/azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 74b1916bcbb..adbdf421037 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -110,6 +110,7 @@ Here is an overview of all new **experimental** features: - **General**: Request all ScaledObject/ScaledJob triggers in parallel ([#5276](https://github.com/kedacore/keda/issues/5276)) - **General**: Support TriggerAuthentication properties from ConfigMap ([#4830](https://github.com/kedacore/keda/issues/4830)) - **General**: Use client-side round-robin load balancing for grpc calls ([#5224](https://github.com/kedacore/keda/issues/5224)) +- **Azure Pipelines Scaler**: Add support for workload identity authentication ([#5013](https://github.com/kedacore/keda/issues/5013)) - **GCP pubsub scaler**: Support distribution-valued metrics and metrics from topics ([#5070](https://github.com/kedacore/keda/issues/5070)) - **GCP stackdriver scaler**: Support valueIfNull parameter ([#5345](https://github.com/kedacore/keda/pull/5345)) - **Hashicorp Vault**: Add support to get secret that needs write operation (e.g. pki) ([#5067](https://github.com/kedacore/keda/issues/5067)) diff --git a/pkg/scalers/azure_pipelines_scaler.go b/pkg/scalers/azure_pipelines_scaler.go index 7bcdead70b9..e3b61095fe7 100644 --- a/pkg/scalers/azure_pipelines_scaler.go +++ b/pkg/scalers/azure_pipelines_scaler.go @@ -11,10 +11,15 @@ import ( "strings" "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" "k8s.io/metrics/pkg/apis/external_metrics" + kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + "github.com/kedacore/keda/v2/pkg/scalers/azure" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -27,6 +32,10 @@ type JobRequests struct { Value []JobRequest `json:"value"` } +const ( + devopsResource = "499b84ac-1321-427f-aa17-267ca6975798/.default" +) + type JobRequest struct { RequestID int `json:"requestId"` QueueTime time.Time `json:"queueTime"` @@ -119,16 +128,17 @@ type azurePipelinesPoolIDResponse struct { } type azurePipelinesScaler struct { - metricType v2.MetricTargetType - metadata *azurePipelinesMetadata - httpClient *http.Client - logger logr.Logger + metricType v2.MetricTargetType + metadata *azurePipelinesMetadata + httpClient *http.Client + podIdentity kedav1alpha1.AuthPodIdentity + logger logr.Logger } type azurePipelinesMetadata struct { organizationURL string organizationName string - personalAccessToken string + authContext authContext parent string demands string poolID int @@ -139,36 +149,68 @@ type azurePipelinesMetadata struct { requireAllDemands bool } +type authContext struct { + cred *azidentity.ChainedTokenCredential + pat string + token *azcore.AccessToken +} + // NewAzurePipelinesScaler creates a new AzurePipelinesScaler func NewAzurePipelinesScaler(ctx context.Context, 
config *ScalerConfig) (Scaler, error) { httpClient := kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false) + logger := InitializeLogger(config, "azure_pipelines_scaler") metricType, err := GetMetricTargetType(config) if err != nil { return nil, fmt.Errorf("error getting scaler metric type: %w", err) } - meta, err := parseAzurePipelinesMetadata(ctx, config, httpClient) + meta, podIdentity, err := parseAzurePipelinesMetadata(ctx, logger, config, httpClient) if err != nil { return nil, fmt.Errorf("error parsing azure Pipelines metadata: %w", err) } return &azurePipelinesScaler{ - metricType: metricType, - metadata: meta, - httpClient: httpClient, - logger: InitializeLogger(config, "azure_pipelines_scaler"), + metricType: metricType, + metadata: meta, + httpClient: httpClient, + podIdentity: podIdentity, + logger: logger, }, nil } -func parseAzurePipelinesMetadata(ctx context.Context, config *ScalerConfig, httpClient *http.Client) (*azurePipelinesMetadata, error) { +func getAuthMethod(logger logr.Logger, config *ScalerConfig) (string, *azidentity.ChainedTokenCredential, kedav1alpha1.AuthPodIdentity, error) { + pat := "" + if val, ok := config.AuthParams["personalAccessToken"]; ok && val != "" { + // Found the personalAccessToken in a parameter from TriggerAuthentication + pat = config.AuthParams["personalAccessToken"] + } else if val, ok := config.TriggerMetadata["personalAccessTokenFromEnv"]; ok && val != "" { + pat = config.ResolvedEnv[config.TriggerMetadata["personalAccessTokenFromEnv"]] + } else { + switch config.PodIdentity.Provider { + case "", kedav1alpha1.PodIdentityProviderNone: + return "", nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("no personalAccessToken given or PodIdentity provider configured") + case kedav1alpha1.PodIdentityProviderAzure, kedav1alpha1.PodIdentityProviderAzureWorkload: + cred, err := azure.NewChainedCredential(logger, config.PodIdentity.GetIdentityID(), config.PodIdentity.Provider) + if err != nil { + return "", nil, kedav1alpha1.AuthPodIdentity{}, err + } + return "", cred, kedav1alpha1.AuthPodIdentity{Provider: config.PodIdentity.Provider}, nil + default: + return "", nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("pod identity %s not supported for azure pipelines", config.PodIdentity.Provider) + } + } + return pat, nil, kedav1alpha1.AuthPodIdentity{}, nil +} + +func parseAzurePipelinesMetadata(ctx context.Context, logger logr.Logger, config *ScalerConfig, httpClient *http.Client) (*azurePipelinesMetadata, kedav1alpha1.AuthPodIdentity, error) { meta := azurePipelinesMetadata{} meta.targetPipelinesQueueLength = defaultTargetPipelinesQueueLength if val, ok := config.TriggerMetadata["targetPipelinesQueueLength"]; ok { queueLength, err := strconv.ParseInt(val, 10, 64) if err != nil { - return nil, fmt.Errorf("error parsing azure pipelines metadata targetPipelinesQueueLength: %w", err) + return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("error parsing azure pipelines metadata targetPipelinesQueueLength: %w", err) } meta.targetPipelinesQueueLength = queueLength @@ -178,7 +220,7 @@ func parseAzurePipelinesMetadata(ctx context.Context, config *ScalerConfig, http if val, ok := config.TriggerMetadata["activationTargetPipelinesQueueLength"]; ok { activationQueueLength, err := strconv.ParseInt(val, 10, 64) if err != nil { - return nil, fmt.Errorf("error parsing azure pipelines metadata activationTargetPipelinesQueueLength: %w", err) + return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("error parsing azure pipelines metadata 
activationTargetPipelinesQueueLength: %w", err) } meta.activationTargetPipelinesQueueLength = activationQueueLength @@ -190,22 +232,24 @@ func parseAzurePipelinesMetadata(ctx context.Context, config *ScalerConfig, http } else if val, ok := config.TriggerMetadata["organizationURLFromEnv"]; ok && val != "" { meta.organizationURL = config.ResolvedEnv[val] } else { - return nil, fmt.Errorf("no organizationURL given") + return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("no organizationURL given") } if val := meta.organizationURL[strings.LastIndex(meta.organizationURL, "/")+1:]; val != "" { meta.organizationName = meta.organizationURL[strings.LastIndex(meta.organizationURL, "/")+1:] } else { - return nil, fmt.Errorf("failed to extract organization name from organizationURL") + return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("failed to extract organization name from organizationURL") } - if val, ok := config.AuthParams["personalAccessToken"]; ok && val != "" { - // Found the personalAccessToken in a parameter from TriggerAuthentication - meta.personalAccessToken = config.AuthParams["personalAccessToken"] - } else if val, ok := config.TriggerMetadata["personalAccessTokenFromEnv"]; ok && val != "" { - meta.personalAccessToken = config.ResolvedEnv[config.TriggerMetadata["personalAccessTokenFromEnv"]] - } else { - return nil, fmt.Errorf("no personalAccessToken given") + pat, cred, podIdentity, err := getAuthMethod(logger, config) + if err != nil { + return nil, kedav1alpha1.AuthPodIdentity{}, err + } + // Trim any trailing new lines from the Azure Pipelines PAT + meta.authContext = authContext{ + pat: strings.TrimSuffix(pat, "\n"), + cred: cred, + token: nil, } if val, ok := config.TriggerMetadata["parent"]; ok && val != "" { @@ -224,7 +268,7 @@ func parseAzurePipelinesMetadata(ctx context.Context, config *ScalerConfig, http if val, ok := config.TriggerMetadata["jobsToFetch"]; ok && val != "" { jobsToFetch, err := strconv.ParseInt(val, 10, 64) if err != nil { - return nil, fmt.Errorf("error parsing jobsToFetch: %w", err) + return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("error parsing jobsToFetch: %w", err) } meta.jobsToFetch = jobsToFetch } @@ -233,41 +277,40 @@ func parseAzurePipelinesMetadata(ctx context.Context, config *ScalerConfig, http if val, ok := config.TriggerMetadata["requireAllDemands"]; ok && val != "" { requireAllDemands, err := strconv.ParseBool(val) if err != nil { - return nil, err + return nil, kedav1alpha1.AuthPodIdentity{}, err } meta.requireAllDemands = requireAllDemands } if val, ok := config.TriggerMetadata["poolName"]; ok && val != "" { var err error - poolID, err := getPoolIDFromName(ctx, val, &meta, httpClient) + poolID, err := getPoolIDFromName(ctx, logger, val, &meta, podIdentity, httpClient) if err != nil { - return nil, err + return nil, kedav1alpha1.AuthPodIdentity{}, err } meta.poolID = poolID } else { if val, ok := config.TriggerMetadata["poolID"]; ok && val != "" { var err error - poolID, err := validatePoolID(ctx, val, &meta, httpClient) + poolID, err := validatePoolID(ctx, logger, val, &meta, podIdentity, httpClient) if err != nil { - return nil, err + return nil, kedav1alpha1.AuthPodIdentity{}, err } meta.poolID = poolID } else { - return nil, fmt.Errorf("no poolName or poolID given") + return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("no poolName or poolID given") } } - // Trim any trailing new lines from the Azure Pipelines PAT - meta.personalAccessToken = strings.TrimSuffix(meta.personalAccessToken, "\n") meta.triggerIndex = 
config.TriggerIndex
 
-	return &meta, nil
+	return &meta, podIdentity, nil
 }
 
-func getPoolIDFromName(ctx context.Context, poolName string, metadata *azurePipelinesMetadata, httpClient *http.Client) (int, error) {
+func getPoolIDFromName(ctx context.Context, logger logr.Logger, poolName string, metadata *azurePipelinesMetadata, podIdentity kedav1alpha1.AuthPodIdentity, httpClient *http.Client) (int, error) {
 	urlString := fmt.Sprintf("%s/_apis/distributedtask/pools?poolName=%s", metadata.organizationURL, url.QueryEscape(poolName))
-	body, err := getAzurePipelineRequest(ctx, urlString, metadata, httpClient)
+	body, err := getAzurePipelineRequest(ctx, logger, urlString, metadata, podIdentity, httpClient)
+
 	if err != nil {
 		return -1, err
 	}
@@ -290,9 +333,10 @@ func getPoolIDFromName(ctx context.Context, poolName string, metadata *azurePipe
 	return result.Value[0].ID, nil
 }
 
-func validatePoolID(ctx context.Context, poolID string, metadata *azurePipelinesMetadata, httpClient *http.Client) (int, error) {
+func validatePoolID(ctx context.Context, logger logr.Logger, poolID string, metadata *azurePipelinesMetadata, podIdentity kedav1alpha1.AuthPodIdentity, httpClient *http.Client) (int, error) {
 	urlString := fmt.Sprintf("%s/_apis/distributedtask/pools?poolID=%s", metadata.organizationURL, poolID)
-	body, err := getAzurePipelineRequest(ctx, urlString, metadata, httpClient)
+	body, err := getAzurePipelineRequest(ctx, logger, urlString, metadata, podIdentity, httpClient)
+
 	if err != nil {
 		return -1, fmt.Errorf("agent pool with id `%s` not found: %w", poolID, err)
 	}
@@ -306,13 +350,49 @@ func validatePoolID(ctx context.Context, poolID string, metadata *azurePipelines
 	return result.ID, nil
 }
 
-func getAzurePipelineRequest(ctx context.Context, urlString string, metadata *azurePipelinesMetadata, httpClient *http.Client) ([]byte, error) {
+func getToken(ctx context.Context, metadata *azurePipelinesMetadata, scope string) (string, error) {
+	if metadata.authContext.token != nil {
+		// if the cached token is valid for more than a minute from now, reuse it
+		if metadata.authContext.token.ExpiresOn.After(time.Now().Add(time.Second * 60)) {
+			return metadata.authContext.token.Token, nil
+		}
+	}
+	token, err := metadata.authContext.cred.GetToken(ctx, policy.TokenRequestOptions{
+		Scopes: []string{
+			scope,
+		},
+	})
+
+	if err != nil {
+		return "", err
+	}
+
+	metadata.authContext.token = &token
+
+	return metadata.authContext.token.Token, nil
+}
+
+func getAzurePipelineRequest(ctx context.Context, logger logr.Logger, urlString string, metadata *azurePipelinesMetadata, podIdentity kedav1alpha1.AuthPodIdentity, httpClient *http.Client) ([]byte, error) {
 	req, err := http.NewRequestWithContext(ctx, "GET", urlString, nil)
 	if err != nil {
 		return []byte{}, err
 	}
 
-	req.SetBasicAuth("", metadata.personalAccessToken)
+	switch podIdentity.Provider {
+	case "", kedav1alpha1.PodIdentityProviderNone:
+		// PAT authentication
+		logger.V(1).Info("making request to ADO REST API using PAT")
+		req.SetBasicAuth("", metadata.authContext.pat)
+	case kedav1alpha1.PodIdentityProviderAzureWorkload:
+		// ADO resource token via workload identity
+		logger.V(1).Info("making request to ADO REST API using managed identity")
+		aadToken, err := getToken(ctx, metadata, devopsResource)
+		if err != nil {
+			return []byte{}, fmt.Errorf("cannot create workload identity credentials: %w", err)
+		}
+		logger.V(1).Info("token acquired, setting auth header as 'Bearer XXXXXX'")
+		req.Header.Set("Authorization", "Bearer "+aadToken)
+	}
 
 	r, err := httpClient.Do(req)
 	if err != nil {
@@ -340,7 +420,7 @@ func (s
*azurePipelinesScaler) GetAzurePipelinesQueueLength(ctx context.Context) } else { urlString = fmt.Sprintf("%s/_apis/distributedtask/pools/%d/jobrequests?$top=%d", s.metadata.organizationURL, s.metadata.poolID, s.metadata.jobsToFetch) } - body, err := getAzurePipelineRequest(ctx, urlString, s.metadata, s.httpClient) + body, err := getAzurePipelineRequest(ctx, s.logger, urlString, s.metadata, s.podIdentity, s.httpClient) if err != nil { return -1, err } diff --git a/pkg/scalers/azure_pipelines_scaler_test.go b/pkg/scalers/azure_pipelines_scaler_test.go index f333e2a4afb..4e6f2436bc7 100644 --- a/pkg/scalers/azure_pipelines_scaler_test.go +++ b/pkg/scalers/azure_pipelines_scaler_test.go @@ -6,6 +6,8 @@ import ( "net/http/httptest" "strings" "testing" + + "github.com/go-logr/logr" ) const loadCount = 1000 // the size of the pretend pool completed of job requests @@ -68,7 +70,9 @@ func TestParseAzurePipelinesMetadata(t *testing.T) { testData.authParams["organizationURL"] = apiStub.URL } - _, err := parseAzurePipelinesMetadata(context.TODO(), &ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testData.resolvedEnv, AuthParams: testData.authParams}, http.DefaultClient) + logger := logr.Discard() + + _, _, err := parseAzurePipelinesMetadata(context.TODO(), logger, &ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testData.resolvedEnv, AuthParams: testData.authParams}, http.DefaultClient) if err != nil && !testData.isError { t.Error("Expected success but got error", err) } @@ -121,8 +125,8 @@ func TestValidateAzurePipelinesPool(t *testing.T) { "organizationURL": apiStub.URL, "personalAccessToken": "PAT", } - - _, err := parseAzurePipelinesMetadata(context.TODO(), &ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: nil, AuthParams: authParams}, http.DefaultClient) + logger := logr.Discard() + _, _, err := parseAzurePipelinesMetadata(context.TODO(), logger, &ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: nil, AuthParams: authParams}, http.DefaultClient) if err != nil && !testData.isError { t.Error("Expected success but got error", err) } @@ -160,7 +164,9 @@ func TestAzurePipelinesGetMetricSpecForScaling(t *testing.T) { "targetPipelinesQueueLength": "1", } - meta, err := parseAzurePipelinesMetadata(context.TODO(), &ScalerConfig{TriggerMetadata: metadata, ResolvedEnv: nil, AuthParams: authParams, TriggerIndex: testData.triggerIndex}, http.DefaultClient) + logger := logr.Discard() + + meta, _, err := parseAzurePipelinesMetadata(context.TODO(), logger, &ScalerConfig{TriggerMetadata: metadata, ResolvedEnv: nil, AuthParams: authParams, TriggerIndex: testData.triggerIndex}, http.DefaultClient) if err != nil { t.Fatal("Could not parse metadata:", err) } @@ -183,7 +189,7 @@ func getMatchedAgentMetaData(url string) *azurePipelinesMetadata { meta.organizationName = "testOrg" meta.organizationURL = url meta.parent = "dotnet60-keda-template" - meta.personalAccessToken = "testPAT" + meta.authContext.pat = "testPAT" meta.poolID = 1 meta.targetPipelinesQueueLength = 1 diff --git a/tests/scalers/azure/azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go b/tests/scalers/azure/azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go new file mode 100644 index 00000000000..f1db3b68871 --- /dev/null +++ b/tests/scalers/azure/azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go @@ -0,0 +1,317 @@ +//go:build e2e +// +build e2e + +package azure_pipelines_test + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "strconv" + "testing" + "time" + + 
"github.com/joho/godotenv" + "github.com/microsoft/azure-devops-go-api/azuredevops" + "github.com/microsoft/azure-devops-go-api/azuredevops/build" + "github.com/microsoft/azure-devops-go-api/azuredevops/taskagent" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/client-go/kubernetes" + + . "github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../../.env") + +const ( + testName = "azure-pipelines-aad-wi-test" +) + +var ( + organizationURL = os.Getenv("AZURE_DEVOPS_ORGANIZATION_URL") + personalAccessToken = os.Getenv("AZURE_DEVOPS_PAT") + project = os.Getenv("AZURE_DEVOPS_PROJECT") + buildID = os.Getenv("AZURE_DEVOPS_AAD_WI_BUILD_DEFINITION_ID") + poolName = os.Getenv("AZURE_DEVOPS_AAD_WI_POOL_NAME") + poolID = "0" + triggerAuthName = fmt.Sprintf("%s-ta", testName) + testNamespace = fmt.Sprintf("%s-ns", testName) + secretName = fmt.Sprintf("%s-secret", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + minReplicaCount = 0 + maxReplicaCount = 1 +) + +type templateData struct { + TestNamespace string + SecretName string + DeploymentName string + ScaledObjectName string + MinReplicaCount string + MaxReplicaCount string + Pat string + URL string + PoolName string + PoolID string + TriggerAuthName string +} + +const ( + secretTemplate = ` +apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +data: + personalAccessToken: {{.Pat}} +` + + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: azdevops-agent +spec: + replicas: 1 + selector: + matchLabels: + app: azdevops-agent + template: + metadata: + labels: + app: azdevops-agent + spec: + terminationGracePeriodSeconds: 90 + containers: + - name: azdevops-agent + lifecycle: + preStop: + exec: + command: ["/bin/sleep","60"] + image: ghcr.io/kedacore/tests-azure-pipelines-agent:b3a02cc + env: + - name: AZP_URL + value: {{.URL}} + - name: AZP_TOKEN + valueFrom: + secretKeyRef: + name: {{.SecretName}} + key: personalAccessToken + - name: AZP_POOL + value: {{.PoolName}} +` + + poolIdscaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + pollingInterval: 15 + cooldownPeriod: 5 + triggers: + - type: azure-pipelines + metadata: + organizationURLFromEnv: "AZP_URL" + activationTargetPipelinesQueueLength: "1" + poolID: "{{.PoolID}}" + authenticationRef: + name: {{.TriggerAuthName}} +` + poolNamescaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + pollingInterval: 15 + cooldownPeriod: 5 + triggers: + - type: azure-pipelines + metadata: + organizationURLFromEnv: "AZP_URL" + activationTargetPipelinesQueueLength: "1" + poolName: "{{.PoolName}}" + authenticationRef: + name: {{.TriggerAuthName}} +` + poolTriggerAuthRef = ` +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthName}} + namespace: {{.TestNamespace}} +spec: + podIdentity: + provider: azure-workload +` +) 
+
+func TestScaler(t *testing.T) {
+	// setup
+	t.Log("--- setting up ---")
+	require.NotEmpty(t, organizationURL, "AZURE_DEVOPS_ORGANIZATION_URL env variable is required for azure pipelines test")
+	require.NotEmpty(t, personalAccessToken, "AZURE_DEVOPS_PAT env variable is required for azure pipelines test")
+	require.NotEmpty(t, project, "AZURE_DEVOPS_PROJECT env variable is required for azure pipelines test")
+	require.NotEmpty(t, buildID, "AZURE_DEVOPS_AAD_WI_BUILD_DEFINITION_ID env variable is required for azure pipelines test")
+	require.NotEmpty(t, poolName, "AZURE_DEVOPS_AAD_WI_POOL_NAME env variable is required for azure pipelines test")
+	connection := azuredevops.NewPatConnection(organizationURL, personalAccessToken)
+	clearAllBuilds(t, connection)
+	// Get pool ID
+	poolID = fmt.Sprintf("%d", getAzDoPoolID(t, connection))
+
+	// Create kubernetes resources
+	kc := GetKubernetesClient(t)
+	data, templates := getTemplateData()
+	CreateKubernetesResources(t, kc, testNamespace, data, templates)
+
+	WaitForPodCountInNamespace(t, kc, testNamespace, minReplicaCount, 60, 2)
+
+	// test scaling with poolID
+	testActivation(t, kc, connection)
+	testScaleOut(t, kc, connection)
+	testScaleIn(t, kc)
+
+	// test scaling with poolName
+	KubectlApplyWithTemplate(t, data, "poolNamescaledObjectTemplate", poolNamescaledObjectTemplate)
+	testActivation(t, kc, connection)
+	testScaleOut(t, kc, connection)
+	testScaleIn(t, kc)
+
+	// cleanup
+	DeleteKubernetesResources(t, testNamespace, data, templates)
+}
+
+func getAzDoPoolID(t *testing.T, connection *azuredevops.Connection) int {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	taskClient, err := taskagent.NewClient(ctx, connection)
+	if err != nil {
+		t.Errorf("unable to create task agent client")
+	}
+	args := taskagent.GetAgentPoolsArgs{
+		PoolName: &poolName,
+	}
+	pools, err := taskClient.GetAgentPools(ctx, args)
+	if err != nil {
+		t.Errorf("unable to get the pools")
+	}
+	return *(*pools)[0].Id
+}
+
+func queueBuild(t *testing.T, connection *azuredevops.Connection) {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	buildClient, err := build.NewClient(ctx, connection)
+	if err != nil {
+		t.Errorf("unable to create build client")
+	}
+	id, err := strconv.Atoi(buildID)
+	if err != nil {
+		t.Errorf("unable to parse buildID")
+	}
+	args := build.QueueBuildArgs{
+		Project: &project,
+		Build: &build.Build{
+			Definition: &build.DefinitionReference{
+				Id: &id,
+			},
+		},
+	}
+	_, err = buildClient.QueueBuild(ctx, args)
+	if err != nil {
+		t.Errorf("unable to queue build")
+	}
+}
+
+func clearAllBuilds(t *testing.T, connection *azuredevops.Connection) {
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+	buildClient, err := build.NewClient(ctx, connection)
+	if err != nil {
+		t.Errorf("unable to create build client")
+	}
+	var top = 20
+	args := build.GetBuildsArgs{
+		Project:      &project,
+		StatusFilter: &build.BuildStatusValues.All,
+		QueryOrder:   &build.BuildQueryOrderValues.QueueTimeDescending,
+		Top:          &top,
+	}
+	azBuilds, err := buildClient.GetBuilds(ctx, args)
+	if err != nil {
+		t.Errorf("unable to get builds")
+	}
+	for _, azBuild := range azBuilds.Value {
+		azBuild.Status = &build.BuildStatusValues.Cancelling
+		args := build.UpdateBuildArgs{
+			Build:   &azBuild,
+			Project: &project,
+			BuildId: azBuild.Id,
+		}
+		_, err = buildClient.UpdateBuild(ctx, args)
+		if err != nil {
+			t.Errorf("unable to cancel build")
+		}
+	}
+}
+
+func getTemplateData() (templateData, []Template) {
+	base64Pat := base64.StdEncoding.EncodeToString([]byte(personalAccessToken))
+
+	return templateData{
+			TestNamespace:    testNamespace,
+			SecretName:       secretName,
+			DeploymentName:   deploymentName,
+			ScaledObjectName: scaledObjectName,
+			MinReplicaCount:  fmt.Sprintf("%v", minReplicaCount),
+			MaxReplicaCount:  fmt.Sprintf("%v", maxReplicaCount),
+			Pat:              base64Pat,
+			URL:              organizationURL,
+			PoolName:         poolName,
+			PoolID:           poolID,
+			TriggerAuthName:  triggerAuthName,
+		}, []Template{
+			{Name: "secretTemplate", Config: secretTemplate},
+			{Name: "deploymentTemplate", Config: deploymentTemplate},
+			{Name: "poolTriggerAuthRef", Config: poolTriggerAuthRef},
+			{Name: "poolIdscaledObjectTemplate", Config: poolIdscaledObjectTemplate},
+		}
+}
+
+func testActivation(t *testing.T, kc *kubernetes.Clientset, connection *azuredevops.Connection) {
+	t.Log("--- testing activation ---")
+	queueBuild(t, connection)
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
+}
+
+func testScaleOut(t *testing.T, kc *kubernetes.Clientset, connection *azuredevops.Connection) {
+	t.Log("--- testing scale out ---")
+	queueBuild(t, connection)
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 1),
+		"replica count should be 1 after 1 minute")
+}
+
+func testScaleIn(t *testing.T, kc *kubernetes.Clientset) {
+	t.Log("--- testing scale in ---")
+	assert.True(t, WaitForPodCountInNamespace(t, kc, testNamespace, minReplicaCount, 60, 5),
+		"pod count should be 0 after 1 minute")
+}
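
Usage sketch for reviewers: with this patch applied, the azure-pipelines trigger can authenticate through workload identity instead of a PAT. The manifest below mirrors the e2e templates above; the names (azure-pipelines-auth, azdevops-agent, my-agent-pool) are placeholders for illustration, not values taken from the patch:

apiVersion: keda.sh/v1alpha1
kind: TriggerAuthentication
metadata:
  name: azure-pipelines-auth
spec:
  podIdentity:
    provider: azure-workload
---
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
  name: azure-pipelines-scaledobject
spec:
  scaleTargetRef:
    name: azdevops-agent
  triggers:
    - type: azure-pipelines
      metadata:
        organizationURLFromEnv: "AZP_URL"
        poolName: "my-agent-pool"
      authenticationRef:
        name: azure-pipelines-auth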
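The token flow added in getToken can also be exercised standalone when reviewing. A minimal sketch, assuming DefaultAzureCredential is an acceptable stand-in for the scaler's ChainedTokenCredential; the scope is the same well-known Azure DevOps resource ID the patch uses for devopsResource:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// DefaultAzureCredential resolves workload identity, managed identity, or
	// developer credentials depending on the environment (assumption: a
	// stand-in for KEDA's ChainedTokenCredential, which this patch builds
	// via azure.NewChainedCredential).
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Request a token scoped to the Azure DevOps resource, matching the
	// devopsResource constant introduced by the patch.
	tok, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"499b84ac-1321-427f-aa17-267ca6975798/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	// The scaler caches the returned token and reuses it for subsequent REST
	// calls while it remains valid for more than 60 seconds.
	fmt.Println("token expires:", tok.ExpiresOn)
}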