Merge pull request #1293 from CecileRobertMichon/fix-providerid
Fix VM provider ID to match node ID
k8s-ci-robot authored Apr 7, 2021
2 parents cfdac96 + 7c4ec06 commit 7b8e792
Showing 10 changed files with 30 additions and 23 deletions.
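At a glance, the change replaces the hard-coded "azure:///" and "azure://" format strings with a single azure.ProviderIDPrefix constant, so the provider IDs set by the VM and VMSS services match the provider IDs the Azure cloud provider writes on nodes. A minimal sketch of the resulting format, using a made-up resource ID (the constant mirrors the one added in azure/defaults.go below):

package main

import "fmt"

// ProviderIDPrefix mirrors the constant introduced in azure/defaults.go.
const ProviderIDPrefix = "azure://"

func main() {
	// Azure resource IDs already start with a slash, so the two-slash prefix
	// yields the three-slash provider ID that appears on nodes.
	vmID := "/subscriptions/123/resourceGroups/my-rg/providers/Microsoft.Compute/virtualMachines/my-vm"
	fmt.Println(ProviderIDPrefix + vmID)
	// azure:///subscriptions/123/resourceGroups/my-rg/providers/Microsoft.Compute/virtualMachines/my-vm
}
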
6 changes: 4 additions & 2 deletions azure/converters/identity.go
@@ -19,6 +19,8 @@ package converters
 import (
 "strings"
 
+"sigs.k8s.io/cluster-api-provider-azure/azure"
+
 "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
 "github.com/pkg/errors"
 
@@ -59,7 +61,7 @@ func UserAssignedIdentitiesToVMSSSDK(identities []infrav1.UserAssignedIdentity)
 return userIdentitiesMap, nil
 }
 
-// sanitized removes "azure:///" prefix from the given id
+// sanitized removes "azure://" prefix from the given id
 func sanitized(id string) string {
-return strings.TrimPrefix(id, "azure:///")
+return strings.TrimPrefix(id, azure.ProviderIDPrefix)
 }
8 changes: 4 additions & 4 deletions azure/converters/identity_test.go
@@ -38,14 +38,14 @@ var sampleSubjectFactory = []infrav1.UserAssignedIdentity{
 }
 
 var expectedVMSDKObject = map[string]*compute.VirtualMachineIdentityUserAssignedIdentitiesValue{
-"foo": {},
-"bar": {},
+"/foo": {},
+"/bar": {},
 "/without/prefix": {},
 }
 
 var expectedVMSSSDKObject = map[string]*compute.VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue{
-"foo": {},
-"bar": {},
+"/foo": {},
+"/bar": {},
 "/without/prefix": {},
 }

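The expected keys above gain a leading slash because sanitized now trims only the shared "azure://" prefix instead of "azure:///". A small sketch of the before/after behavior, using hypothetical IDs:

package main

import (
	"fmt"
	"strings"
)

func main() {
	id := "azure:///foo"
	// Before this commit: trimming "azure:///" also swallowed the leading slash.
	fmt.Println(strings.TrimPrefix(id, "azure:///")) // foo
	// After: trimming "azure://" keeps the slash, hence the "/foo" and "/bar" expectations,
	// while an ID without the prefix (e.g. "/without/prefix") is left untouched.
	fmt.Println(strings.TrimPrefix(id, "azure://"))                 // /foo
	fmt.Println(strings.TrimPrefix("/without/prefix", "azure://")) // /without/prefix
}
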
6 changes: 6 additions & 0 deletions azure/defaults.go
@@ -76,6 +76,12 @@ const (
 bootstrapSentinelFile = "/run/cluster-api/bootstrap-success.complete"
 )
 
+const (
+// ProviderIDPrefix will be appended to the beginning of Azure resource IDs to form the Kubernetes Provider ID.
+// NOTE: this format matches the 2 slashes format used in cloud-provider and cluster-autoscaler.
+ProviderIDPrefix = "azure://"
+)
+
 // GenerateBackendAddressPoolName generates a load balancer backend address pool name.
 func GenerateBackendAddressPoolName(lbName string) string {
 return fmt.Sprintf("%s-%s", lbName, "backendPool")
5 changes: 2 additions & 3 deletions azure/scope/machinepool.go
@@ -19,7 +19,6 @@ package scope
 import (
 "context"
 "encoding/base64"
-"fmt"
 "time"
 
 clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
@@ -174,7 +173,7 @@ func (m *MachinePoolScope) UpdateInstanceStatuses(ctx context.Context, instances

 providerIDs := make([]string, len(instances))
 for i, instance := range instances {
-providerIDs[i] = fmt.Sprintf("azure://%s", instance.ID)
+providerIDs[i] = azure.ProviderIDPrefix + instance.ID
 }
 
 nodeStatusByProviderID, err := m.getNodeStatusByProviderID(ctx, providerIDs)
@@ -186,7 +185,7 @@ func (m *MachinePoolScope) UpdateInstanceStatuses(ctx context.Context, instances
 instanceStatuses := make([]*infrav1exp.AzureMachinePoolInstanceStatus, len(instances))
 for i, instance := range instances {
 instanceStatuses[i] = &infrav1exp.AzureMachinePoolInstanceStatus{
-ProviderID: fmt.Sprintf("azure://%s", instance.ID),
+ProviderID: azure.ProviderIDPrefix + instance.ID,
 InstanceID: instance.InstanceID,
 InstanceName: instance.Name,
 ProvisioningState: &instance.State,
4 changes: 2 additions & 2 deletions azure/services/scalesets/scalesets.go
@@ -119,7 +119,7 @@ func (s *Service) Reconcile(ctx context.Context) error {
 }
 default:
 // just in case, set the provider ID if the instance exists
-s.Scope.SetProviderID(fmt.Sprintf("azure://%s", fetchedVMSS.ID))
+s.Scope.SetProviderID(azure.ProviderIDPrefix + fetchedVMSS.ID)
 }
 
 // Try to get the VMSS to update status if we have created a long running operation. If the VMSS is still in a long
@@ -195,7 +195,7 @@ func (s *Service) patchVMSSIfNeeded(ctx context.Context, infraVMSS *infrav1exp.V
 ctx, span := tele.Tracer().Start(ctx, "scalesets.Service.patchVMSSIfNeeded")
 defer span.End()
 
-s.Scope.SetProviderID(fmt.Sprintf("azure://%s", infraVMSS.ID))
+s.Scope.SetProviderID(azure.ProviderIDPrefix + infraVMSS.ID)
 
 spec := s.Scope.ScaleSetSpec()
 result, err := s.buildVMSSFromSpec(ctx, spec)
11 changes: 5 additions & 6 deletions azure/services/scalesets/scalesets_test.go
@@ -18,7 +18,6 @@ package scalesets

 import (
 "context"
-"fmt"
 "net/http"
 "testing"

@@ -265,7 +264,7 @@ func TestReconcileVMSS(t *testing.T) {
 createdVMSS := newDefaultVMSS()
 instances := newDefaultInstances()
 createdVMSS = setupDefaultVMSSInProgressOperationDoneExpectations(g, s, m, createdVMSS, instances)
-s.SetProviderID(fmt.Sprintf("azure://%s", *createdVMSS.ID))
+s.SetProviderID(azure.ProviderIDPrefix + *createdVMSS.ID)
 s.SetLongRunningOperationState(nil)
 s.SetProvisioningState(infrav1.Succeeded)
 s.NeedsK8sVersionUpdate().Return(false)
@@ -286,7 +285,7 @@ func TestReconcileVMSS(t *testing.T) {
 vmss := newDefaultVMSS()
 instances := newDefaultInstances()
 vmss = setupDefaultVMSSInProgressOperationDoneExpectations(g, s, m, vmss, instances)
-s.SetProviderID(fmt.Sprintf("azure://%s", *vmss.ID))
+s.SetProviderID(azure.ProviderIDPrefix + *vmss.ID)
 s.SetProvisioningState(infrav1.Updating)
 
 // create a VMSS patch with an updated hash to match the spec
@@ -313,7 +312,7 @@ func TestReconcileVMSS(t *testing.T) {
 createdVMSS := newDefaultWindowsVMSS()
 instances := newDefaultInstances()
 createdVMSS = setupDefaultVMSSInProgressOperationDoneExpectations(g, s, m, createdVMSS, instances)
-s.SetProviderID(fmt.Sprintf("azure://%s", *createdVMSS.ID))
+s.SetProviderID(azure.ProviderIDPrefix + *createdVMSS.ID)
 s.SetLongRunningOperationState(nil)
 s.SetProvisioningState(infrav1.Succeeded)
 s.NeedsK8sVersionUpdate().Return(false)
@@ -411,7 +410,7 @@ func TestReconcileVMSS(t *testing.T) {
 spec.Identity = infrav1.VMIdentityUserAssigned
 spec.UserAssignedIdentities = []infrav1.UserAssignedIdentity{
 {
-ProviderID: "azure:////subscriptions/123/resourcegroups/456/providers/Microsoft.ManagedIdentity/userAssignedIdentities/id1",
+ProviderID: "azure:///subscriptions/123/resourcegroups/456/providers/Microsoft.ManagedIdentity/userAssignedIdentities/id1",
 },
 }
 s.ScaleSetSpec().Return(spec).AnyTimes()
@@ -1101,7 +1100,7 @@ func setupDefaultVMSSExpectations(s *mock_scalesets.MockScaleSetScopeMockRecorde

 func setupDefaultVMSSUpdateExpectations(s *mock_scalesets.MockScaleSetScopeMockRecorder) {
 setupDefaultVMSSExpectations(s)
-s.SetProviderID("azure://vmss-id")
+s.SetProviderID(azure.ProviderIDPrefix + "vmss-id")
 s.SetProvisioningState(infrav1.Updating)
 s.GetLongRunningOperationState().Return(nil)
 }
2 changes: 1 addition & 1 deletion azure/services/virtualmachines/virtualmachines.go
@@ -94,7 +94,7 @@ func (s *Service) Reconcile(ctx context.Context) error {
 return errors.Wrapf(err, "failed to get VM %s", vmSpec.Name)
 case err == nil:
 // VM already exists, update the spec and skip creation.
-s.Scope.SetProviderID(fmt.Sprintf("azure:///%s", existingVM.ID))
+s.Scope.SetProviderID(azure.ProviderIDPrefix + existingVM.ID)
 s.Scope.SetAnnotation("cluster-api-provider-azure", "true")
 s.Scope.SetAddresses(existingVM.Addresses)
 s.Scope.SetVMState(existingVM.State)
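This hunk is the heart of the fix: assuming existingVM.ID is a full ARM resource ID beginning with a slash (as the Azure SDK returns it), the old "azure:///%s" format produced a four-slash provider ID, while the node registered by the cloud provider carries a three-slash one. A rough illustration with an invented ID:

package main

import "fmt"

func main() {
	existingVMID := "/subscriptions/123/resourceGroups/my-rg/providers/Microsoft.Compute/virtualMachines/my-vm"
	fmt.Printf("azure:///%s\n", existingVMID) // old: azure:////subscriptions/... (extra slash, mismatches the node)
	fmt.Println("azure://" + existingVMID)    // new: azure:///subscriptions/... (matches the node's provider ID)
}
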
2 changes: 1 addition & 1 deletion exp/controllers/azuremanagedmachinepool_reconciler.go
@@ -152,7 +152,7 @@ func (s *azureManagedMachinePoolService) Reconcile(ctx context.Context, scope *s

 var providerIDs = make([]string, len(instances))
 for i := 0; i < len(instances); i++ {
-providerIDs[i] = fmt.Sprintf("azure://%s", *instances[i].ID)
+providerIDs[i] = azure.ProviderIDPrefix + *instances[i].ID
 }
 
 scope.InfraMachinePool.Spec.ProviderIDList = providerIDs
7 changes: 4 additions & 3 deletions test/e2e/azure_logcollector.go
@@ -23,10 +23,11 @@ import (
 "io/ioutil"
 "net/http"
 "path/filepath"
+"sigs.k8s.io/cluster-api-provider-azure/azure"
 "strings"
 
 "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-30/compute"
-"github.com/Azure/go-autorest/autorest/azure"
+autorest "github.com/Azure/go-autorest/autorest/azure"
 "github.com/Azure/go-autorest/autorest/azure/auth"
 "github.com/pkg/errors"
 clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
@@ -111,8 +112,8 @@ func collectLogsFromNode(ctx context.Context, managementClusterClient client.Cli

 // collectBootLog collects boot logs of the vm by using azure boot diagnostics
 func collectBootLog(ctx context.Context, m *clusterv1.Machine, outputPath string) error {
-resourceId := strings.TrimPrefix(*m.Spec.ProviderID, "azure:///")
-resource, err := azure.ParseResourceID(resourceId)
+resourceId := strings.TrimPrefix(*m.Spec.ProviderID, azure.ProviderIDPrefix)
+resource, err := autorest.ParseResourceID(resourceId)
 if err != nil {
 return errors.Wrap(err, "failed to parse resource id")
 }
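For context, a self-contained sketch of the trim-then-parse flow collectBootLog uses after this change; the provider ID is invented, and the Resource fields printed are the commonly used ones from go-autorest:

package main

import (
	"fmt"
	"strings"

	autorest "github.com/Azure/go-autorest/autorest/azure"
)

// providerIDPrefix stands in for azure.ProviderIDPrefix from this repository.
const providerIDPrefix = "azure://"

func main() {
	providerID := "azure:///subscriptions/123/resourceGroups/my-rg/providers/Microsoft.Compute/virtualMachines/my-vm"
	resourceID := strings.TrimPrefix(providerID, providerIDPrefix)
	resource, err := autorest.ParseResourceID(resourceID)
	if err != nil {
		panic(err)
	}
	fmt.Println(resource.ResourceGroup, resource.ResourceName) // my-rg my-vm
}
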
2 changes: 1 addition & 1 deletion test/e2e/azure_test.go
@@ -327,7 +327,7 @@ var _ = Describe("Workload cluster creation", func() {
 // This is because the entire config map is stored in `last-applied` annotation for tracking.
 // The workaround is to use server side apply by passing `--server-side` flag to kubectl apply.
 // More on server side apply here: https://kubernetes.io/docs/reference/using-api/server-side-apply/
-Args: []string{"--server-side"},
+Args: []string{"--server-side"},
 }, result)
 
 Context("Running a GPU-based calculation", func() {
