diff --git a/api/v1beta1/awscluster_conversion.go b/api/v1beta1/awscluster_conversion.go index 588ffac53d..5d418399b3 100644 --- a/api/v1beta1/awscluster_conversion.go +++ b/api/v1beta1/awscluster_conversion.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + apiconversion "k8s.io/apimachinery/pkg/conversion" infrav2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" utilconversion "sigs.k8s.io/cluster-api/util/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" @@ -73,6 +74,37 @@ func (src *AWSCluster) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.NetworkSpec.AdditionalControlPlaneIngressRules = restored.Spec.NetworkSpec.AdditionalControlPlaneIngressRules + if restored.Spec.NetworkSpec.VPC.IPAMPool != nil { + if dst.Spec.NetworkSpec.VPC.IPAMPool == nil { + dst.Spec.NetworkSpec.VPC.IPAMPool = &infrav2.IPAMPool{} + } + + restoreIPAMPool(restored.Spec.NetworkSpec.VPC.IPAMPool, dst.Spec.NetworkSpec.VPC.IPAMPool) + } + + if restored.Spec.NetworkSpec.VPC.IsIPv6Enabled() && restored.Spec.NetworkSpec.VPC.IPv6.IPAMPool != nil { + if dst.Spec.NetworkSpec.VPC.IPv6.IPAMPool == nil { + dst.Spec.NetworkSpec.VPC.IPv6.IPAMPool = &infrav2.IPAMPool{} + } + + restoreIPAMPool(restored.Spec.NetworkSpec.VPC.IPv6.IPAMPool, dst.Spec.NetworkSpec.VPC.IPv6.IPAMPool) + } + + dst.Spec.NetworkSpec.AdditionalControlPlaneIngressRules = restored.Spec.NetworkSpec.AdditionalControlPlaneIngressRules + + // Restore SubnetSpec.ResourceID field, if any. + for _, subnet := range restored.Spec.NetworkSpec.Subnets { + if len(subnet.ResourceID) == 0 { + continue + } + for i, dstSubnet := range dst.Spec.NetworkSpec.Subnets { + if dstSubnet.ID == subnet.ID { + dstSubnet.ResourceID = subnet.ResourceID + dstSubnet.DeepCopyInto(&dst.Spec.NetworkSpec.Subnets[i]) + } + } + } + return nil } @@ -133,3 +165,7 @@ func (r *AWSClusterList) ConvertFrom(srcRaw conversion.Hub) error { return Convert_v1beta2_AWSClusterList_To_v1beta1_AWSClusterList(src, r, nil) } + +func Convert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(in *infrav2.SubnetSpec, out *SubnetSpec, s apiconversion.Scope) error { + return autoConvert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(in, out, s) +} diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index ae38dacf7d..86402d8065 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -515,11 +515,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.SubnetSpec)(nil), (*SubnetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(a.(*v1beta2.SubnetSpec), b.(*SubnetSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*VPCSpec)(nil), (*v1beta2.VPCSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_VPCSpec_To_v1beta2_VPCSpec(a.(*VPCSpec), b.(*v1beta2.VPCSpec), scope) }); err != nil { @@ -595,6 +590,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.SubnetSpec)(nil), (*SubnetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(a.(*v1beta2.SubnetSpec), b.(*SubnetSpec), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.VPCSpec)(nil), (*VPCSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { 
return Convert_v1beta2_VPCSpec_To_v1beta1_VPCSpec(a.(*v1beta2.VPCSpec), b.(*VPCSpec), scope) }); err != nil { @@ -2000,7 +2000,17 @@ func autoConvert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec(in *NetworkSpec, out if err := Convert_v1beta1_VPCSpec_To_v1beta2_VPCSpec(&in.VPC, &out.VPC, s); err != nil { return err } - out.Subnets = *(*v1beta2.Subnets)(unsafe.Pointer(&in.Subnets)) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make(v1beta2.Subnets, len(*in)) + for i := range *in { + if err := Convert_v1beta1_SubnetSpec_To_v1beta2_SubnetSpec(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subnets = nil + } out.CNI = (*v1beta2.CNISpec)(unsafe.Pointer(in.CNI)) out.SecurityGroupOverrides = *(*map[v1beta2.SecurityGroupRole]string)(unsafe.Pointer(&in.SecurityGroupOverrides)) return nil @@ -2015,7 +2025,17 @@ func autoConvert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec(in *v1beta2.NetworkS if err := Convert_v1beta2_VPCSpec_To_v1beta1_VPCSpec(&in.VPC, &out.VPC, s); err != nil { return err } - out.Subnets = *(*Subnets)(unsafe.Pointer(&in.Subnets)) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make(Subnets, len(*in)) + for i := range *in { + if err := Convert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subnets = nil + } out.CNI = (*CNISpec)(unsafe.Pointer(in.CNI)) out.SecurityGroupOverrides = *(*map[SecurityGroupRole]string)(unsafe.Pointer(&in.SecurityGroupOverrides)) // WARNING: in.AdditionalControlPlaneIngressRules requires manual conversion: does not exist in peer-type @@ -2198,6 +2218,7 @@ func Convert_v1beta1_SubnetSpec_To_v1beta2_SubnetSpec(in *SubnetSpec, out *v1bet func autoConvert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(in *v1beta2.SubnetSpec, out *SubnetSpec, s conversion.Scope) error { out.ID = in.ID + // WARNING: in.ResourceID requires manual conversion: does not exist in peer-type out.CidrBlock = in.CidrBlock out.IPv6CidrBlock = in.IPv6CidrBlock out.AvailabilityZone = in.AvailabilityZone @@ -2209,11 +2230,6 @@ func autoConvert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(in *v1beta2.SubnetSpec return nil } -// Convert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec is an autogenerated conversion function. -func Convert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(in *v1beta2.SubnetSpec, out *SubnetSpec, s conversion.Scope) error { - return autoConvert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(in, out, s) -} - func autoConvert_v1beta1_VPCSpec_To_v1beta2_VPCSpec(in *VPCSpec, out *v1beta2.VPCSpec, s conversion.Scope) error { out.ID = in.ID out.CidrBlock = in.CidrBlock diff --git a/api/v1beta2/network_types.go b/api/v1beta2/network_types.go index 858b8e8dc2..3f68e770cc 100644 --- a/api/v1beta2/network_types.go +++ b/api/v1beta2/network_types.go @@ -348,8 +348,20 @@ func (v *VPCSpec) IsIPv6Enabled() bool { // SubnetSpec configures an AWS Subnet. type SubnetSpec struct { // ID defines a unique identifier to reference this resource. + // If you're bringing your subnet, set the AWS subnet-id here, it must start with `subnet-`. + // + // When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you, + // the id can be set to any placeholder value that does not start with `subnet-`; + // upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and + // the `id` field is going to be used as the subnet name. If you specify a tag + // called `Name`, it takes precedence. 
ID string `json:"id"` + // ResourceID is the subnet identifier from AWS, READ ONLY. + // This field is populated when the provider manages the subnet. + // +optional + ResourceID string `json:"resourceID,omitempty"` + // CidrBlock is the CIDR block to be used when the provider creates a managed VPC. CidrBlock string `json:"cidrBlock,omitempty"` @@ -384,9 +396,18 @@ type SubnetSpec struct { Tags Tags `json:"tags,omitempty"` } +// GetResourceID returns the identifier for this subnet, +// if the subnet was not created or reconciled, it returns the subnet ID. +func (s *SubnetSpec) GetResourceID() string { + if s.ResourceID != "" { + return s.ResourceID + } + return s.ID +} + // String returns a string representation of the subnet. func (s *SubnetSpec) String() string { - return fmt.Sprintf("id=%s/az=%s/public=%v", s.ID, s.AvailabilityZone, s.IsPublic) + return fmt.Sprintf("id=%s/az=%s/public=%v", s.GetResourceID(), s.AvailabilityZone, s.IsPublic) } // Subnets is a slice of Subnet. @@ -399,7 +420,7 @@ func (s Subnets) ToMap() map[string]*SubnetSpec { res := make(map[string]*SubnetSpec) for i := range s { x := s[i] - res[x.ID] = &x + res[x.GetResourceID()] = &x } return res } @@ -408,7 +429,7 @@ func (s Subnets) ToMap() map[string]*SubnetSpec { func (s Subnets) IDs() []string { res := []string{} for _, subnet := range s { - res = append(res, subnet.ID) + res = append(res, subnet.GetResourceID()) } return res } @@ -416,11 +437,10 @@ func (s Subnets) IDs() []string { // FindByID returns a single subnet matching the given id or nil. func (s Subnets) FindByID(id string) *SubnetSpec { for _, x := range s { - if x.ID == id { + if x.GetResourceID() == id { return &x } } - return nil } @@ -429,7 +449,9 @@ func (s Subnets) FindByID(id string) *SubnetSpec { // or if they are in the same vpc and the cidr block is the same. func (s Subnets) FindEqual(spec *SubnetSpec) *SubnetSpec { for _, x := range s { - if (spec.ID != "" && x.ID == spec.ID) || (spec.CidrBlock == x.CidrBlock) || (spec.IPv6CidrBlock != "" && spec.IPv6CidrBlock == x.IPv6CidrBlock) { + if (spec.GetResourceID() != "" && x.GetResourceID() == spec.GetResourceID()) || + (spec.CidrBlock == x.CidrBlock) || + (spec.IPv6CidrBlock != "" && spec.IPv6CidrBlock == x.IPv6CidrBlock) { return &x } } diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml index cc334f8dfa..84b7e14331 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml @@ -481,8 +481,15 @@ spec: the provider creates a managed VPC. type: string id: - description: ID defines a unique identifier to reference - this resource. + description: "ID defines a unique identifier to reference + this resource. If you're bringing your subnet, set the + AWS subnet-id here, it must start with `subnet-`. \n When + the VPC is managed by CAPA, and you'd like the provider + to create a subnet for you, the id can be set to any placeholder + value that does not start with `subnet-`; upon creation, + the subnet AWS identifier will be populated in the `ResourceID` + field and the `id` field is going to be used as the subnet + name. If you specify a tag called `Name`, it takes precedence." type: string ipv6CidrBlock: description: IPv6CidrBlock is the IPv6 CIDR block to be @@ -510,6 +517,11 @@ spec: to determine routes for private subnets in the same AZ as the public subnet. 
type: string + resourceID: + description: ResourceID is the subnet identifier from AWS, + READ ONLY. This field is populated when the provider manages + the subnet. + type: string routeTableId: description: RouteTableID is the routing table id associated with the subnet. @@ -2052,8 +2064,15 @@ spec: the provider creates a managed VPC. type: string id: - description: ID defines a unique identifier to reference - this resource. + description: "ID defines a unique identifier to reference + this resource. If you're bringing your subnet, set the + AWS subnet-id here, it must start with `subnet-`. \n When + the VPC is managed by CAPA, and you'd like the provider + to create a subnet for you, the id can be set to any placeholder + value that does not start with `subnet-`; upon creation, + the subnet AWS identifier will be populated in the `ResourceID` + field and the `id` field is going to be used as the subnet + name. If you specify a tag called `Name`, it takes precedence." type: string ipv6CidrBlock: description: IPv6CidrBlock is the IPv6 CIDR block to be @@ -2081,6 +2100,11 @@ spec: to determine routes for private subnets in the same AZ as the public subnet. type: string + resourceID: + description: ResourceID is the subnet identifier from AWS, + READ ONLY. This field is populated when the provider manages + the subnet. + type: string routeTableId: description: RouteTableID is the routing table id associated with the subnet. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml index 0994d63f57..1b90cfea77 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml @@ -1284,8 +1284,15 @@ spec: the provider creates a managed VPC. type: string id: - description: ID defines a unique identifier to reference - this resource. + description: "ID defines a unique identifier to reference + this resource. If you're bringing your subnet, set the + AWS subnet-id here, it must start with `subnet-`. \n When + the VPC is managed by CAPA, and you'd like the provider + to create a subnet for you, the id can be set to any placeholder + value that does not start with `subnet-`; upon creation, + the subnet AWS identifier will be populated in the `ResourceID` + field and the `id` field is going to be used as the subnet + name. If you specify a tag called `Name`, it takes precedence." type: string ipv6CidrBlock: description: IPv6CidrBlock is the IPv6 CIDR block to be @@ -1313,6 +1320,11 @@ spec: to determine routes for private subnets in the same AZ as the public subnet. type: string + resourceID: + description: ResourceID is the subnet identifier from AWS, + READ ONLY. This field is populated when the provider manages + the subnet. + type: string routeTableId: description: RouteTableID is the routing table id associated with the subnet. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml index 16fcf98bf9..2fc12b9acc 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml @@ -888,8 +888,17 @@ spec: when the provider creates a managed VPC. type: string id: - description: ID defines a unique identifier to reference - this resource. + description: "ID defines a unique identifier to + reference this resource. 
If you're bringing your + subnet, set the AWS subnet-id here, it must start + with `subnet-`. \n When the VPC is managed by + CAPA, and you'd like the provider to create a + subnet for you, the id can be set to any placeholder + value that does not start with `subnet-`; upon + creation, the subnet AWS identifier will be populated + in the `ResourceID` field and the `id` field is + going to be used as the subnet name. If you specify + a tag called `Name`, it takes precedence." type: string ipv6CidrBlock: description: IPv6CidrBlock is the IPv6 CIDR block @@ -920,6 +929,11 @@ spec: routes for private subnets in the same AZ as the public subnet. type: string + resourceID: + description: ResourceID is the subnet identifier + from AWS, READ ONLY. This field is populated when + the provider manages the subnet. + type: string routeTableId: description: RouteTableID is the routing table id associated with the subnet. diff --git a/pkg/cloud/scope/shared_test.go b/pkg/cloud/scope/shared_test.go index 9f7956357a..34d124abf3 100644 --- a/pkg/cloud/scope/shared_test.go +++ b/pkg/cloud/scope/shared_test.go @@ -41,39 +41,39 @@ func TestSubnetPlacement(t *testing.T) { }{ { name: "spec subnets expected", - specSubnetIDs: []string{"az1"}, + specSubnetIDs: []string{"subnet-az1"}, specAZs: []string{"eu-west-1b"}, parentAZs: []string{"eu-west-1c"}, subnetPlacementType: nil, controlPlaneSubnets: infrav1.Subnets{ infrav1.SubnetSpec{ - ID: "az1", + ID: "subnet-az1", AvailabilityZone: "eu-west-1a", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az2", + ID: "subnet-az2", AvailabilityZone: "eu-west-1b", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az3", + ID: "subnet-az3", AvailabilityZone: "eu-west-1b", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az4", + ID: "subnet-az4", AvailabilityZone: "eu-west-1c", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az5", + ID: "subnet-az5", AvailabilityZone: "eu-west-1c", IsPublic: false, }, }, logger: logger.NewLogger(klog.Background()), - expectedSubnetIDs: []string{"az1"}, + expectedSubnetIDs: []string{"subnet-az1"}, expectError: false, }, { @@ -84,33 +84,33 @@ func TestSubnetPlacement(t *testing.T) { subnetPlacementType: nil, controlPlaneSubnets: infrav1.Subnets{ infrav1.SubnetSpec{ - ID: "az1", + ID: "subnet-az1", AvailabilityZone: "eu-west-1a", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az2", + ID: "subnet-az2", AvailabilityZone: "eu-west-1b", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az3", + ID: "subnet-az3", AvailabilityZone: "eu-west-1b", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az4", + ID: "subnet-az4", AvailabilityZone: "eu-west-1c", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az5", + ID: "subnet-az5", AvailabilityZone: "eu-west-1c", IsPublic: false, }, }, logger: logger.NewLogger(klog.Background()), - expectedSubnetIDs: []string{"az2", "az3"}, + expectedSubnetIDs: []string{"subnet-az2", "subnet-az3"}, expectError: false, }, { @@ -121,33 +121,33 @@ func TestSubnetPlacement(t *testing.T) { subnetPlacementType: nil, controlPlaneSubnets: infrav1.Subnets{ infrav1.SubnetSpec{ - ID: "az1", + ID: "subnet-az1", AvailabilityZone: "eu-west-1a", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az2", + ID: "subnet-az2", AvailabilityZone: "eu-west-1b", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az3", + ID: "subnet-az3", AvailabilityZone: "eu-west-1b", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az4", + ID: "subnet-az4", AvailabilityZone: "eu-west-1c", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az5", + ID: "subnet-az5", AvailabilityZone: "eu-west-1c", 
IsPublic: false, }, }, logger: logger.NewLogger(klog.Background()), - expectedSubnetIDs: []string{"az4", "az5"}, + expectedSubnetIDs: []string{"subnet-az4", "subnet-az5"}, expectError: false, }, { @@ -158,33 +158,33 @@ func TestSubnetPlacement(t *testing.T) { subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypePrivate), controlPlaneSubnets: infrav1.Subnets{ infrav1.SubnetSpec{ - ID: "az1", + ID: "subnet-az1", AvailabilityZone: "eu-west-1a", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az2", + ID: "subnet-az2", AvailabilityZone: "eu-west-1b", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az3", + ID: "subnet-az3", AvailabilityZone: "eu-west-1b", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az4", + ID: "subnet-az4", AvailabilityZone: "eu-west-1c", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az5", + ID: "subnet-az5", AvailabilityZone: "eu-west-1c", IsPublic: false, }, }, logger: logger.NewLogger(klog.Background()), - expectedSubnetIDs: []string{"az3"}, + expectedSubnetIDs: []string{"subnet-az3"}, expectError: false, }, { @@ -195,33 +195,33 @@ func TestSubnetPlacement(t *testing.T) { subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypePublic), controlPlaneSubnets: infrav1.Subnets{ infrav1.SubnetSpec{ - ID: "az1", + ID: "subnet-az1", AvailabilityZone: "eu-west-1a", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az2", + ID: "subnet-az2", AvailabilityZone: "eu-west-1b", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az3", + ID: "subnet-az3", AvailabilityZone: "eu-west-1b", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az4", + ID: "subnet-az4", AvailabilityZone: "eu-west-1c", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az5", + ID: "subnet-az5", AvailabilityZone: "eu-west-1c", IsPublic: false, }, }, logger: logger.NewLogger(klog.Background()), - expectedSubnetIDs: []string{"az2"}, + expectedSubnetIDs: []string{"subnet-az2"}, expectError: false, }, { @@ -232,33 +232,33 @@ func TestSubnetPlacement(t *testing.T) { subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypeAll), controlPlaneSubnets: infrav1.Subnets{ infrav1.SubnetSpec{ - ID: "az1", + ID: "subnet-az1", AvailabilityZone: "eu-west-1a", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az2", + ID: "subnet-az2", AvailabilityZone: "eu-west-1b", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az3", + ID: "subnet-az3", AvailabilityZone: "eu-west-1b", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az4", + ID: "subnet-az4", AvailabilityZone: "eu-west-1c", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az5", + ID: "subnet-az5", AvailabilityZone: "eu-west-1c", IsPublic: false, }, }, logger: logger.NewLogger(klog.Background()), - expectedSubnetIDs: []string{"az2", "az3"}, + expectedSubnetIDs: []string{"subnet-az2", "subnet-az3"}, expectError: false, }, { @@ -269,27 +269,27 @@ func TestSubnetPlacement(t *testing.T) { subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypePublic), controlPlaneSubnets: infrav1.Subnets{ infrav1.SubnetSpec{ - ID: "az1", + ID: "subnet-az1", AvailabilityZone: "eu-west-1a", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az2", + ID: "subnet-az2", AvailabilityZone: "eu-west-1b", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az3", + ID: "subnet-az3", AvailabilityZone: "eu-west-1b", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az4", + ID: "subnet-az4", AvailabilityZone: "eu-west-1c", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az5", + ID: "subnet-az5", AvailabilityZone: "eu-west-1c", IsPublic: false, }, @@ -306,33 +306,33 @@ func TestSubnetPlacement(t 
*testing.T) { subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypePrivate), controlPlaneSubnets: infrav1.Subnets{ infrav1.SubnetSpec{ - ID: "az1", + ID: "subnet-az1", AvailabilityZone: "eu-west-1a", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az2", + ID: "subnet-az2", AvailabilityZone: "eu-west-1b", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az3", + ID: "subnet-az3", AvailabilityZone: "eu-west-1b", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az4", + ID: "subnet-az4", AvailabilityZone: "eu-west-1c", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az5", + ID: "subnet-az5", AvailabilityZone: "eu-west-1c", IsPublic: false, }, }, logger: logger.NewLogger(klog.Background()), - expectedSubnetIDs: []string{"az5"}, + expectedSubnetIDs: []string{"subnet-az5"}, expectError: false, }, { @@ -343,33 +343,33 @@ func TestSubnetPlacement(t *testing.T) { subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypePublic), controlPlaneSubnets: infrav1.Subnets{ infrav1.SubnetSpec{ - ID: "az1", + ID: "subnet-az1", AvailabilityZone: "eu-west-1a", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az2", + ID: "subnet-az2", AvailabilityZone: "eu-west-1b", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az3", + ID: "subnet-az3", AvailabilityZone: "eu-west-1b", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az4", + ID: "subnet-az4", AvailabilityZone: "eu-west-1c", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az5", + ID: "subnet-az5", AvailabilityZone: "eu-west-1c", IsPublic: false, }, }, logger: logger.NewLogger(klog.Background()), - expectedSubnetIDs: []string{"az4"}, + expectedSubnetIDs: []string{"subnet-az4"}, expectError: false, }, { @@ -380,33 +380,33 @@ func TestSubnetPlacement(t *testing.T) { subnetPlacementType: expinfrav1.NewAZSubnetType(expinfrav1.AZSubnetTypeAll), controlPlaneSubnets: infrav1.Subnets{ infrav1.SubnetSpec{ - ID: "az1", + ID: "subnet-az1", AvailabilityZone: "eu-west-1a", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az2", + ID: "subnet-az2", AvailabilityZone: "eu-west-1b", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az3", + ID: "subnet-az3", AvailabilityZone: "eu-west-1b", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az4", + ID: "subnet-az4", AvailabilityZone: "eu-west-1c", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az5", + ID: "subnet-az5", AvailabilityZone: "eu-west-1c", IsPublic: false, }, }, logger: logger.NewLogger(klog.Background()), - expectedSubnetIDs: []string{"az4", "az5"}, + expectedSubnetIDs: []string{"subnet-az4", "subnet-az5"}, expectError: false, }, { @@ -416,33 +416,33 @@ func TestSubnetPlacement(t *testing.T) { parentAZs: []string{}, controlPlaneSubnets: infrav1.Subnets{ infrav1.SubnetSpec{ - ID: "az1", + ID: "subnet-az1", AvailabilityZone: "eu-west-1a", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az2", + ID: "subnet-az2", AvailabilityZone: "eu-west-1b", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az3", + ID: "subnet-az3", AvailabilityZone: "eu-west-1b", IsPublic: false, }, infrav1.SubnetSpec{ - ID: "az4", + ID: "subnet-az4", AvailabilityZone: "eu-west-1c", IsPublic: true, }, infrav1.SubnetSpec{ - ID: "az5", + ID: "subnet-az5", AvailabilityZone: "eu-west-1c", IsPublic: false, }, }, logger: logger.NewLogger(klog.Background()), - expectedSubnetIDs: []string{"az1", "az3", "az5"}, + expectedSubnetIDs: []string{"subnet-az1", "subnet-az3", "subnet-az5"}, expectError: false, }, { diff --git a/pkg/cloud/services/awsnode/cni.go b/pkg/cloud/services/awsnode/cni.go index b60f1d78a6..25211e6062 100644 --- a/pkg/cloud/services/awsnode/cni.go 
+++ b/pkg/cloud/services/awsnode/cni.go @@ -118,7 +118,7 @@ func (s *Service) ReconcileCNI(ctx context.Context) error { Labels: metaLabels, }, Spec: amazoncni.ENIConfigSpec{ - Subnet: subnet.ID, + Subnet: subnet.GetResourceID(), SecurityGroups: sgs, }, } @@ -130,7 +130,7 @@ func (s *Service) ReconcileCNI(ctx context.Context) error { s.scope.Info("Updating ENIConfig", "cluster", klog.KRef(s.scope.Namespace(), s.scope.Name()), "subnet", subnet.ID, "availability-zone", subnet.AvailabilityZone) eniConfig.Spec = amazoncni.ENIConfigSpec{ - Subnet: subnet.ID, + Subnet: subnet.GetResourceID(), SecurityGroups: sgs, } diff --git a/pkg/cloud/services/awsnode/cni_test.go b/pkg/cloud/services/awsnode/cni_test.go index ba8074b541..1619d843ac 100644 --- a/pkg/cloud/services/awsnode/cni_test.go +++ b/pkg/cloud/services/awsnode/cni_test.go @@ -222,14 +222,14 @@ func TestReconcileCniVpcCniValues(t *testing.T) { secondaryCidrBlock: aws.String("100.0.0.1/20"), securityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ "node": { - ID: "sg-1234", + ID: "subnet-1234", Name: "node", }, }, subnets: infrav1.Subnets{ { // we aren't testing reconcileSubnets where this extra conf is added so putting it in by hand - ID: "sn-1234", + ID: "subnet-1234", CidrBlock: "100.0.0.1/20", Tags: infrav1.Tags{ infrav1.NameAWSSubnetAssociation: infrav1.SecondarySubnetTagValue, diff --git a/pkg/cloud/services/ec2/bastion.go b/pkg/cloud/services/ec2/bastion.go index 0b5cd2a839..826d03c6ef 100644 --- a/pkg/cloud/services/ec2/bastion.go +++ b/pkg/cloud/services/ec2/bastion.go @@ -197,7 +197,7 @@ func (s *Service) getDefaultBastion(instanceType, ami string) (*infrav1.Instance i := &infrav1.Instance{ Type: instanceType, - SubnetID: subnet.ID, + SubnetID: subnet.GetResourceID(), ImageID: ami, SSHKeyName: keyName, UserData: aws.String(base64.StdEncoding.EncodeToString([]byte(userData))), diff --git a/pkg/cloud/services/ec2/instances.go b/pkg/cloud/services/ec2/instances.go index acfcf03087..0df1ceba78 100644 --- a/pkg/cloud/services/ec2/instances.go +++ b/pkg/cloud/services/ec2/instances.go @@ -355,7 +355,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { record.Warnf(scope.AWSMachine, "FailedCreate", errMessage) return "", awserrors.NewFailedDependency(errMessage) } - return subnets[0].ID, nil + return subnets[0].GetResourceID(), nil } subnets := s.scope.Subnets().FilterPrivate().FilterByZone(*failureDomain) @@ -365,7 +365,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { record.Warnf(scope.AWSMachine, "FailedCreate", errMessage) return "", awserrors.NewFailedDependency(errMessage) } - return subnets[0].ID, nil + return subnets[0].GetResourceID(), nil case scope.AWSMachine.Spec.PublicIP != nil && *scope.AWSMachine.Spec.PublicIP: subnets := s.scope.Subnets().FilterPublic() if len(subnets) == 0 { @@ -373,7 +373,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { record.Eventf(scope.AWSMachine, "FailedCreate", errMessage) return "", awserrors.NewFailedDependency(errMessage) } - return subnets[0].ID, nil + return subnets[0].GetResourceID(), nil // TODO(vincepri): Define a tag that would allow to pick a preferred subnet in an AZ when working // with control plane machines. 
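Reviewer note: the call sites above (ENIConfig reconciliation, the default bastion, and machine subnet selection) switch from subnet.ID to subnet.GetResourceID(). Below is a minimal sketch of the fallback this relies on, using a trimmed-down stand-in for the v1beta2 SubnetSpec rather than the provider's real type:

package main

import "fmt"

// SubnetSpec is a simplified stand-in carrying only the two identifier fields
// this change is about.
type SubnetSpec struct {
	ID         string // user-supplied: a real AWS "subnet-..." ID for bring-your-own subnets, otherwise a placeholder name
	ResourceID string // AWS-assigned identifier, populated once the provider creates or discovers the subnet
}

// GetResourceID mirrors the accessor added in api/v1beta2/network_types.go:
// prefer the AWS identifier when known, fall back to the spec ID.
func (s *SubnetSpec) GetResourceID() string {
	if s.ResourceID != "" {
		return s.ResourceID
	}
	return s.ID
}

func main() {
	byo := SubnetSpec{ID: "subnet-0123456789abcdef0"}                                          // unmanaged VPC: ID is already the AWS identifier
	managed := SubnetSpec{ID: "my-cluster-private-1a", ResourceID: "subnet-0fedcba9876543210"} // managed VPC: ID is only a name

	fmt.Println(byo.GetResourceID())     // subnet-0123456789abcdef0
	fmt.Println(managed.GetResourceID()) // subnet-0fedcba9876543210
}

Because unmanaged (bring-your-own) clusters only ever set ID to the real AWS identifier, switching these call sites to GetResourceID() leaves their behaviour unchanged.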
@@ -385,7 +385,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { record.Eventf(s.scope.InfraCluster(), "FailedCreateInstance", errMessage) return "", awserrors.NewFailedDependency(errMessage) } - return sns[0].ID, nil + return sns[0].GetResourceID(), nil } } diff --git a/pkg/cloud/services/elb/loadbalancer.go b/pkg/cloud/services/elb/loadbalancer.go index e4cee16626..25af21c8d8 100644 --- a/pkg/cloud/services/elb/loadbalancer.go +++ b/pkg/cloud/services/elb/loadbalancer.go @@ -223,7 +223,7 @@ func (s *Service) getAPIServerLBSpec(elbName string) (*infrav1.LoadBalancer, err } } res.AvailabilityZones = append(res.AvailabilityZones, sn.AvailabilityZone) - res.SubnetIDs = append(res.SubnetIDs, sn.ID) + res.SubnetIDs = append(res.SubnetIDs, sn.GetResourceID()) } } @@ -794,6 +794,7 @@ func (s *Service) getControlPlaneLoadBalancerSubnets() (infrav1.Subnets, error) lbSn := infrav1.SubnetSpec{ AvailabilityZone: *sn.AvailabilityZone, ID: *sn.SubnetId, + ResourceID: *sn.SubnetId, } subnets = append(subnets, lbSn) } @@ -998,7 +999,7 @@ func (s *Service) getAPIServerClassicELBSpec(elbName string) (*infrav1.LoadBalan } } res.AvailabilityZones = append(res.AvailabilityZones, sn.AvailabilityZone) - res.SubnetIDs = append(res.SubnetIDs, sn.ID) + res.SubnetIDs = append(res.SubnetIDs, sn.GetResourceID()) } } diff --git a/pkg/cloud/services/network/natgateways.go b/pkg/cloud/services/network/natgateways.go index 26f19e63c7..abdf1c3c0a 100644 --- a/pkg/cloud/services/network/natgateways.go +++ b/pkg/cloud/services/network/natgateways.go @@ -74,11 +74,11 @@ func (s *Service) reconcileNatGateways() error { subnetIDs := []string{} for _, sn := range s.scope.Subnets().FilterPublic() { - if sn.ID == "" { + if sn.GetResourceID() == "" { continue } - if ngw, ok := existing[sn.ID]; ok { + if ngw, ok := existing[sn.GetResourceID()]; ok { if len(ngw.NatGatewayAddresses) > 0 && ngw.NatGatewayAddresses[0].PublicIp != nil { natGatewaysIPs = append(natGatewaysIPs, *ngw.NatGatewayAddresses[0].PublicIp) } @@ -98,7 +98,7 @@ func (s *Service) reconcileNatGateways() error { continue } - subnetIDs = append(subnetIDs, sn.ID) + subnetIDs = append(subnetIDs, sn.GetResourceID()) } s.scope.SetNatGatewaysIPs(natGatewaysIPs) @@ -149,11 +149,11 @@ func (s *Service) deleteNatGateways() error { var ngIDs []*ec2.NatGateway for _, sn := range s.scope.Subnets().FilterPublic() { - if sn.ID == "" { + if sn.GetResourceID() == "" { continue } - if ngID, ok := existing[sn.ID]; ok { + if ngID, ok := existing[sn.GetResourceID()]; ok { ngIDs = append(ngIDs, ngID) } } @@ -319,7 +319,7 @@ func (s *Service) deleteNatGateway(id string) error { func (s *Service) getNatGatewayForSubnet(sn *infrav1.SubnetSpec) (string, error) { if sn.IsPublic { - return "", errors.Errorf("cannot get NAT gateway for a public subnet, got id %q", sn.ID) + return "", errors.Errorf("cannot get NAT gateway for a public subnet, got id %q", sn.GetResourceID()) } azGateways := make(map[string][]string) @@ -335,5 +335,5 @@ func (s *Service) getNatGatewayForSubnet(sn *infrav1.SubnetSpec) (string, error) return gws[0], nil } - return "", errors.Errorf("no nat gateways available in %q for private subnet %q, current state: %+v", sn.AvailabilityZone, sn.ID, azGateways) + return "", errors.Errorf("no nat gateways available in %q for private subnet %q, current state: %+v", sn.AvailabilityZone, sn.GetResourceID(), azGateways) } diff --git a/pkg/cloud/services/network/routetables.go b/pkg/cloud/services/network/routetables.go index 3a3be9cc9b..934ff65fbb 100644 --- 
a/pkg/cloud/services/network/routetables.go +++ b/pkg/cloud/services/network/routetables.go @@ -82,8 +82,8 @@ func (s *Service) reconcileRouteTables() error { } } - if rt, ok := subnetRouteMap[sn.ID]; ok { - s.scope.Debug("Subnet is already associated with route table", "subnet-id", sn.ID, "route-table-id", *rt.RouteTableId) + if rt, ok := subnetRouteMap[sn.GetResourceID()]; ok { + s.scope.Debug("Subnet is already associated with route table", "subnet-id", sn.GetResourceID(), "route-table-id", *rt.RouteTableId) // TODO(vincepri): check that everything is in order, e.g. routes match the subnet type. // For managed environments we need to reconcile the routes of our tables if there is a mistmatch. @@ -124,8 +124,8 @@ func (s *Service) reconcileRouteTables() error { } if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { - if err := s.associateRouteTable(rt, sn.ID); err != nil { - s.scope.Error(err, "trying to associate route table", "subnet_id", sn.ID) + if err := s.associateRouteTable(rt, sn.GetResourceID()); err != nil { + s.scope.Error(err, "trying to associate route table", "subnet_id", sn.GetResourceID()) return false, err } return true, nil @@ -133,7 +133,7 @@ func (s *Service) reconcileRouteTables() error { return err } - s.scope.Debug("Subnet has been associated with route table", "subnet-id", sn.ID, "route-table-id", rt.ID) + s.scope.Debug("Subnet has been associated with route table", "subnet-id", sn.GetResourceID(), "route-table-id", rt.ID) sn.RouteTableID = aws.String(rt.ID) } conditions.MarkTrue(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition) diff --git a/pkg/cloud/services/network/subnets.go b/pkg/cloud/services/network/subnets.go index 9b2dd651db..84766c4289 100644 --- a/pkg/cloud/services/network/subnets.go +++ b/pkg/cloud/services/network/subnets.go @@ -136,7 +136,7 @@ func (s *Service) reconcileSubnets() error { subnetTags := sub.Tags // Make sure tags are up-to-date. if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { - buildParams := s.getSubnetTagParams(unmanagedVPC, existingSubnet.ID, existingSubnet.IsPublic, existingSubnet.AvailabilityZone, subnetTags) + buildParams := s.getSubnetTagParams(unmanagedVPC, existingSubnet.GetResourceID(), existingSubnet.IsPublic, existingSubnet.AvailabilityZone, subnetTags) tagsBuilder := tags.New(&buildParams, tags.WithEC2(s.EC2Client)) if err := tagsBuilder.Ensure(existingSubnet.Tags); err != nil { return false, err @@ -144,26 +144,40 @@ func (s *Service) reconcileSubnets() error { return true, nil }, awserrors.SubnetNotFound); err != nil { if !unmanagedVPC { - record.Warnf(s.scope.InfraCluster(), "FailedTagSubnet", "Failed tagging managed Subnet %q: %v", existingSubnet.ID, err) - return errors.Wrapf(err, "failed to ensure tags on subnet %q", existingSubnet.ID) + record.Warnf(s.scope.InfraCluster(), "FailedTagSubnet", "Failed tagging managed Subnet %q: %v", existingSubnet.GetResourceID(), err) + return errors.Wrapf(err, "failed to ensure tags on subnet %q", existingSubnet.GetResourceID()) } else { // We may not have a permission to tag unmanaged subnets. // When tagging unmanaged subnet fails, record an event and proceed. 
- record.Warnf(s.scope.InfraCluster(), "FailedTagSubnet", "Failed tagging unmanaged Subnet %q: %v", existingSubnet.ID, err) + record.Warnf(s.scope.InfraCluster(), "FailedTagSubnet", "Failed tagging unmanaged Subnet %q: %v", existingSubnet.GetResourceID(), err) break } } - // Update subnet spec with the existing subnet details // TODO(vincepri): check if subnet needs to be updated. + + if len(sub.ID) > 0 { + // NOTE: Describing subnets assumes the subnet.ID is the same as the subnet's identifier (i.e. subnet-), + // if we have a subnet ID specified in the spec, we need to restore it. + existingSubnet.ID = sub.ID + } + + // Update subnet spec with the existing subnet details existingSubnet.DeepCopyInto(sub) } else if unmanagedVPC { // If there is no existing subnet and we have an umanaged vpc report an error - record.Warnf(s.scope.InfraCluster(), "FailedMatchSubnet", "Using unmanaged VPC and failed to find existing subnet for specified subnet id %d, cidr %q", sub.ID, sub.CidrBlock) - return errors.New(fmt.Errorf("usign unmanaged vpc and subnet %s (cidr %s) specified but it doesn't exist in vpc %s", sub.ID, sub.CidrBlock, s.scope.VPC().ID).Error()) + record.Warnf(s.scope.InfraCluster(), "FailedMatchSubnet", "Using unmanaged VPC and failed to find existing subnet for specified subnet id %q, cidr %q", sub.GetResourceID(), sub.CidrBlock) + return errors.New(fmt.Errorf("using unmanaged vpc and subnet %s (cidr %s) specified but it doesn't exist in vpc %s", sub.GetResourceID(), sub.CidrBlock, s.scope.VPC().ID).Error()) } } + // If we have an unmanaged VPC, require that the user has specified at least 1 subnet. + if unmanagedVPC && len(subnets) < 1 { + record.Warnf(s.scope.InfraCluster(), "FailedNoSubnet", "Expected at least 1 subnet but got 0") + return errors.New("expected at least 1 subnet but got 0") + } + + // When the VPC is managed by CAPA, we need to create the subnets. if !unmanagedVPC { // Check that we need at least 1 private and 1 public subnet after we have updated the metadata if len(subnets.FilterPrivate()) < 1 { @@ -174,18 +188,13 @@ func (s *Service) reconcileSubnets() error { record.Warnf(s.scope.InfraCluster(), "FailedNoPublicSubnet", "Expected at least 1 public subnet but got 0") return errors.New("expected at least 1 public subnet but got 0") } - } else if unmanagedVPC { - if len(subnets) < 1 { - record.Warnf(s.scope.InfraCluster(), "FailedNoSubnet", "Expected at least 1 subnet but got 0") - return errors.New("expected at least 1 subnet but got 0") - } - } - // Proceed to create the rest of the subnets that don't have an ID. - if !unmanagedVPC { + // Proceed to create the rest of the subnets that don't have an ID. for i := range subnets { subnet := &subnets[i] + + // If we have a ResourceID (i.e. subnet-), the resource was already created.
+ if subnet.ResourceID != "" { continue } @@ -197,7 +206,7 @@ func (s *Service) reconcileSubnets() error { } } - s.scope.Debug("reconciled subnets", "subnets", subnets) + s.scope.Debug("Reconciled subnets", "subnets", subnets) conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition) return nil } @@ -270,11 +279,13 @@ func (s *Service) getDefaultSubnets() (infrav1.Subnets, error) { subnets := infrav1.Subnets{} for i, zone := range zones { publicSubnet := infrav1.SubnetSpec{ + ID: fmt.Sprintf("%s-subnet-%s-%s", s.scope.Name(), infrav1.PublicRoleTagValue, zone), CidrBlock: publicSubnetCIDRs[i].String(), AvailabilityZone: zone, IsPublic: true, } privateSubnet := infrav1.SubnetSpec{ + ID: fmt.Sprintf("%s-subnet-%s-%s", s.scope.Name(), infrav1.PrivateRoleTagValue, zone), CidrBlock: privateSubnetCIDRs[i].String(), AvailabilityZone: zone, IsPublic: false, @@ -336,6 +347,7 @@ func (s *Service) describeVpcSubnets() (infrav1.Subnets, error) { for _, ec2sn := range sns.Subnets { spec := infrav1.SubnetSpec{ ID: *ec2sn.SubnetId, + ResourceID: *ec2sn.SubnetId, AvailabilityZone: *ec2sn.AvailabilityZone, Tags: converters.TagsToMap(ec2sn.Tags), } @@ -399,6 +411,16 @@ func (s *Service) describeSubnets() (*ec2.DescribeSubnetsOutput, error) { } func (s *Service) createSubnet(sn *infrav1.SubnetSpec) (*infrav1.SubnetSpec, error) { + // When managing subnets, the ID specified in the spec is the name of the subnet. + if sn.Tags == nil { + sn.Tags = make(infrav1.Tags) + } + if sn.ID != "" && !strings.HasPrefix(sn.ID, "subnet-") && sn.Tags["Name"] == "" { + // If subnet.ID isn't the subnet identifier, and the name tag isn't already set, set the Name. + sn.Tags["Name"] = sn.ID + } + + // Build the subnet creation request. input := &ec2.CreateSubnetInput{ VpcId: aws.String(s.scope.VPC().ID), CidrBlock: aws.String(sn.CidrBlock), @@ -469,10 +491,13 @@ func (s *Service) createSubnet(sn *infrav1.SubnetSpec) (*infrav1.SubnetSpec, err } subnet := &infrav1.SubnetSpec{ - ID: *out.Subnet.SubnetId, + // Preserve the original identifier. The AWS identifier `subnet-` is stored in the ResourceID field. + ID: sn.ID, + ResourceID: *out.Subnet.SubnetId, AvailabilityZone: *out.Subnet.AvailabilityZone, CidrBlock: *out.Subnet.CidrBlock, // TODO: this will panic in case of IPv6 only subnets... 
IsPublic: sn.IsPublic, + Tags: sn.Tags, } for _, set := range out.Subnet.Ipv6CidrBlockAssociationSet { if *set.Ipv6CidrBlockState.State == ec2.SubnetCidrBlockStateCodeAssociated { diff --git a/pkg/cloud/services/network/subnets_test.go b/pkg/cloud/services/network/subnets_test.go index d2d0ce71e2..f7c02d4359 100644 --- a/pkg/cloud/services/network/subnets_test.go +++ b/pkg/cloud/services/network/subnets_test.go @@ -2474,6 +2474,7 @@ func TestDiscoverSubnets(t *testing.T) { expect: []infrav1.SubnetSpec{ { ID: "subnet-1", + ResourceID: "subnet-1", AvailabilityZone: "us-east-1a", CidrBlock: "10.0.10.0/24", IsPublic: true, @@ -2484,6 +2485,7 @@ func TestDiscoverSubnets(t *testing.T) { }, { ID: "subnet-2", + ResourceID: "subnet-2", AvailabilityZone: "us-east-1a", CidrBlock: "10.0.11.0/24", IsPublic: false, diff --git a/test/e2e/data/e2e_conf.yaml b/test/e2e/data/e2e_conf.yaml index 1790ed7014..394b2e626d 100644 --- a/test/e2e/data/e2e_conf.yaml +++ b/test/e2e/data/e2e_conf.yaml @@ -137,6 +137,7 @@ providers: - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-limit-az.yaml" - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-machine-pool.yaml" - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-md-remediation.yaml" + - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-multi-az.yaml" - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-nested-multitenancy.yaml" - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-remote-management-cluster.yaml" - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-simple-multitenancy.yaml" diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-multi-az.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-multi-az.yaml index a6e2b18676..2bf031b08a 100644 --- a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-multi-az.yaml +++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-multi-az.yaml @@ -27,14 +27,18 @@ metadata: spec: network: subnets: - - availabilityZone: ${AWS_AVAILABILITY_ZONE_1} + - id: subnet-zone-1 + availabilityZone: ${AWS_AVAILABILITY_ZONE_1} cidrBlock: 10.0.0.0/24 - - availabilityZone: ${AWS_AVAILABILITY_ZONE_1} + - id: subnet-zone-2 + availabilityZone: ${AWS_AVAILABILITY_ZONE_1} cidrBlock: 10.0.1.0/24 isPublic: true - - availabilityZone: ${AWS_AVAILABILITY_ZONE_2} + - id: subnet-zone-2-2 + availabilityZone: ${AWS_AVAILABILITY_ZONE_2} cidrBlock: 10.0.2.0/24 - - availabilityZone: ${AWS_AVAILABILITY_ZONE_2} + - id: subnet-zone-2-3 + availabilityZone: ${AWS_AVAILABILITY_ZONE_2} cidrBlock: 10.0.3.0/24 isPublic: true region: ${AWS_REGION} diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/multi-az/patches/multi-az.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/multi-az/patches/multi-az.yaml index a4a158fbe1..9f1c35dd55 100644 --- a/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/multi-az/patches/multi-az.yaml +++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/multi-az/patches/multi-az.yaml @@ -6,13 +6,17 @@ metadata: spec: network: subnets: - - availabilityZone: "${AWS_AVAILABILITY_ZONE_1}" + - id: subnet-zone-1-private + availabilityZone: 
"${AWS_AVAILABILITY_ZONE_1}" cidrBlock: "10.0.0.0/24" - - availabilityZone: "${AWS_AVAILABILITY_ZONE_1}" + - id: subnet-zone-1-public + availabilityZone: "${AWS_AVAILABILITY_ZONE_1}" cidrBlock: "10.0.1.0/24" isPublic: true - - availabilityZone: "${AWS_AVAILABILITY_ZONE_2}" + - id: subnet-zone-2-private + availabilityZone: "${AWS_AVAILABILITY_ZONE_2}" cidrBlock: "10.0.2.0/24" - - availabilityZone: "${AWS_AVAILABILITY_ZONE_2}" + - id: subnet-zone-2-public + availabilityZone: "${AWS_AVAILABILITY_ZONE_2}" cidrBlock: "10.0.3.0/24" isPublic: true diff --git a/test/e2e/shared/defaults.go b/test/e2e/shared/defaults.go index 8bc736565f..5342a5cfc7 100644 --- a/test/e2e/shared/defaults.go +++ b/test/e2e/shared/defaults.go @@ -50,6 +50,7 @@ const ( AwsNodeMachineType = "AWS_NODE_MACHINE_TYPE" AwsAvailabilityZone1 = "AWS_AVAILABILITY_ZONE_1" AwsAvailabilityZone2 = "AWS_AVAILABILITY_ZONE_2" + MultiAzFlavor = "multi-az" LimitAzFlavor = "limit-az" SpotInstancesFlavor = "spot-instances" SSMFlavor = "ssm" diff --git a/test/e2e/suites/unmanaged/helpers_test.go b/test/e2e/suites/unmanaged/helpers_test.go index b4f838b31f..dc1c9bff7b 100644 --- a/test/e2e/suites/unmanaged/helpers_test.go +++ b/test/e2e/suites/unmanaged/helpers_test.go @@ -388,6 +388,35 @@ func getEvents(namespace string) *corev1.EventList { return eventsList } +func getSubnetID(filterKey, filterValue, clusterName string) *string { + var subnetOutput *ec2.DescribeSubnetsOutput + var err error + + ec2Client := ec2.New(e2eCtx.AWSSession) + subnetInput := &ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String(filterKey), + Values: []*string{ + aws.String(filterValue), + }, + }, + { + Name: aws.String("tag-key"), + Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/" + clusterName}), + }, + }, + } + + Eventually(func() int { + subnetOutput, err = ec2Client.DescribeSubnets(subnetInput) + Expect(err).NotTo(HaveOccurred()) + return len(subnetOutput.Subnets) + }, e2eCtx.E2EConfig.GetIntervals("", "wait-infra-subnets")...).Should(Equal(1)) + + return subnetOutput.Subnets[0].SubnetId +} + func getVolumeIds(info statefulSetInfo, k8sclient crclient.Client) []*string { ginkgo.By("Retrieving IDs of dynamically provisioned volumes.") statefulset := &appsv1.StatefulSet{} diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_test.go index a26ce2831c..fac43c2c56 100644 --- a/test/e2e/suites/unmanaged/unmanaged_functional_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_functional_test.go @@ -22,6 +22,7 @@ package unmanaged import ( "context" "fmt" + "os" "path/filepath" "strings" "time" @@ -583,6 +584,59 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { }) }) + ginkgo.Describe("Workload cluster in multiple AZs", func() { + ginkgo.It("It should be creatable and deletable", func() { + specName := "functional-test-multi-az" + requiredResources = &shared.TestResource{EC2Normal: 3 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx) + defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) + ginkgo.By("Creating a cluster") 
+ clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6)) + configCluster := defaultConfigCluster(clusterName, namespace.Name) + configCluster.ControlPlaneMachineCount = pointer.Int64(3) + configCluster.Flavor = shared.MultiAzFlavor + cluster, _, _ := createCluster(ctx, configCluster, result) + + ginkgo.By("Validating that the subnets were created") + awsCluster, err := GetAWSClusterByName(ctx, namespace.Name, clusterName) + Expect(err).To(BeNil()) + for _, subnet := range awsCluster.Spec.NetworkSpec.Subnets { + Expect(subnet.GetResourceID()).To(HavePrefix("subnet-")) + Expect(subnet.ResourceID).ToNot(BeEmpty()) + } + + ginkgo.By("Adding worker nodes to additional subnets") + mdName1 := clusterName + "-md-1" + mdName2 := clusterName + "-md-2" + az1 := os.Getenv(shared.AwsAvailabilityZone1) + az2 := os.Getenv(shared.AwsAvailabilityZone2) + md1 := makeMachineDeployment(namespace.Name, mdName1, clusterName, &az1, 1) + md2 := makeMachineDeployment(namespace.Name, mdName2, clusterName, &az2, 1) + + // private CIDRs set in cluster-template-multi-az.yaml. + framework.CreateMachineDeployment(ctx, framework.CreateMachineDeploymentInput{ + Creator: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + MachineDeployment: md1, + BootstrapConfigTemplate: makeJoinBootstrapConfigTemplate(namespace.Name, mdName1), + InfraMachineTemplate: makeAWSMachineTemplate(namespace.Name, mdName1, e2eCtx.E2EConfig.GetVariable(shared.AwsNodeMachineType), getSubnetID("cidr-block", "10.0.0.0/24", clusterName)), + }) + framework.CreateMachineDeployment(ctx, framework.CreateMachineDeploymentInput{ + Creator: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + MachineDeployment: md2, + BootstrapConfigTemplate: makeJoinBootstrapConfigTemplate(namespace.Name, mdName2), + InfraMachineTemplate: makeAWSMachineTemplate(namespace.Name, mdName2, e2eCtx.E2EConfig.GetVariable(shared.AwsNodeMachineType), getSubnetID("cidr-block", "10.0.2.0/24", clusterName)), + }) + + ginkgo.By("Waiting for new worker nodes to become ready") + k8sClient := e2eCtx.Environment.BootstrapClusterProxy.GetClient() + framework.WaitForMachineDeploymentNodesToExist(ctx, framework.WaitForMachineDeploymentNodesToExistInput{Lister: k8sClient, Cluster: cluster, MachineDeployment: md1}, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...) + framework.WaitForMachineDeploymentNodesToExist(ctx, framework.WaitForMachineDeploymentNodesToExistInput{Lister: k8sClient, Cluster: cluster, MachineDeployment: md2}, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...) + }) + }) + + // TODO @randomvariable: Await more resources ginkgo.PDescribe("Multiple workload clusters", func() { ginkgo.Context("in different namespaces with machine failures", func() {
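Reviewer note: to tie the subnets.go changes together, the managed-subnet flow now keeps the user's placeholder ID and records the AWS identifier separately. A condensed sketch under the same trimmed SubnetSpec assumption as above; prepareManagedSubnet and recordCreatedSubnet are hypothetical helper names for illustration, not functions in the provider:

package main

import (
	"fmt"
	"strings"
)

type Tags map[string]string

type SubnetSpec struct {
	ID         string
	ResourceID string
	Tags       Tags
}

// prepareManagedSubnet mirrors the naming convention added to createSubnet():
// a placeholder ID (one that does not start with "subnet-") becomes the Name
// tag, unless a Name tag was already provided.
func prepareManagedSubnet(sn *SubnetSpec) {
	if sn.Tags == nil {
		sn.Tags = make(Tags)
	}
	if sn.ID != "" && !strings.HasPrefix(sn.ID, "subnet-") && sn.Tags["Name"] == "" {
		sn.Tags["Name"] = sn.ID
	}
}

// recordCreatedSubnet mirrors how the CreateSubnet result is written back:
// the placeholder ID is preserved and the AWS identifier lands in ResourceID.
func recordCreatedSubnet(sn *SubnetSpec, awsSubnetID string) {
	sn.ResourceID = awsSubnetID
}

func main() {
	sn := SubnetSpec{ID: "my-cluster-subnet-private-us-east-1a"}
	prepareManagedSubnet(&sn)
	recordCreatedSubnet(&sn, "subnet-0abc1234567890def") // stand-in for the ID EC2 returns
	fmt.Printf("id=%s resourceID=%s name=%s\n", sn.ID, sn.ResourceID, sn.Tags["Name"])
	// Later reconciles skip creation because ResourceID is set, matching the
	// `if subnet.ResourceID != "" { continue }` check in reconcileSubnets.
}

This is also what the new multi-az e2e assertions verify: after reconciliation, every subnet's GetResourceID() returns an identifier with the "subnet-" prefix and ResourceID is non-empty.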