From c2b7417ef25c781d899b8db07103b13a27a66663 Mon Sep 17 00:00:00 2001 From: Tahsin Date: Sun, 7 Apr 2019 22:04:25 +0600 Subject: [PATCH] Implement bastion host --- .../examples/azure/cluster.yaml.template | 2 + ...der_v1alpha1_azureclusterproviderspec.yaml | 8 + .../azureclusterproviderconfig_types.go | 5 + pkg/apis/azureprovider/v1alpha1/types.go | 2 + pkg/cloud/azure/actuators/cluster/BUILD.bazel | 3 + .../azure/actuators/cluster/actuator_test.go | 66 +++---- .../azure/actuators/cluster/reconciler.go | 167 +++++++++++++++--- pkg/cloud/azure/defaults.go | 22 +++ .../azure/services/certificates/BUILD.bazel | 1 + .../services/certificates/certificates.go | 34 ++++ .../certificates/certificates_test.go | 37 ++++ .../networkinterfaces/networkinterfaces.go | 21 ++- .../publicloadbalancers.go | 26 --- .../services/securitygroups/securitygroups.go | 20 ++- 14 files changed, 317 insertions(+), 97 deletions(-) diff --git a/cmd/clusterctl/examples/azure/cluster.yaml.template b/cmd/clusterctl/examples/azure/cluster.yaml.template index 52d052abccb..3555c6cbf4a 100644 --- a/cmd/clusterctl/examples/azure/cluster.yaml.template +++ b/cmd/clusterctl/examples/azure/cluster.yaml.template @@ -15,6 +15,8 @@ spec: kind: "AzureClusterProviderSpec" resourceGroup: "${RESOURCE_GROUP}" location: "${LOCATION}" + sshPublicKey: ${SSH_PUBLIC_KEY} + sshPrivateKey: ${SSH_PRIVATE_KEY} networkSpec: vnet: name: "${VNET_NAME}" diff --git a/config/crds/azureprovider_v1alpha1_azureclusterproviderspec.yaml b/config/crds/azureprovider_v1alpha1_azureclusterproviderspec.yaml index f388215ba62..9929e2924b4 100644 --- a/config/crds/azureprovider_v1alpha1_azureclusterproviderspec.yaml +++ b/config/crds/azureprovider_v1alpha1_azureclusterproviderspec.yaml @@ -489,9 +489,17 @@ spec: - cert - key type: object + sshPrivateKey: + description: SSHPrivateKey is the ssh private key for the bastion host + type: string + sshPublicKey: + description: SSHPublicKey is the ssh public key for the bastion host + type: string required: - resourceGroup - location + - sshPublicKey + - sshPrivateKey version: v1alpha1 status: acceptedNames: diff --git a/pkg/apis/azureprovider/v1alpha1/azureclusterproviderconfig_types.go b/pkg/apis/azureprovider/v1alpha1/azureclusterproviderconfig_types.go index 348deaa5356..e3ac4bc78c9 100644 --- a/pkg/apis/azureprovider/v1alpha1/azureclusterproviderconfig_types.go +++ b/pkg/apis/azureprovider/v1alpha1/azureclusterproviderconfig_types.go @@ -36,6 +36,11 @@ type AzureClusterProviderSpec struct { ResourceGroup string `json:"resourceGroup"` Location string `json:"location"` + // SSHPublicKey is the ssh public key for the bastion host + SSHPublicKey string `json:"sshPublicKey"` + // SSHPrivateKey is the ssh private key for the bastion host + SSHPrivateKey string `json:"sshPrivateKey"` + // CAKeyPair is the key pair for CA certs. CAKeyPair KeyPair `json:"caKeyPair,omitempty"` diff --git a/pkg/apis/azureprovider/v1alpha1/types.go b/pkg/apis/azureprovider/v1alpha1/types.go index 0a6ca7deb38..1c9a990d093 100644 --- a/pkg/apis/azureprovider/v1alpha1/types.go +++ b/pkg/apis/azureprovider/v1alpha1/types.go @@ -74,6 +74,8 @@ const ( ControlPlane string = "controlplane" // Node machine label Node string = "node" + // Bastion matching label + Bastion string = "bastion" ) // Network encapsulates Azure networking resources. 
diff --git a/pkg/cloud/azure/actuators/cluster/BUILD.bazel b/pkg/cloud/azure/actuators/cluster/BUILD.bazel index 42ff06ea2e7..1f69d4c6847 100644 --- a/pkg/cloud/azure/actuators/cluster/BUILD.bazel +++ b/pkg/cloud/azure/actuators/cluster/BUILD.bazel @@ -9,16 +9,19 @@ go_library( importpath = "sigs.k8s.io/cluster-api-provider-azure/pkg/cloud/azure/actuators/cluster", visibility = ["//visibility:public"], deps = [ + "//pkg/apis/azureprovider/v1alpha1:go_default_library", "//pkg/cloud/azure:go_default_library", "//pkg/cloud/azure/actuators:go_default_library", "//pkg/cloud/azure/services/certificates:go_default_library", "//pkg/cloud/azure/services/groups:go_default_library", "//pkg/cloud/azure/services/internalloadbalancers:go_default_library", + "//pkg/cloud/azure/services/networkinterfaces:go_default_library", "//pkg/cloud/azure/services/publicips:go_default_library", "//pkg/cloud/azure/services/publicloadbalancers:go_default_library", "//pkg/cloud/azure/services/routetables:go_default_library", "//pkg/cloud/azure/services/securitygroups:go_default_library", "//pkg/cloud/azure/services/subnets:go_default_library", + "//pkg/cloud/azure/services/virtualmachines:go_default_library", "//pkg/cloud/azure/services/virtualnetworks:go_default_library", "//pkg/deployer:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", diff --git a/pkg/cloud/azure/actuators/cluster/actuator_test.go b/pkg/cloud/azure/actuators/cluster/actuator_test.go index 96db4ae478e..a526c718060 100644 --- a/pkg/cloud/azure/actuators/cluster/actuator_test.go +++ b/pkg/cloud/azure/actuators/cluster/actuator_test.go @@ -45,16 +45,18 @@ func TestReconcileSuccess(t *testing.T) { fakeNotFoundSvc := &azure.FakeNotFoundService{} fakeReconciler := &Reconciler{ - scope: newFakeScope(), - groupsSvc: fakeSuccessSvc, - certificatesSvc: fakeSuccessSvc, - vnetSvc: fakeSuccessSvc, - securityGroupSvc: fakeSuccessSvc, - routeTableSvc: fakeSuccessSvc, - subnetsSvc: fakeSuccessSvc, - internalLBSvc: fakeSuccessSvc, - publicIPSvc: fakeSuccessSvc, - publicLBSvc: fakeSuccessSvc, + scope: newFakeScope(), + certificatesSvc: fakeSuccessSvc, + groupsSvc: fakeSuccessSvc, + vnetSvc: fakeSuccessSvc, + securityGroupSvc: fakeSuccessSvc, + routeTableSvc: fakeSuccessSvc, + subnetsSvc: fakeSuccessSvc, + internalLBSvc: fakeSuccessSvc, + publicIPSvc: fakeSuccessSvc, + publicLBSvc: fakeSuccessSvc, + virtualMachineSvc: fakeSuccessSvc, + networkInterfacesSvc: fakeSuccessSvc, } if err := fakeReconciler.Reconcile(); err != nil { @@ -102,16 +104,18 @@ func TestPublicIPNonEmpty(t *testing.T) { fakeSuccessSvc := &azure.FakeSuccessService{} fakeReconciler := &Reconciler{ - scope: newFakeScope(), - groupsSvc: fakeSuccessSvc, - certificatesSvc: fakeSuccessSvc, - vnetSvc: fakeSuccessSvc, - securityGroupSvc: fakeSuccessSvc, - routeTableSvc: fakeSuccessSvc, - subnetsSvc: fakeSuccessSvc, - internalLBSvc: fakeSuccessSvc, - publicIPSvc: fakeSuccessSvc, - publicLBSvc: fakeSuccessSvc, + scope: newFakeScope(), + certificatesSvc: fakeSuccessSvc, + groupsSvc: fakeSuccessSvc, + vnetSvc: fakeSuccessSvc, + securityGroupSvc: fakeSuccessSvc, + routeTableSvc: fakeSuccessSvc, + subnetsSvc: fakeSuccessSvc, + internalLBSvc: fakeSuccessSvc, + publicIPSvc: fakeSuccessSvc, + publicLBSvc: fakeSuccessSvc, + virtualMachineSvc: fakeSuccessSvc, + networkInterfacesSvc: fakeSuccessSvc, } if err := fakeReconciler.Reconcile(); err != nil { @@ -138,16 +142,18 @@ func TestServicesCreatedCount(t *testing.T) { fakeSuccessSvc := &azure.FakeCachedService{Cache: &cache} fakeReconciler := 
&Reconciler{ - scope: newFakeScope(), - groupsSvc: fakeSuccessSvc, - certificatesSvc: fakeSuccessSvc, - vnetSvc: fakeSuccessSvc, - securityGroupSvc: fakeSuccessSvc, - routeTableSvc: fakeSuccessSvc, - subnetsSvc: fakeSuccessSvc, - internalLBSvc: fakeSuccessSvc, - publicIPSvc: fakeSuccessSvc, - publicLBSvc: fakeSuccessSvc, + scope: newFakeScope(), + certificatesSvc: fakeSuccessSvc, + groupsSvc: fakeSuccessSvc, + vnetSvc: fakeSuccessSvc, + securityGroupSvc: fakeSuccessSvc, + routeTableSvc: fakeSuccessSvc, + subnetsSvc: fakeSuccessSvc, + internalLBSvc: fakeSuccessSvc, + publicIPSvc: fakeSuccessSvc, + publicLBSvc: fakeSuccessSvc, + virtualMachineSvc: fakeSuccessSvc, + networkInterfacesSvc: fakeSuccessSvc, } if err := fakeReconciler.Reconcile(); err != nil { diff --git a/pkg/cloud/azure/actuators/cluster/reconciler.go b/pkg/cloud/azure/actuators/cluster/reconciler.go index be1b531bc02..bf3aa51fe36 100644 --- a/pkg/cloud/azure/actuators/cluster/reconciler.go +++ b/pkg/cloud/azure/actuators/cluster/reconciler.go @@ -17,8 +17,14 @@ limitations under the License. package cluster import ( + "encoding/base64" + "fmt" + + "sigs.k8s.io/cluster-api-provider-azure/pkg/cloud/azure/services/networkinterfaces" + "github.com/pkg/errors" "k8s.io/klog" + "sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1alpha1" "sigs.k8s.io/cluster-api-provider-azure/pkg/cloud/azure" "sigs.k8s.io/cluster-api-provider-azure/pkg/cloud/azure/actuators" "sigs.k8s.io/cluster-api-provider-azure/pkg/cloud/azure/services/certificates" @@ -29,36 +35,41 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/pkg/cloud/azure/services/routetables" "sigs.k8s.io/cluster-api-provider-azure/pkg/cloud/azure/services/securitygroups" "sigs.k8s.io/cluster-api-provider-azure/pkg/cloud/azure/services/subnets" + "sigs.k8s.io/cluster-api-provider-azure/pkg/cloud/azure/services/virtualmachines" "sigs.k8s.io/cluster-api-provider-azure/pkg/cloud/azure/services/virtualnetworks" ) // Reconciler are list of services required by cluster actuator, easy to create a fake type Reconciler struct { - scope *actuators.Scope - certificatesSvc azure.Service - groupsSvc azure.Service - vnetSvc azure.Service - securityGroupSvc azure.Service - routeTableSvc azure.Service - subnetsSvc azure.Service - internalLBSvc azure.Service - publicIPSvc azure.Service - publicLBSvc azure.Service + scope *actuators.Scope + certificatesSvc azure.Service + groupsSvc azure.Service + vnetSvc azure.Service + securityGroupSvc azure.Service + routeTableSvc azure.Service + subnetsSvc azure.Service + internalLBSvc azure.Service + publicIPSvc azure.Service + publicLBSvc azure.Service + virtualMachineSvc azure.Service + networkInterfacesSvc azure.Service } // NewReconciler populates all the services based on input scope func NewReconciler(scope *actuators.Scope) *Reconciler { return &Reconciler{ - scope: scope, - certificatesSvc: certificates.NewService(scope), - groupsSvc: groups.NewService(scope), - vnetSvc: virtualnetworks.NewService(scope), - securityGroupSvc: securitygroups.NewService(scope), - routeTableSvc: routetables.NewService(scope), - subnetsSvc: subnets.NewService(scope), - internalLBSvc: internalloadbalancers.NewService(scope), - publicIPSvc: publicips.NewService(scope), - publicLBSvc: publicloadbalancers.NewService(scope), + scope: scope, + certificatesSvc: certificates.NewService(scope), + groupsSvc: groups.NewService(scope), + vnetSvc: virtualnetworks.NewService(scope), + securityGroupSvc: securitygroups.NewService(scope), + routeTableSvc: 
routetables.NewService(scope), + subnetsSvc: subnets.NewService(scope), + internalLBSvc: internalloadbalancers.NewService(scope), + publicIPSvc: publicips.NewService(scope), + publicLBSvc: publicloadbalancers.NewService(scope), + virtualMachineSvc: virtualmachines.NewService(scope), + networkInterfacesSvc: networkinterfaces.NewService(scope), } } @@ -84,21 +95,29 @@ func (r *Reconciler) Reconcile() error { return errors.Wrapf(err, "failed to reconcile virtual network for cluster %s", r.scope.Cluster.Name) } sgSpec := &securitygroups.Spec{ - Name: azure.GenerateControlPlaneSecurityGroupName(r.scope.Cluster.Name), - IsControlPlane: true, + Name: azure.GenerateControlPlaneSecurityGroupName(r.scope.Cluster.Name), + Role: v1alpha1.ControlPlane, } if err := r.securityGroupSvc.Reconcile(r.scope.Context, sgSpec); err != nil { return errors.Wrapf(err, "failed to reconcile control plane network security group for cluster %s", r.scope.Cluster.Name) } sgSpec = &securitygroups.Spec{ - Name: azure.GenerateNodeSecurityGroupName(r.scope.Cluster.Name), - IsControlPlane: false, + Name: azure.GenerateNodeSecurityGroupName(r.scope.Cluster.Name), + Role: v1alpha1.Node, } if err := r.securityGroupSvc.Reconcile(r.scope.Context, sgSpec); err != nil { return errors.Wrapf(err, "failed to reconcile node network security group for cluster %s", r.scope.Cluster.Name) } + sgSpec = &securitygroups.Spec{ + Name: azure.GenerateBastionSecurityGroupName(r.scope.Cluster.Name), + Role: v1alpha1.Bastion, + } + if err := r.securityGroupSvc.Reconcile(r.scope.Context, sgSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile bastion network security group for cluster %s", r.scope.Cluster.Name) + } + rtSpec := &routetables.Spec{ Name: azure.GenerateNodeRouteTableName(r.scope.Cluster.Name), } @@ -127,6 +146,16 @@ func (r *Reconciler) Reconcile() error { return errors.Wrapf(err, "failed to reconcile node subnet for cluster %s", r.scope.Cluster.Name) } + subnetSpec = &subnets.Spec{ + Name: azure.GenerateBastionSubnetName(r.scope.Cluster.Name), + CIDR: azure.DefaultBastionSubnetCIDR, + VnetName: azure.GenerateVnetName(r.scope.Cluster.Name), + SecurityGroupName: azure.GenerateBastionSecurityGroupName(r.scope.Cluster.Name), + } + if err := r.subnetsSvc.Reconcile(r.scope.Context, subnetSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile bastion subnet for cluster %s", r.scope.Cluster.Name) + } + internalLBSpec := &internalloadbalancers.Spec{ Name: azure.GenerateInternalLBName(r.scope.Cluster.Name), SubnetName: azure.GenerateControlPlaneSubnetName(r.scope.Cluster.Name), @@ -152,10 +181,56 @@ func (r *Reconciler) Reconcile() error { return errors.Wrapf(err, "failed to reconcile control plane public load balancer for cluster %s", r.scope.Cluster.Name) } + if err := reconcileBastion(r); err != nil { + return errors.Wrapf(err, "failed to reconcile bastion host for cluster %s", r.scope.Cluster.Name) + } + klog.V(2).Infof("successfully reconciled cluster %s", r.scope.Cluster.Name) return nil } +func reconcileBastion(r *Reconciler) error { + bastionNicSpec := &networkinterfaces.Spec{ + Name: azure.GenerateBastionNicName(r.scope.Cluster.Name), + SubnetName: azure.GenerateBastionSubnetName(r.scope.Cluster.Name), + VnetName: azure.GenerateVnetName(r.scope.Cluster.Name), + PublicLoadBalancerName: azure.GeneratePublicLBName(r.scope.Cluster.Name), + IsBastion: true, + } + + if err := r.networkInterfacesSvc.Reconcile(r.scope.Context, bastionNicSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile 
bastion network interface for cluster %s", r.scope.Cluster.Name) + } + + bastionPublicKey, err := base64.StdEncoding.DecodeString(r.scope.ClusterConfig.SSHPublicKey) + if err != nil { + return errors.Wrap(err, "failed to decode ssh public key for bastion host") + } + + bastionSpec := &virtualmachines.Spec{ + Name: fmt.Sprintf("%s-bastion", r.scope.Cluster.Name), + NICName: azure.GenerateBastionNicName(r.scope.Cluster.Name), + SSHKeyData: string(bastionPublicKey), + Size: "Standard_B1ls", + Image: v1alpha1.Image{ + Publisher: "Canonical", + Offer: "UbuntuServer", + SKU: "18.04-LTS", + Version: "latest", + }, + OSDisk: v1alpha1.OSDisk{ + OSType: "Linux", + DiskSizeGB: 30, + }, + } + + if err := r.virtualMachineSvc.Reconcile(r.scope.Context, bastionSpec); err != nil { + return errors.Wrapf(err, "failed to reconcile bastion instance for cluster %s", r.scope.Cluster.Name) + } + + return nil +} + // Delete reconciles all the services in pre determined order func (r *Reconciler) Delete() error { if err := r.deleteLB(); err != nil { @@ -188,6 +263,10 @@ func (r *Reconciler) Delete() error { } } + if err := r.deleteBastion(); err != nil { + return errors.Wrap(err, "failed to delete bastion") + } + if err := r.groupsSvc.Delete(r.scope.Context, nil); err != nil { if !azure.ResourceNotFound(err) { return errors.Wrapf(err, "failed to delete resource group for cluster %s", r.scope.Cluster.Name) @@ -238,6 +317,16 @@ func (r *Reconciler) deleteSubnets() error { } } + subnetSpec = &subnets.Spec{ + Name: azure.GenerateBastionSubnetName(r.scope.Cluster.Name), + VnetName: azure.GenerateVnetName(r.scope.Cluster.Name), + } + if err := r.subnetsSvc.Delete(r.scope.Context, subnetSpec); err != nil { + if !azure.ResourceNotFound(err) { + return errors.Wrapf(err, "failed to delete %s subnet for cluster %s", azure.GenerateBastionSubnetName(r.scope.Cluster.Name), r.scope.Cluster.Name) + } + } + subnetSpec = &subnets.Spec{ Name: azure.GenerateControlPlaneSubnetName(r.scope.Cluster.Name), VnetName: azure.GenerateVnetName(r.scope.Cluster.Name), @@ -269,5 +358,35 @@ func (r *Reconciler) deleteNSG() error { } } + sgSpec = &securitygroups.Spec{ + Name: azure.GenerateBastionSecurityGroupName(r.scope.Cluster.Name), + } + if err := r.securityGroupSvc.Delete(r.scope.Context, sgSpec); err != nil { + if !azure.ResourceNotFound(err) { + return errors.Wrapf(err, "failed to delete security group %s for cluster %s", azure.GenerateBastionSecurityGroupName(r.scope.Cluster.Name), r.scope.Cluster.Name) + } + } + return nil +} + +func (r *Reconciler) deleteBastion() error { + vmSpec := &virtualmachines.Spec{ + Name: azure.GenerateBastionVMName(r.scope.Cluster.Name), + } + + err := r.virtualMachineSvc.Delete(r.scope.Context, vmSpec) + if err != nil { + return errors.Wrapf(err, "failed to delete bastion virtual machine for cluster %s", r.scope.Cluster.Name) + } + + networkInterfaceSpec := &networkinterfaces.Spec{ + Name: azure.GenerateBastionNicName(r.scope.Cluster.Name), + VnetName: azure.GenerateVnetName(r.scope.Cluster.Name), + } + + err = r.networkInterfacesSvc.Delete(r.scope.Context, networkInterfaceSpec) + if err != nil { + return errors.Wrapf(err, "failed to delete bastion network interface for cluster %s", r.scope.Cluster.Name) + } return nil } diff --git a/pkg/cloud/azure/defaults.go b/pkg/cloud/azure/defaults.go index 621d266f317..b991b58105e 100644 --- a/pkg/cloud/azure/defaults.go +++ b/pkg/cloud/azure/defaults.go @@ -27,6 +27,8 @@ const ( DefaultControlPlaneSubnetCIDR = "10.0.0.0/16" // DefaultNodeSubnetCIDR is the default Node Subnet CIDR DefaultNodeSubnetCIDR = "10.1.0.0/16" + // DefaultBastionSubnetCIDR is the 
default Bastion Subnet CIDR + DefaultBastionSubnetCIDR = "10.2.0.0/16" // DefaultInternalLBIPAddress is the default internal load balancer ip address DefaultInternalLBIPAddress = "10.0.0.100" // DefaultAzureDNSZone is the default provided azure dns zone @@ -50,6 +52,11 @@ func GenerateNodeSecurityGroupName(clusterName string) string { return fmt.Sprintf("%s-%s", clusterName, "node-nsg") } +// GenerateBastionSecurityGroupName generates a bastion security group name, based on the cluster name. +func GenerateBastionSecurityGroupName(clusterName string) string { + return fmt.Sprintf("%s-%s", clusterName, "bastion-nsg") +} + // GenerateNodeRouteTableName generates a node route table name, based on the cluster name. func GenerateNodeRouteTableName(clusterName string) string { return fmt.Sprintf("%s-%s", clusterName, "node-routetable") @@ -65,6 +72,16 @@ func GenerateNodeSubnetName(clusterName string) string { return fmt.Sprintf("%s-%s", clusterName, "node-subnet") } +// GenerateBastionSubnetName generates a bastion subnet name, based on the cluster name. +func GenerateBastionSubnetName(clusterName string) string { + return fmt.Sprintf("%s-%s", clusterName, "bastion-subnet") +} + +// GenerateBastionNicName generates a bastion network interface name, based on the cluster name. +func GenerateBastionNicName(clusterName string) string { + return fmt.Sprintf("%s-%s", clusterName, "bastion-nic") +} + // GenerateInternalLBName generates a internal load balancer name, based on the cluster name. func GenerateInternalLBName(clusterName string) string { return fmt.Sprintf("%s-%s", clusterName, "internal-lb") @@ -89,3 +106,8 @@ func GenerateFQDN(publicIPName, location string) string { func GenerateOSDiskName(clusterName string) string { return fmt.Sprintf("%s_OSDisk", clusterName) } + +// GenerateBastionVMName generates the name of the bastion host VM, based on the cluster name. +func GenerateBastionVMName(clusterName string) string { + return fmt.Sprintf("%s-bastion", clusterName) +} diff --git a/pkg/cloud/azure/services/certificates/BUILD.bazel b/pkg/cloud/azure/services/certificates/BUILD.bazel index 29e315029ba..958c8be9ae9 100644 --- a/pkg/cloud/azure/services/certificates/BUILD.bazel +++ b/pkg/cloud/azure/services/certificates/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "//pkg/cloud/azure:go_default_library", "//pkg/cloud/azure/actuators:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/golang.org/x/crypto/ssh:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", diff --git a/pkg/cloud/azure/services/certificates/certificates.go b/pkg/cloud/azure/services/certificates/certificates.go index 13c43c8d41f..8033130a4de 100644 --- a/pkg/cloud/azure/services/certificates/certificates.go +++ b/pkg/cloud/azure/services/certificates/certificates.go @@ -18,12 +18,18 @@ package certificates import ( "context" + "crypto/rand" + "crypto/rsa" "crypto/x509" + "encoding/base64" + "encoding/pem" "fmt" "io/ioutil" "os" "time" + "golang.org/x/crypto/ssh" + "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -84,6 +90,10 @@ func (s *Service) Reconcile(ctx context.Context, spec v1alpha1.ResourceSpec) err return errors.Wrapf(err, "Failed to generate sa certs: %q", err) } + if err := CreateBastionSSHKeys(s.scope.ClusterConfig); err != nil { + return errors.Wrap(err, "Failed to generate ssh keys for bastion 
host") + } + kubeConfigDir := tmpDirName + "/kubeconfigs" if err := CreateKubeconfigs(cfg, kubeConfigDir); err != nil { return errors.Wrapf(err, "Failed to generate kubeconfigs: %q", err) @@ -299,3 +309,27 @@ func updateClusterConfigKubeConfig(clusterConfig *v1alpha1.AzureClusterProviderS } return nil } + +func CreateBastionSSHKeys(clusterConfig *v1alpha1.AzureClusterProviderSpec) error { + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return err + } + publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey) + if err != nil { + return err + } + + if len(clusterConfig.SSHPublicKey) <= 0 { + clusterConfig.SSHPublicKey = base64.StdEncoding.EncodeToString(ssh.MarshalAuthorizedKey(publicKey)) + } + if len(clusterConfig.SSHPrivateKey) <= 0 { + clusterConfig.SSHPrivateKey = base64.StdEncoding.EncodeToString(pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Headers: nil, + Bytes: x509.MarshalPKCS1PrivateKey(privateKey), + })) + } + + return nil +} diff --git a/pkg/cloud/azure/services/certificates/certificates_test.go b/pkg/cloud/azure/services/certificates/certificates_test.go index 02404e8c658..786a207fc77 100644 --- a/pkg/cloud/azure/services/certificates/certificates_test.go +++ b/pkg/cloud/azure/services/certificates/certificates_test.go @@ -18,6 +18,7 @@ package certificates import ( "context" + "encoding/base64" "reflect" "testing" "time" @@ -80,3 +81,39 @@ func TestReconcileCertificates(t *testing.T) { t.Errorf("Expected ca key pair not be regenerated") } } + +func TestCreateBastionSSHKeys(t *testing.T) { + type args struct { + clusterConfig *v1alpha1.AzureClusterProviderSpec + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "create ssh key-pairs", + args: args{ + clusterConfig: &v1alpha1.AzureClusterProviderSpec{}, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := CreateBastionSSHKeys(tt.args.clusterConfig); (err != nil) != tt.wantErr { + t.Errorf("CreateBastionSSHKeys() error = %v, wantErr %v", err, tt.wantErr) + } else { + b, _ := base64.StdEncoding.DecodeString(tt.args.clusterConfig.SSHPublicKey) + if len(b) <= 0 { + t.Errorf("ssh public key can't be empty") + } + + b, _ = base64.StdEncoding.DecodeString(tt.args.clusterConfig.SSHPrivateKey) + if len(b) <= 0 { + t.Errorf("ssh private key can't be empty") + } + } + }) + } +} diff --git a/pkg/cloud/azure/services/networkinterfaces/networkinterfaces.go b/pkg/cloud/azure/services/networkinterfaces/networkinterfaces.go index 9aa3e875d54..97f8065dc06 100644 --- a/pkg/cloud/azure/services/networkinterfaces/networkinterfaces.go +++ b/pkg/cloud/azure/services/networkinterfaces/networkinterfaces.go @@ -38,7 +38,7 @@ type Spec struct { StaticIPAddress string PublicLoadBalancerName string InternalLoadBalancerName string - NatRule int + IsBastion bool } // Get provides information about a network interface. 
@@ -94,14 +94,17 @@ func (s *Service) Reconcile(ctx context.Context, spec v1alpha1.ResourceSpec) err return errors.New("public load balancer get returned invalid network interface") } - backendAddressPools = append(backendAddressPools, - network.BackendAddressPool{ - ID: (*lb.BackendAddressPools)[0].ID, - }) - nicConfig.LoadBalancerInboundNatRules = &[]network.InboundNatRule{ - { - ID: (*lb.InboundNatRules)[nicSpec.NatRule].ID, - }, + if !nicSpec.IsBastion { + backendAddressPools = append(backendAddressPools, + network.BackendAddressPool{ + ID: (*lb.BackendAddressPools)[0].ID, + }) + } else { + nicConfig.LoadBalancerInboundNatRules = &[]network.InboundNatRule{ + { + ID: (*lb.InboundNatRules)[0].ID, + }, + } } } if nicSpec.InternalLoadBalancerName != "" { diff --git a/pkg/cloud/azure/services/publicloadbalancers/publicloadbalancers.go b/pkg/cloud/azure/services/publicloadbalancers/publicloadbalancers.go index 0a9da24b63a..c0a0ce94ba3 100644 --- a/pkg/cloud/azure/services/publicloadbalancers/publicloadbalancers.go +++ b/pkg/cloud/azure/services/publicloadbalancers/publicloadbalancers.go @@ -144,32 +144,6 @@ func (s *Service) Reconcile(ctx context.Context, spec v1alpha1.ResourceSpec) err }, }, }, - { - Name: to.StringPtr("natRule2"), - InboundNatRulePropertiesFormat: &network.InboundNatRulePropertiesFormat{ - Protocol: network.TransportProtocolTCP, - FrontendPort: to.Int32Ptr(2201), - BackendPort: to.Int32Ptr(22), - EnableFloatingIP: to.BoolPtr(false), - IdleTimeoutInMinutes: to.Int32Ptr(4), - FrontendIPConfiguration: &network.SubResource{ - ID: to.StringPtr(fmt.Sprintf("/%s/%s/frontendIPConfigurations/%s", idPrefix, lbName, frontEndIPConfigName)), - }, - }, - }, - { - Name: to.StringPtr("natRule3"), - InboundNatRulePropertiesFormat: &network.InboundNatRulePropertiesFormat{ - Protocol: network.TransportProtocolTCP, - FrontendPort: to.Int32Ptr(2202), - BackendPort: to.Int32Ptr(22), - EnableFloatingIP: to.BoolPtr(false), - IdleTimeoutInMinutes: to.Int32Ptr(4), - FrontendIPConfiguration: &network.SubResource{ - ID: to.StringPtr(fmt.Sprintf("/%s/%s/frontendIPConfigurations/%s", idPrefix, lbName, frontEndIPConfigName)), - }, - }, - }, }, }, }) diff --git a/pkg/cloud/azure/services/securitygroups/securitygroups.go b/pkg/cloud/azure/services/securitygroups/securitygroups.go index ebb1a403de5..e8ca579a197 100644 --- a/pkg/cloud/azure/services/securitygroups/securitygroups.go +++ b/pkg/cloud/azure/services/securitygroups/securitygroups.go @@ -29,8 +29,8 @@ import ( // Spec specification for network security groups type Spec struct { - Name string - IsControlPlane bool + Name string + Role string } // Get provides information about a network security group. 
@@ -57,33 +57,37 @@ func (s *Service) Reconcile(ctx context.Context, spec v1alpha1.ResourceSpec) err securityRules := &[]network.SecurityRule{} - if nsgSpec.IsControlPlane { + if nsgSpec.Role == v1alpha1.ControlPlane { klog.V(2).Infof("using additional rules for control plane %s", nsgSpec.Name) securityRules = &[]network.SecurityRule{ { - Name: to.StringPtr("allow_ssh"), + Name: to.StringPtr("allow_6443"), SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ Protocol: network.SecurityRuleProtocolTCP, SourceAddressPrefix: to.StringPtr("*"), SourcePortRange: to.StringPtr("*"), DestinationAddressPrefix: to.StringPtr("*"), - DestinationPortRange: to.StringPtr("22"), + DestinationPortRange: to.StringPtr("6443"), Access: network.SecurityRuleAccessAllow, Direction: network.SecurityRuleDirectionInbound, Priority: to.Int32Ptr(100), }, }, + } + } else if nsgSpec.Role == v1alpha1.Bastion { + klog.V(2).Infof("using additional rules for bastion %s", nsgSpec.Name) + securityRules = &[]network.SecurityRule{ { - Name: to.StringPtr("allow_6443"), + Name: to.StringPtr("allow_ssh"), SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ Protocol: network.SecurityRuleProtocolTCP, SourceAddressPrefix: to.StringPtr("*"), SourcePortRange: to.StringPtr("*"), DestinationAddressPrefix: to.StringPtr("*"), - DestinationPortRange: to.StringPtr("6443"), + DestinationPortRange: to.StringPtr("22"), Access: network.SecurityRuleAccessAllow, Direction: network.SecurityRuleDirectionInbound, - Priority: to.Int32Ptr(101), + Priority: to.Int32Ptr(100), }, }, }