diff --git a/Makefile b/Makefile
index 9300d0827..9db2ed276 100644
--- a/Makefile
+++ b/Makefile
@@ -270,7 +270,7 @@ release: clean-release ## Builds and push container images using the latest git
 	$(MAKE) release-templates
 
 .PHONY: release-manifests
-release-manifests: manifests $(KUSTOMIZE) $(RELEASE_DIR) ## Builds the manifests to publish with a release
+release-manifests: manifests kustomize $(RELEASE_DIR) ## Builds the manifests to publish with a release
 	$(KUSTOMIZE) build config/default > $(RELEASE_DIR)/infrastructure-components.yaml
 
 .PHONY: release-templates
@@ -416,7 +416,7 @@ tilt-up: envsubst yq kustomize cluster ## Start a mgt-cluster & Tilt. Installs
 	EXP_CLUSTER_RESOURCE_SET=true tilt up
 
 .PHONY: create-workload-cluster
-create-workload-cluster: $(KUSTOMIZE) $(ENVSUBST) ## Creates a workload-cluster. ENV Variables need to be exported or defined in the tilt-settings.json
+create-workload-cluster-with-network: kustomize envsubst ## Creates a workload-cluster. ENV Variables need to be exported or defined in the tilt-settings.json
 	# Create workload Cluster.
 	$(ENVSUBST) -i templates/cluster-template-hcloud-network.yaml | kubectl apply -f -
 
@@ -440,7 +440,33 @@ create-workload-cluster: $(KUSTOMIZE) $(ENVSUBST) ## Creates a workload-cluster.
 	@echo 'run "kubectl --kubeconfig=$(CAPH_WORKER_CLUSTER_KUBECONFIG) ..." to work with the new target cluster'
 
-create-talos-workload-cluster: $(KUSTOMIZE) $(ENVSUBST) ## Creates a workload-cluster. ENV Variables need to be exported or defined in the tilt-settings.json
+create-workload-cluster: kustomize envsubst ## Creates a workload-cluster. ENV Variables need to be exported or defined in the tilt-settings.json
+	# Create workload Cluster.
+	$(ENVSUBST) -i templates/cluster-template.yaml | kubectl apply -f -
+
+	# Wait for the kubeconfig to become available.
+	${TIMEOUT} 5m bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done"
+	# Get kubeconfig and store it locally.
+	kubectl get secrets $(CLUSTER_NAME)-kubeconfig -o json | jq -r .data.value | base64 --decode > $(CAPH_WORKER_CLUSTER_KUBECONFIG)
+	${TIMEOUT} 15m bash -c "while ! kubectl --kubeconfig=$(CAPH_WORKER_CLUSTER_KUBECONFIG) get nodes | grep master; do sleep 1; done"
+
+	# Deploy cilium
+	helm repo add cilium https://helm.cilium.io/
+	KUBECONFIG=$(CAPH_WORKER_CLUSTER_KUBECONFIG) helm upgrade --install cilium cilium/cilium --version 1.10.5 \
+	--namespace kube-system \
+	-f templates/cilium/cilium.yaml
+
+	# Deploy HCloud Cloud Controller Manager
+	helm repo add syself https://charts.syself.com
+	KUBECONFIG=$(CAPH_WORKER_CLUSTER_KUBECONFIG) helm upgrade --install ccm syself/ccm-hcloud --version 1.0.0 \
+	--namespace kube-system \
+	--set secret.name=hetzner-token \
+	--set privateNetwork.enabled=false
+
+	@echo 'run "kubectl --kubeconfig=$(CAPH_WORKER_CLUSTER_KUBECONFIG) ..." to work with the new target cluster'
+
+
+create-talos-workload-cluster: kustomize envsubst ## Creates a workload-cluster. ENV Variables need to be exported or defined in the tilt-settings.json
 	# Create workload Cluster.
 	$(ENVSUBST) -i templates/cluster-template-talos.yaml | kubectl apply -f -
diff --git a/Tiltfile b/Tiltfile
index ea86ea92e..04f501f0b 100644
--- a/Tiltfile
+++ b/Tiltfile
@@ -302,6 +302,22 @@ caph()
 
 waitforsystem()
 
+cmd_button(
+    "Create Talos Cluster",
+    argv = ["make", "create-talos-workload-cluster"],
+    location = location.NAV,
+    icon_name = "circle_triangle_up_fill",
+    text = "Create Talos Cluster",
+)
+
+cmd_button(
+    "Create Cluster With Private Network",
+    argv = ["make", "create-workload-cluster-with-network"],
+    location = location.NAV,
+    icon_name = "circle_chevron_up_fill",
+    text = "Create Cluster With Private Network",
+)
+
 cmd_button(
     "Create Workload Cluster",
     argv = ["make", "create-workload-cluster"],
@@ -311,9 +327,9 @@ cmd_button(
 )
 
 cmd_button(
-    "Delete Workload Cluster",
+    "Delete Cluster",
     argv = ["make", "delete-workload-cluster"],
     location = location.NAV,
     icon_name = "cloud_download",
-    text = "Delete Workload Cluster",
+    text = "Delete Cluster",
 )
diff --git a/api/v1beta1/hetznercluster_types.go b/api/v1beta1/hetznercluster_types.go
index f5261a343..36bf11809 100644
--- a/api/v1beta1/hetznercluster_types.go
+++ b/api/v1beta1/hetznercluster_types.go
@@ -100,7 +100,7 @@ type HetznerClusterStatus struct {
 	// +optional
 	Network *NetworkStatus `json:"networkStatus,omitempty"`
 
-	ControlPlaneLoadBalancer LoadBalancerStatus `json:"controlPlaneLoadBalancer,omitempty"`
+	ControlPlaneLoadBalancer *LoadBalancerStatus `json:"controlPlaneLoadBalancer,omitempty"`
 	// +optional
 	HCloudPlacementGroup []HCloudPlacementGroupStatus `json:"hcloudPlacementGroup,omitempty"`
 	FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
index ec12757df..a71e3bdd9 100644
--- a/api/v1beta1/zz_generated.deepcopy.go
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -413,7 +413,11 @@ func (in *HetznerClusterStatus) DeepCopyInto(out *HetznerClusterStatus) {
 		*out = new(NetworkStatus)
 		(*in).DeepCopyInto(*out)
 	}
-	in.ControlPlaneLoadBalancer.DeepCopyInto(&out.ControlPlaneLoadBalancer)
+	if in.ControlPlaneLoadBalancer != nil {
+		in, out := &in.ControlPlaneLoadBalancer, &out.ControlPlaneLoadBalancer
+		*out = new(LoadBalancerStatus)
+		(*in).DeepCopyInto(*out)
+	}
 	if in.HCloudPlacementGroup != nil {
 		in, out := &in.HCloudPlacementGroup, &out.HCloudPlacementGroup
 		*out = make([]HCloudPlacementGroupStatus, len(*in))
diff --git a/pkg/services/hcloud/loadbalancer/loadbalancer.go b/pkg/services/hcloud/loadbalancer/loadbalancer.go
index dc860b1da..fb9c7b728 100644
--- a/pkg/services/hcloud/loadbalancer/loadbalancer.go
+++ b/pkg/services/hcloud/loadbalancer/loadbalancer.go
@@ -53,7 +53,8 @@ func (s *Service) Reconcile(ctx context.Context) (err error) {
 	}
 
 	// update current status
-	s.scope.HetznerCluster.Status.ControlPlaneLoadBalancer = s.apiToStatus(lb)
+	lbStatus := s.apiToStatus(lb)
+	s.scope.HetznerCluster.Status.ControlPlaneLoadBalancer = &lbStatus
 
 	return nil
 }
@@ -135,6 +136,12 @@ func (s *Service) createLoadBalancer(ctx context.Context) (*hcloud.LoadBalancer,
 
 	clusterTagKey := infrav1.ClusterTagKey(s.scope.HetznerCluster.Name)
 
+	var network *hcloud.Network
+	if s.scope.HetznerCluster.Status.Network != nil {
+		network = &hcloud.Network{
+			ID: s.scope.HetznerCluster.Status.Network.ID,
+		}
+	}
 	opts := hcloud.LoadBalancerCreateOpts{
 		LoadBalancerType: loadBalancerType,
 		Name:             name,
@@ -144,9 +151,7 @@ func (s *Service) createLoadBalancer(ctx context.Context) (*hcloud.LoadBalancer,
 		Location: &hcloud.Location{
 			Name: string(s.scope.HetznerCluster.Spec.ControlPlaneLoadBalancer.Region),
 		},
-		Network: &hcloud.Network{
-			ID: s.scope.HetznerCluster.Status.Network.ID,
-		},
+		Network: network,
 		Labels: map[string]string{
 			clusterTagKey: string(infrav1.ResourceLifecycleOwned),
 		},
@@ -192,13 +197,18 @@ func (s *Service) createLoadBalancer(ctx context.Context) (*hcloud.LoadBalancer,
 
 // Delete implements the deletion of HCloud load balancers.
 func (s *Service) Delete(ctx context.Context) (err error) {
+	if s.scope.HetznerCluster.Status.ControlPlaneLoadBalancer == nil {
+		// nothing to do
+		return nil
+	}
+
 	if _, err := s.scope.HCloudClient().DeleteLoadBalancer(ctx, s.scope.HetznerCluster.Status.ControlPlaneLoadBalancer.ID); err != nil {
 		record.Eventf(s.scope.HetznerCluster, "FailedLoadBalancerDelete", "Failed to delete load balancer: %s", err)
 		return errors.Wrap(err, "failed to delete load balancer")
 	}
 
 	// Delete lb information from cluster status
-	s.scope.HetznerCluster.Status = infrav1.HetznerClusterStatus{}
+	s.scope.HetznerCluster.Status.ControlPlaneLoadBalancer = nil
 
 	record.Eventf(s.scope.HetznerCluster, "DeleteLoadBalancer", "Deleted load balancer")
 
 	return nil
diff --git a/pkg/services/hcloud/network/network.go b/pkg/services/hcloud/network/network.go
index 67c6a9029..791d87f15 100644
--- a/pkg/services/hcloud/network/network.go
+++ b/pkg/services/hcloud/network/network.go
@@ -28,11 +28,12 @@ func NewService(scope *scope.ClusterScope) *Service {
 
 // Reconcile implements life cycle of networks.
 func (s *Service) Reconcile(ctx context.Context) (err error) {
-	s.scope.Info("Reconciling network", "spec", s.scope.HetznerCluster.Spec.HCloudNetworkSpec)
 	if !s.scope.HetznerCluster.Spec.HCloudNetworkSpec.NetworkEnabled {
 		return nil
 	}
 
+	s.scope.Info("Reconciling network", "spec", s.scope.HetznerCluster.Spec.HCloudNetworkSpec)
+
 	network, err := s.findNetwork(ctx)
 	if err != nil {
 		return errors.Wrap(err, "failed to find network")
@@ -96,6 +97,10 @@ func (s *Service) createNetwork(ctx context.Context, spec *infrav1.HCloudNetwork
 
 // Delete implements deletion of networks.
 func (s *Service) Delete(ctx context.Context) error {
+	if s.scope.HetznerCluster.Status.Network == nil {
+		// Nothing to delete
+		return nil
+	}
 	_, err := s.scope.HCloudClient().DeleteNetwork(ctx, &hcloud.Network{ID: s.scope.HetznerCluster.Status.Network.ID})
 	if err != nil {
 		// If resource has been deleted already then do nothing
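
Note for reviewers: because HetznerClusterStatus.ControlPlaneLoadBalancer is now a *LoadBalancerStatus, nil becomes a meaningful state ("no load balancer reconciled yet"), and every consumer must guard against it. Below is a minimal, self-contained sketch of that contract; the trimmed-down types and the deleteLoadBalancer helper are illustrative stand-ins, not the real infrav1 definitions.

package main

import "fmt"

// Illustrative stand-in for infrav1.LoadBalancerStatus; field set is assumed.
type LoadBalancerStatus struct {
	ID      int
	Address string
}

// Illustrative stand-in for infrav1.HetznerClusterStatus.
type HetznerClusterStatus struct {
	// Now a pointer: nil means no load balancer has been reconciled yet.
	ControlPlaneLoadBalancer *LoadBalancerStatus
}

// deleteLoadBalancer mirrors the guard added in Service.Delete: return early
// when the status carries no load balancer instead of dereferencing nil.
func deleteLoadBalancer(status *HetznerClusterStatus) error {
	if status.ControlPlaneLoadBalancer == nil {
		return nil // nothing to do
	}
	fmt.Printf("deleting load balancer %d\n", status.ControlPlaneLoadBalancer.ID)
	// Clear only the load balancer entry, leaving the rest of the status intact.
	status.ControlPlaneLoadBalancer = nil
	return nil
}

func main() {
	s := &HetznerClusterStatus{}
	_ = deleteLoadBalancer(s) // safe: no-op when the pointer is nil

	s.ControlPlaneLoadBalancer = &LoadBalancerStatus{ID: 42}
	_ = deleteLoadBalancer(s) // deletes and resets the pointer
}

The same guard-then-clear pattern appears in Service.Delete above: the early return avoids a nil dereference, and resetting only ControlPlaneLoadBalancer, rather than wiping the whole HetznerClusterStatus as the old code did, preserves the network and placement-group state needed by the remaining deletion steps.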