diff --git a/cmd/clusterctl/client/config/providers_client.go b/cmd/clusterctl/client/config/providers_client.go
index 433cb08302ac..31d886d5de0d 100644
--- a/cmd/clusterctl/client/config/providers_client.go
+++ b/cmd/clusterctl/client/config/providers_client.go
@@ -35,25 +35,26 @@ const (
// Infra providers.
const (
- AWSProviderName = "aws"
- AzureProviderName = "azure"
- BYOHProviderName = "byoh"
- DockerProviderName = "docker"
- DOProviderName = "digitalocean"
- GCPProviderName = "gcp"
- HetznerProviderName = "hetzner"
- IBMCloudProviderName = "ibmcloud"
- Metal3ProviderName = "metal3"
- NestedProviderName = "nested"
- NutanixProviderName = "nutanix"
- OCIProviderName = "oci"
- OpenStackProviderName = "openstack"
- PacketProviderName = "packet"
- SideroProviderName = "sidero"
- VSphereProviderName = "vsphere"
- MAASProviderName = "maas"
- KubevirtProviderName = "kubevirt"
- VclusterProviderName = "vcluster"
+ AWSProviderName = "aws"
+ AzureProviderName = "azure"
+ BYOHProviderName = "byoh"
+ CloudStackProviderName = "cloudstack"
+ DockerProviderName = "docker"
+ DOProviderName = "digitalocean"
+ GCPProviderName = "gcp"
+ HetznerProviderName = "hetzner"
+ IBMCloudProviderName = "ibmcloud"
+ Metal3ProviderName = "metal3"
+ NestedProviderName = "nested"
+ NutanixProviderName = "nutanix"
+ OCIProviderName = "oci"
+ OpenStackProviderName = "openstack"
+ PacketProviderName = "packet"
+ SideroProviderName = "sidero"
+ VSphereProviderName = "vsphere"
+ MAASProviderName = "maas"
+ KubevirtProviderName = "kubevirt"
+ VclusterProviderName = "vcluster"
)
// Bootstrap providers.
@@ -132,6 +133,11 @@ func (p *providersClient) defaults() []Provider {
url: "https://github.com/kubernetes-sigs/cluster-api/releases/latest/infrastructure-components-development.yaml",
providerType: clusterctlv1.InfrastructureProviderType,
},
+ &provider{
+ name: CloudStackProviderName,
+ url: "https://github.com/kubernetes-sigs/cluster-api-provider-cloudstack/releases/latest/infrastructure-components.yaml",
+ providerType: clusterctlv1.InfrastructureProviderType,
+ },
&provider{
name: DOProviderName,
url: "https://github.com/kubernetes-sigs/cluster-api-provider-digitalocean/releases/latest/infrastructure-components.yaml",
diff --git a/cmd/clusterctl/client/config_test.go b/cmd/clusterctl/client/config_test.go
index d877975da58a..251bcb8d653c 100644
--- a/cmd/clusterctl/client/config_test.go
+++ b/cmd/clusterctl/client/config_test.go
@@ -64,6 +64,7 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) {
config.AWSProviderName,
config.AzureProviderName,
config.BYOHProviderName,
+ config.CloudStackProviderName,
config.DOProviderName,
config.DockerProviderName,
config.GCPProviderName,
@@ -100,6 +101,7 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) {
config.AWSProviderName,
config.AzureProviderName,
config.BYOHProviderName,
+ config.CloudStackProviderName,
config.DOProviderName,
config.DockerProviderName,
config.GCPProviderName,
diff --git a/cmd/clusterctl/cmd/config_repositories_test.go b/cmd/clusterctl/cmd/config_repositories_test.go
index 088d44f4eb0c..2f3153750bed 100644
--- a/cmd/clusterctl/cmd/config_repositories_test.go
+++ b/cmd/clusterctl/cmd/config_repositories_test.go
@@ -110,6 +110,7 @@ talos ControlPlaneProvider https://github.com/siderolabs/clust
aws InfrastructureProvider my-aws-infrastructure-components.yaml
azure InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-azure/releases/latest/ infrastructure-components.yaml
byoh InfrastructureProvider https://github.com/vmware-tanzu/cluster-api-provider-bringyourownhost/releases/latest/ infrastructure-components.yaml
+cloudstack InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-cloudstack/releases/latest/ infrastructure-components.yaml
digitalocean InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-digitalocean/releases/latest/ infrastructure-components.yaml
docker InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ infrastructure-components-development.yaml
gcp InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-gcp/releases/latest/ infrastructure-components.yaml
@@ -169,6 +170,10 @@ var expectedOutputYaml = `- File: core_components.yaml
Name: byoh
ProviderType: InfrastructureProvider
URL: https://github.com/vmware-tanzu/cluster-api-provider-bringyourownhost/releases/latest/
+- File: infrastructure-components.yaml
+ Name: cloudstack
+ ProviderType: InfrastructureProvider
+ URL: https://github.com/kubernetes-sigs/cluster-api-provider-cloudstack/releases/latest/
- File: infrastructure-components.yaml
Name: digitalocean
ProviderType: InfrastructureProvider
diff --git a/docs/book/src/clusterctl/provider-contract.md b/docs/book/src/clusterctl/provider-contract.md
index 165f4c3af2ca..33b5e2813aaa 100644
--- a/docs/book/src/clusterctl/provider-contract.md
+++ b/docs/book/src/clusterctl/provider-contract.md
@@ -138,7 +138,7 @@ Each provider is expected to deploy controllers using a Deployment.
While defining the Deployment Spec, the container that executes the controller binary MUST be called `manager`.
The manager MUST support a `--namespace` flag for specifying the namespace where the controller
-will look for objects to reconcile; however, clusterctl will always install providers watching for all namespaces
+will look for objects to reconcile; however, clusterctl will always install providers watching for all namespaces
(`--namespace=""`); for more details see [support for multiple instances](../developer/architecture/controllers/support-multiple-instances.md)
for more context.
@@ -190,28 +190,29 @@ providers.
|CACPN | cluster.x-k8s.io/provider=control-plane-nested |
|CAPA | cluster.x-k8s.io/provider=infrastructure-aws |
|CAPB | cluster.x-k8s.io/provider=infrastructure-byoh |
+|CAPC | cluster.x-k8s.io/provider=infrastructure-cloudstack |
+|CAPD | cluster.x-k8s.io/provider=infrastructure-docker |
+|CAPDO | cluster.x-k8s.io/provider=infrastructure-digitalocean |
+|CAPG | cluster.x-k8s.io/provider=infrastructure-gcp |
|CAPH | cluster.x-k8s.io/provider=infrastructure-hetzner |
|CAPIBM | cluster.x-k8s.io/provider=infrastructure-ibmcloud |
-|CAPV | cluster.x-k8s.io/provider=infrastructure-vsphere |
-|CAPD | cluster.x-k8s.io/provider=infrastructure-docker |
+|CAPK | cluster.x-k8s.io/provider=infrastructure-kubevirt |
|CAPM3 | cluster.x-k8s.io/provider=infrastructure-metal3 |
|CAPN | cluster.x-k8s.io/provider=infrastructure-nested |
-|CAPP | cluster.x-k8s.io/provider=infrastructure-packet |
-|CAPZ | cluster.x-k8s.io/provider=infrastructure-azure |
-|CAPOCI | cluster.x-k8s.io/provider=infrastructure-oci |
|CAPO | cluster.x-k8s.io/provider=infrastructure-openstack |
-|CAPDO | cluster.x-k8s.io/provider=infrastructure-digitalocean |
-|CAPG | cluster.x-k8s.io/provider=infrastructure-gcp |
-|CAPX | cluster.x-k8s.io/provider=infrastructure-nutanix |
-|CAPK | cluster.x-k8s.io/provider=infrastructure-kubevirt |
+|CAPOCI | cluster.x-k8s.io/provider=infrastructure-oci |
+|CAPP | cluster.x-k8s.io/provider=infrastructure-packet |
+|CAPV | cluster.x-k8s.io/provider=infrastructure-vsphere |
|CAPVC | cluster.x-k8s.io/provider=infrastructure-vcluster |
+|CAPX | cluster.x-k8s.io/provider=infrastructure-nutanix |
+|CAPZ | cluster.x-k8s.io/provider=infrastructure-azure |
### Workload cluster templates
An infrastructure provider could publish a **cluster templates** file to be used by `clusterctl generate cluster`.
This is single YAML with _all_ the objects required to create a new workload cluster.
-With ClusterClass enabled it is possible to have cluster templates with managed topologies. Cluster templates with managed
+With ClusterClass enabled it is possible to have cluster templates with managed topologies. Cluster templates with managed
topologies require only the cluster object in the template and a corresponding ClusterClass definition.
The following rules apply:
@@ -269,8 +270,8 @@ The following rules apply:
ClusterClass definitions MUST be stored in the same location as the component YAML and follow this naming convention:
1. The ClusterClass definition should be named `clusterclass-{ClusterClass-name}.yaml`, e.g `clusterclass-prod.yaml`.
-`{ClusterClass-name}` is the name of the ClusterClass that is referenced from the Cluster.spec.topology.class field
-in the Cluster template; Cluster template files using a ClusterClass are usually simpler because they are no longer
+`{ClusterClass-name}` is the name of the ClusterClass that is referenced from the Cluster.spec.topology.class field
+in the Cluster template; Cluster template files using a ClusterClass are usually simpler because they are no longer
required to have all the templates.
Each provider should create user facing documentation with the list of available ClusterClass definitions.
@@ -283,18 +284,18 @@ The references in the ClusterClass definition should NOT specify a namespace.
It is recommended that none of the objects in the ClusterClass YAML should specify a namespace.
-Even if technically possible, it is strongly recommended that none of the objects in the ClusterClass definitions are shared across multiple definitions;
+Even if technically possible, it is strongly recommended that none of the objects in the ClusterClass definitions are shared across multiple definitions;
this helps in preventing changing an object inadvertently impacting many ClusterClasses, and consequently, all the Clusters using those ClusterClasses.
#### Variables
Currently the ClusterClass definitions SHOULD NOT have any environment variables in them.
-ClusterClass definitions files should not use variable substitution, given that ClusterClass and managed topologies provide an alternative model for variable definition.
+ClusterClass definitions files should not use variable substitution, given that ClusterClass and managed topologies provide an alternative model for variable definition.
#### Note
-A ClusterClass definition is automatically included in the output of `clusterctl generate cluster` if the cluster template uses a managed topology
+A ClusterClass definition is automatically included in the output of `clusterctl generate cluster` if the cluster template uses a managed topology
and a ClusterClass with the same name does not already exists in the Cluster.
## OwnerReferences chain
@@ -353,7 +354,7 @@ functioning of `clusterctl` when using non-compliant component YAML or cluster t
Provider authors should be aware that `clusterctl move` command implements a discovery mechanism that considers:
-* All the Kind defined in one of the CRDs installed by clusterctl using `clusterctl init` (identified via the `clusterctl.cluster.x-k8s.io label`);
+* All the Kind defined in one of the CRDs installed by clusterctl using `clusterctl init` (identified via the `clusterctl.cluster.x-k8s.io label`);
For each CRD, discovery collects:
* All the objects from the namespace being moved only if the CRD scope is `Namespaced`.
* All the objects if the CRD scope is `Cluster`.
@@ -365,22 +366,22 @@ that are compliant with one of the following rules:
* The object is directly or indirectly linked to a `Cluster` object (linked through the `OwnerReference` chain).
* The object is a secret containing a user provided certificate (linked to a `Cluster` object via a naming convention).
* The object is directly or indirectly linked to a `ClusterResourceSet` object (through the `OwnerReference` chain).
- * The object is directly or indirectly linked to another object with the `clusterctl.cluster.x-k8s.io/move-hierarchy`
+ * The object is directly or indirectly linked to another object with the `clusterctl.cluster.x-k8s.io/move-hierarchy`
label, e.g. the infrastructure Provider ClusterIdentity objects (linked through the `OwnerReference` chain).
* The object hase the `clusterctl.cluster.x-k8s.io/move` label or the `clusterctl.cluster.x-k8s.io/move-hierarchy` label,
- e.g. the CPI config secret.
-
-Note. `clusterctl.cluster.x-k8s.io/move` and `clusterctl.cluster.x-k8s.io/move-hierarchy` labels could be applied
+ e.g. the CPI config secret.
+
+Note. `clusterctl.cluster.x-k8s.io/move` and `clusterctl.cluster.x-k8s.io/move-hierarchy` labels could be applied
to single objects or at the CRD level (the label applies to all the objects).
-
+
Please note that during move:
* Namespaced objects, if not existing in the target cluster, are created.
- * Namespaced objects, if already existing in the target cluster, are updated.
+ * Namespaced objects, if already existing in the target cluster, are updated.
* Namespaced objects are removed from the source cluster.
* Global objects, if not existing in the target cluster, are created.
* Global objects, if already existing in the target cluster, are not updated.
* Global objects are not removed from the source cluster.
- * Namespaced objects which are part of an owner chain that starts with a global object (e.g. a secret containing
+ * Namespaced objects which are part of an owner chain that starts with a global object (e.g. a secret containing
credentials for an infrastructure Provider ClusterIdentity) are treated as Global objects.
-The Docker provider requires the `ClusterTopology` feature to deploy ClusterClass-based clusters. We are
-only supporting ClusterClass-based cluster-templates in this quickstart as ClusterClass makes it possible to
-adapt configuration based on Kubernetes version. This is required to install Kubernetes clusters < v1.24 and
+The Docker provider requires the `ClusterTopology` feature to deploy ClusterClass-based clusters. We are
+only supporting ClusterClass-based cluster-templates in this quickstart as ClusterClass makes it possible to
+adapt configuration based on Kubernetes version. This is required to install Kubernetes clusters < v1.24 and
for the upgrade from v1.23 to v1.24 as we have to use different cgroupDrivers depending on Kubernetes version.
```
@@ -523,7 +547,7 @@ before configuring a cluster with Cluster API. Instructions are provided for com
Otherwise, you can look at the `clusterctl generate cluster` [command][clusterctl generate cluster] documentation for details about how to
discover the list of variables required by a cluster templates.
-{{#tabs name:"tab-configuration-infrastructure" tabs:"AWS,Azure,DigitalOcean,Docker,Equinix Metal,GCP,IBM Cloud,Metal3,Nutanix,Kubevirt,OpenStack,vSphere"}}
+{{#tabs name:"tab-configuration-infrastructure" tabs:"AWS,Azure,CloudStack,DigitalOcean,Docker,Equinix Metal,GCP,IBM Cloud,Metal3,Nutanix,Kubevirt,OpenStack,vSphere"}}
{{#tab AWS}}
```bash
@@ -559,6 +583,41 @@ export AZURE_NODE_MACHINE_TYPE="Standard_D2s_v3"
export AZURE_RESOURCE_GROUP=""
```
+{{#/tab }}
+{{#tab CloudStack}}
+
+A Cluster API-compatible image must be available in your CloudStack installation. For instructions on how to build a compatible image,
+see [image-builder (CloudStack)](https://image-builder.sigs.k8s.io/capi/providers/cloudstack.html).
+
+Prebuilt images can be found [here](http://packages.shapeblue.com/cluster-api-provider-cloudstack/images/).
+
+To see all required CloudStack environment variables, execute:
+```bash
+clusterctl generate cluster --infrastructure cloudstack --list-variables capi-quickstart
+```
+
+In addition, the following CloudStack environment variables are required.
+```bash
+# Set this to the name of the zone in which to deploy the cluster
+export CLOUDSTACK_ZONE_NAME=
+# The name of the network on which the VMs will reside
+export CLOUDSTACK_NETWORK_NAME=
+# The endpoint of the workload cluster
+export CLUSTER_ENDPOINT_IP=
+export CLUSTER_ENDPOINT_PORT=
+# The service offering of the control plane nodes
+export CLOUDSTACK_CONTROL_PLANE_MACHINE_OFFERING=
+# The service offering of the worker nodes
+export CLOUDSTACK_WORKER_MACHINE_OFFERING=
+# The capi compatible template to use
+export CLOUDSTACK_TEMPLATE_NAME=
+# The ssh key to use to log into the nodes
+export CLOUDSTACK_SSH_KEY_NAME=
+
+```
+
+A full configuration reference can be found in [configuration.md](https://github.com/kubernetes-sigs/cluster-api-provider-cloudstack/blob/master/docs/book/src/clustercloudstack/configuration.md).
+
{{#/tab }}
{{#tab DigitalOcean}}
@@ -811,8 +870,8 @@ For more information about prerequisites, credentials management, or permissions
For the purpose of this tutorial, we'll name our cluster capi-quickstart.
-{{#tabs name:"tab-clusterctl-config-cluster" tabs:"Azure|AWS|DigitalOcean|Equinix Metal|GCP|Metal3|Nutanix|Kubevirt|OpenStack|vSphere,Docker"}}
-{{#tab Azure|AWS|DigitalOcean|Equinix Metal|GCP|Metal3|OpenStack|vSphere}}
+{{#tabs name:"tab-clusterctl-config-cluster" tabs:"Azure|AWS|CloudStack|DigitalOcean|Equinix Metal|GCP|Metal3|Nutanix|Kubevirt|OpenStack|vSphere,Docker"}}
+{{#tab Azure|AWS|CloudStack|DigitalOcean|Equinix Metal|GCP|Metal3|OpenStack|vSphere}}
```bash
clusterctl generate cluster capi-quickstart \
@@ -926,8 +985,8 @@ See [Additional Notes for the Docker Provider](../clusterctl/developers.md#addit
Calico is used here as an example.
-{{#tabs name:"tab-deploy-cni" tabs:"AWS|DigitalOcean|Docker|Equinix Metal|GCP|Metal3|Nutanix|Kubevirt|OpenStack|vSphere,Azure"}}
-{{#tab AWS|DigitalOcean|Docker|Equinix Metal|GCP|Metal3|OpenStack|vSphere}}
+{{#tabs name:"tab-deploy-cni" tabs:"AWS|CloudStack|DigitalOcean|Docker|Equinix Metal|GCP|Metal3|Nutanix|Kubevirt|OpenStack|vSphere,Azure"}}
+{{#tab AWS|CloudStack|DigitalOcean|Docker|Equinix Metal|GCP|Metal3|OpenStack|vSphere}}
```bash
kubectl --kubeconfig=./capi-quickstart.kubeconfig \