From 6a69ce8591a89a9803ab38621be1c0c83ed7d25c Mon Sep 17 00:00:00 2001 From: Arvinderpal Wander Date: Tue, 11 Feb 2020 14:26:29 -0800 Subject: [PATCH] Generate the docker infrastructure-components.yaml using kustomize. Added a README.md with instructions to run tests. Minor changes to delete and move tests based on feedback. --- cmd/clusterctl/test/README.md | 26 + cmd/clusterctl/test/e2e/delete_test.go | 36 +- cmd/clusterctl/test/e2e/e2e_suite_test.go | 3 + cmd/clusterctl/test/e2e/move_test.go | 31 +- cmd/clusterctl/test/run-e2e.sh | 41 +- .../v0.3.0/infrastructure-components.yaml | 514 ------------------ 6 files changed, 79 insertions(+), 572 deletions(-) create mode 100644 cmd/clusterctl/test/README.md delete mode 100644 cmd/clusterctl/test/testdata/docker/v0.3.0/infrastructure-components.yaml diff --git a/cmd/clusterctl/test/README.md b/cmd/clusterctl/test/README.md new file mode 100644 index 000000000000..7271c32dba9b --- /dev/null +++ b/cmd/clusterctl/test/README.md @@ -0,0 +1,26 @@ +# Running the tests + +NOTE: the e2e tests may not work on a Mac; they should however work just fine on any Linux distro. Mac support will be added in a follow-up PR. + +Currently, overrides are needed for the cluster-api, kubeadm-bootstrap and kubeadm-control-plane providers. The override for the infra provider docker must be removed as it is generated locally by the e2e test script: + + cmd/clusterctl/hack/local-overrides.py + rm -rf $HOME/.cluster-api/overrides/docker + +The entire test suite can be run using the script: + + ./run-e2e.sh + +To run specific tests, use the `GINKGO_FOCUS` environment variable + + GINKGO_FOCUS="clusterctl create cluster" ./run-e2e.sh + +## Skip local build of CAPD + +By default, a local capd image will be built and loaded into kind. 
This can be skipped as so: + + SKIP_DOCKER_BUILD=1 ./run-e2e.sh + +You can also specify a pre-built image and skip the build: + + SKIP_DOCKER_BUILD=1 MANAGER_IMAGE=gcr.io/my-project-name/docker-provider-manager-amd64:dev ./run-e2e.sh \ No newline at end of file diff --git a/cmd/clusterctl/test/e2e/delete_test.go b/cmd/clusterctl/test/e2e/delete_test.go index ea1d902bc59b..59ed164cf4e1 100644 --- a/cmd/clusterctl/test/e2e/delete_test.go +++ b/cmd/clusterctl/test/e2e/delete_test.go @@ -82,15 +82,14 @@ var _ = Describe("clusterctl delete", func() { }) It("should delete of all infra provider components except the hosting namespace and the CRDs.", func() { Eventually( - func() error { - err := mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "capd-system", Name: "capd-controller-manager"}, &appsv1.Deployment{}) - if err == nil || !apierrors.IsNotFound(err) { - // deployment still exists or some other error other than not found occured. - return fmt.Errorf("%v", err) + func() bool { + if !apierrors.IsNotFound(mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "capd-system", Name: "capd-controller-manager"}, &appsv1.Deployment{})) { + return false } - return nil + // TODO: check that namespace and CRD are still present. + return true }, 3*time.Minute, 5*time.Second, - ).ShouldNot(HaveOccurred()) + ).Should(BeTrue()) }) }) Context("deletes everything", func() { @@ -104,28 +103,23 @@ var _ = Describe("clusterctl delete", func() { }) It("should reset the management cluster to its original state", func() { Eventually( - func() error { + func() bool { // TODO: check all components are deleted. 
- err := mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "capd-system", Name: "capd-controller-manager"}, &appsv1.Deployment{}) - if err == nil || !apierrors.IsNotFound(err) { - return fmt.Errorf("%v", err) + if !apierrors.IsNotFound(mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "capd-system", Name: "capd-controller-manager"}, &appsv1.Deployment{})) { + return false } // TODO: check namespace of all components are deleted. - err = mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Name: "capd-system"}, &corev1.Namespace{}) - if err == nil || !apierrors.IsNotFound(err) { - return fmt.Errorf("%v", err) + if !apierrors.IsNotFound(mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Name: "capd-system"}, &corev1.Namespace{})) { + return false } // TODO: check that all CRDs are deleted. - err = mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "foo-cluster"}, &infrav1.DockerCluster{}) - if err == nil { - return fmt.Errorf("%v", err) - } + err := mgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "foo-cluster"}, &infrav1.DockerCluster{}) if _, ok := err.(*meta.NoResourceMatchError); !ok { - return err + return false } - return nil + return true }, 3*time.Minute, 5*time.Second, - ).ShouldNot(HaveOccurred()) + ).Should(BeTrue()) }) }) }) diff --git a/cmd/clusterctl/test/e2e/e2e_suite_test.go b/cmd/clusterctl/test/e2e/e2e_suite_test.go index 2e7b122cfe6f..fdbcb841097c 100644 --- a/cmd/clusterctl/test/e2e/e2e_suite_test.go +++ b/cmd/clusterctl/test/e2e/e2e_suite_test.go @@ -49,7 +49,10 @@ var _ = BeforeSuite(func() { // Docker image to load into the kind cluster for testing managerImage = os.Getenv("MANAGER_IMAGE") if managerImage == "" { + fmt.Fprintf(GinkgoWriter, "MANAGER_IMAGE not specified, using default %v\n", "gcr.io/k8s-staging-capi-docker/capd-manager-amd64:dev") managerImage = "gcr.io/k8s-staging-capi-docker/capd-manager-amd64:dev" + } else { + fmt.Fprintf(GinkgoWriter, "Using MANAGER_IMAGE %v\n", managerImage) 
} kindConfigFile = os.Getenv("KIND_CONFIG_FILE") if kindConfigFile == "" { diff --git a/cmd/clusterctl/test/e2e/move_test.go b/cmd/clusterctl/test/e2e/move_test.go index 85881ddf5290..fac57b1d9300 100644 --- a/cmd/clusterctl/test/e2e/move_test.go +++ b/cmd/clusterctl/test/e2e/move_test.go @@ -142,30 +142,25 @@ var _ = Describe("clusterctl move", func() { ).Should(BeTrue()) // Should delete all Cluster API objects from the previous management cluster. Eventually( - func() error { - err := fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: workloadInfo.workloadClusterName}, &clusterv1.Cluster{}) - if err == nil || !apierrors.IsNotFound(err) { - return fmt.Errorf("%v", err) + func() bool { + if !apierrors.IsNotFound(fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: workloadInfo.workloadClusterName}, &clusterv1.Cluster{})) { + return false } - err = fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &clusterv1.Machine{}) - if err == nil || !apierrors.IsNotFound(err) { - return fmt.Errorf("%v", err) + if !apierrors.IsNotFound(fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &clusterv1.Machine{})) { + return false } - err = fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0-config"}, &bootstrapv1.KubeadmConfig{}) - if err == nil || !apierrors.IsNotFound(err) { - return fmt.Errorf("%v", err) + if !apierrors.IsNotFound(fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0-config"}, &bootstrapv1.KubeadmConfig{})) { + return false } - err = fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: workloadInfo.workloadClusterName}, &infrav1.DockerCluster{}) - if err == nil || !apierrors.IsNotFound(err) { - return fmt.Errorf("%v", err) + if !apierrors.IsNotFound(fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: 
"default", Name: workloadInfo.workloadClusterName}, &infrav1.DockerCluster{})) { + return false } - err = fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &infrav1.DockerMachine{}) - if err == nil || !apierrors.IsNotFound(err) { - return fmt.Errorf("%v", err) + if !apierrors.IsNotFound(fromMgmtInfo.mgmtClient.Get(ctx, client.ObjectKey{Namespace: "default", Name: "controlplane-0"}, &infrav1.DockerMachine{})) { + return false } - return nil + return true }, 3*time.Minute, 5*time.Second, - ).ShouldNot(HaveOccurred()) + ).Should(BeTrue()) }) }) }) diff --git a/cmd/clusterctl/test/run-e2e.sh b/cmd/clusterctl/test/run-e2e.sh index f327472039e0..0c33134c4301 100755 --- a/cmd/clusterctl/test/run-e2e.sh +++ b/cmd/clusterctl/test/run-e2e.sh @@ -18,12 +18,27 @@ set -o errexit set -o nounset set -o pipefail +export CAPD_VERSION=v0.3.0 + REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/../../.. cd "${REPO_ROOT}" || exit 1 REPO_ROOT_ABS=${PWD} ARTIFACTS="${ARTIFACTS:-${REPO_ROOT_ABS}/_artifacts}" mkdir -p "$ARTIFACTS/logs/" +if [ -z "${SKIP_DOCKER_BUILD-}" ]; then + REGISTRY=gcr.io/"$(gcloud config get-value project)" + export REGISTRY + export DOCKER_MANAGER_IMAGE=docker-provider-manager + export TAG=dev + export ARCH=amd64 + # This will load the capd image into the kind mgmt cluster. 
+ export MANAGER_IMAGE=${REGISTRY}/${DOCKER_MANAGER_IMAGE}-${ARCH}:${TAG} + export PULL_POLICY=IfNotPresent + cd "${REPO_ROOT_ABS}/test/infrastructure/docker" + CONTROLLER_IMG=${REGISTRY}/${DOCKER_MANAGER_IMAGE} make docker-build +fi + cat < "clusterctl-settings.json" { "providers": [ "cluster-api", "kubeadm-bootstrap", "kubeadm-control-plane", "docker"] @@ -31,13 +46,16 @@ cat < "clusterctl-settings.json" EOF # Create a local filesystem repository for the docker provider and update clusterctl.yaml -mkdir -p "$ARTIFACTS/testdata/" -cp -r "${REPO_ROOT_ABS}/cmd/clusterctl/test/testdata" "$ARTIFACTS/" -export CLUSTERCTL_CONFIG="${ARTIFACTS}/testdata/clusterctl.yaml" +LOCAL_CAPD_REPO_PATH="${ARTIFACTS}/testdata/docker" +mkdir -p "${LOCAL_CAPD_REPO_PATH}" +cp -r "${REPO_ROOT_ABS}/cmd/clusterctl/test/testdata/docker/${CAPD_VERSION}" "${LOCAL_CAPD_REPO_PATH}" +# We build the infrastructure-components.yaml from the capd folder and put in local repo folder +kustomize build "${REPO_ROOT_ABS}/test/infrastructure/docker/config/default/" > "${LOCAL_CAPD_REPO_PATH}/${CAPD_VERSION}/infrastructure-components.yaml" +export CLUSTERCTL_CONFIG="${ARTIFACTS}/testdata/clusterctl.yaml" cat < "${CLUSTERCTL_CONFIG}" providers: - name: docker - url: ${ARTIFACTS}/testdata/docker/v0.3.0/infrastructure-components.yaml + url: ${LOCAL_CAPD_REPO_PATH}/${CAPD_VERSION}/infrastructure-components.yaml type: InfrastructureProvider DOCKER_SERVICE_DOMAIN: "cluster.local" @@ -58,21 +76,6 @@ nodes: containerPath: /var/run/docker.sock EOF -REGISTRY=gcr.io/"$(gcloud config get-value project)" -export REGISTRY -export DOCKER_MANAGER_IMAGE=docker-provider-manager -export TAG=dev -export ARCH=amd64 - -# This will load the capd image into the kind mgmt cluster. 
-export MANAGER_IMAGE=${REGISTRY}/${DOCKER_MANAGER_IMAGE}-${ARCH}:${TAG} -export PULL_POLICY=IfNotPresent - -if [ -z "${SKIP_DOCKER_BUILD-}" ]; then - cd "${REPO_ROOT_ABS}/test/infrastructure/docker" - CONTROLLER_IMG=${REGISTRY}/${DOCKER_MANAGER_IMAGE} make docker-build -fi - GINKGO_FOCUS=${GINKGO_FOCUS:-""} cd "${REPO_ROOT_ABS}/cmd/clusterctl/test/e2e" diff --git a/cmd/clusterctl/test/testdata/docker/v0.3.0/infrastructure-components.yaml b/cmd/clusterctl/test/testdata/docker/v0.3.0/infrastructure-components.yaml deleted file mode 100644 index 3faf895c22d1..000000000000 --- a/cmd/clusterctl/test/testdata/docker/v0.3.0/infrastructure-components.yaml +++ /dev/null @@ -1,514 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - cluster.x-k8s.io/provider: infrastructure-docker - control-plane: controller-manager - name: capd-system ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.2.4 - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-docker - name: dockerclusters.infrastructure.cluster.x-k8s.io -spec: - group: infrastructure.cluster.x-k8s.io - names: - categories: - - cluster-api - kind: DockerCluster - listKind: DockerClusterList - plural: dockerclusters - singular: dockercluster - preserveUnknownFields: false - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: DockerCluster is the Schema for the dockerclusters API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. 
Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: DockerClusterSpec defines the desired state of DockerCluster. - properties: - controlPlaneEndpoint: - description: ControlPlaneEndpoint represents the endpoint used to communicate - with the control plane. - properties: - host: - description: Host is the hostname on which the API server is serving. - type: string - port: - description: Port is the port on which the API server is serving. - type: integer - required: - - host - - port - type: object - failureDomains: - additionalProperties: - description: FailureDomainSpec is the Schema for Cluster API failure - domains. It allows controllers to understand how many failure domains - a cluster can optionally span across. - properties: - attributes: - additionalProperties: - type: string - description: Attributes is a free form map of attributes an infrastructure - provider might use or require. - type: object - controlPlane: - description: ControlPlane determines if this failure domain is - suitable for use by control plane machines. - type: boolean - type: object - description: FailureDomains are not usulaly defined on the spec. The - docker provider is special since failure domains don't mean anything - in a local docker environment. Instead, the docker cluster controller - will simply copy these into the Status and allow the Cluster API controllers - to do what they will with the defined failure domains. - type: object - type: object - status: - description: DockerClusterStatus defines the observed state of DockerCluster. - properties: - failureDomains: - additionalProperties: - description: FailureDomainSpec is the Schema for Cluster API failure - domains. 
It allows controllers to understand how many failure domains - a cluster can optionally span across. - properties: - attributes: - additionalProperties: - type: string - description: Attributes is a free form map of attributes an infrastructure - provider might use or require. - type: object - controlPlane: - description: ControlPlane determines if this failure domain is - suitable for use by control plane machines. - type: boolean - type: object - description: FailureDomains don't mean much in CAPD since it's all local, - but we can see how the rest of cluster API will use this if we populate - it. - type: object - ready: - description: Ready denotes that the docker cluster (infrastructure) - is ready. - type: boolean - required: - - ready - type: object - type: object - version: v1alpha3 - versions: - - name: v1alpha3 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.2.4 - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-docker - name: dockermachines.infrastructure.cluster.x-k8s.io -spec: - group: infrastructure.cluster.x-k8s.io - names: - categories: - - cluster-api - kind: DockerMachine - listKind: DockerMachineList - plural: dockermachines - singular: dockermachine - preserveUnknownFields: false - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: DockerMachine is the Schema for the dockermachines API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: DockerMachineSpec defines the desired state of DockerMachine - properties: - customImage: - description: CustomImage allows customizing the container image that - is used for running the machine - type: string - providerID: - description: ProviderID will be the container name in ProviderID format - (docker:////) - type: string - type: object - status: - description: DockerMachineStatus defines the observed state of DockerMachine - properties: - ready: - description: Ready denotes that the machine (docker container) is ready - type: boolean - required: - - ready - type: object - type: object - version: v1alpha3 - versions: - - name: v1alpha3 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.2.4 - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-docker - name: dockermachinetemplates.infrastructure.cluster.x-k8s.io -spec: - group: infrastructure.cluster.x-k8s.io - names: - categories: - - cluster-api - kind: DockerMachineTemplate - listKind: DockerMachineTemplateList - plural: dockermachinetemplates - singular: dockermachinetemplate - preserveUnknownFields: false - scope: Namespaced - validation: - openAPIV3Schema: - description: DockerMachineTemplate is the Schema for the dockermachinetemplates - API - properties: - apiVersion: 
- description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: DockerMachineTemplateSpec defines the desired state of DockerMachineTemplate - properties: - template: - description: DockerMachineTemplateResource describes the data needed - to create a DockerMachine from a template - properties: - spec: - description: Spec is the specification of the desired behavior of - the machine. 
- properties: - customImage: - description: CustomImage allows customizing the container image - that is used for running the machine - type: string - providerID: - description: ProviderID will be the container name in ProviderID - format (docker:////) - type: string - type: object - required: - - spec - type: object - required: - - template - type: object - type: object - version: v1alpha3 - versions: - - name: v1alpha3 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - cluster.x-k8s.io/provider: infrastructure-docker - name: capd-leader-election-role - namespace: capd-system -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - update - - patch -- apiGroups: - - "" - resources: - - events - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - labels: - cluster.x-k8s.io/provider: infrastructure-docker - name: capd-manager-role -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - watch -- apiGroups: - - cluster.x-k8s.io - resources: - - clusters - - machines - verbs: - - get - - list - - watch -- apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - dockerclusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - dockerclusters/status - verbs: - - get - - patch - - update -- apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - dockermachines - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - dockermachines/status - verbs: - - get - - patch - - update ---- 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - cluster.x-k8s.io/provider: infrastructure-docker - name: capd-proxy-role -rules: -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - cluster.x-k8s.io/provider: infrastructure-docker - name: capd-leader-election-rolebinding - namespace: capd-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: capd-leader-election-role -subjects: -- kind: ServiceAccount - name: default - namespace: capd-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - cluster.x-k8s.io/provider: infrastructure-docker - name: capd-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: capd-manager-role -subjects: -- kind: ServiceAccount - name: default - namespace: capd-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - cluster.x-k8s.io/provider: infrastructure-docker - name: capd-proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: capd-proxy-role -subjects: -- kind: ServiceAccount - name: default - namespace: capd-system ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - prometheus.io/port: "8443" - prometheus.io/scheme: https - prometheus.io/scrape: "true" - labels: - cluster.x-k8s.io/provider: infrastructure-docker - control-plane: controller-manager - name: capd-controller-manager-metrics-service - namespace: capd-system -spec: - ports: - - name: https - port: 8443 - targetPort: https - selector: - cluster.x-k8s.io/provider: infrastructure-docker - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - cluster.x-k8s.io/provider: 
infrastructure-docker - control-plane: controller-manager - name: capd-controller-manager - namespace: capd-system -spec: - replicas: 1 - selector: - matchLabels: - cluster.x-k8s.io/provider: infrastructure-docker - control-plane: controller-manager - template: - metadata: - labels: - cluster.x-k8s.io/provider: infrastructure-docker - control-plane: controller-manager - spec: - containers: - - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=10 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0 - name: kube-rbac-proxy - ports: - - containerPort: 8443 - name: https - - args: - - --metrics-addr=0 - - -v=4 - image: gcr.io/arvinders-1st-project/docker-provider-manager-amd64:dev - imagePullPolicy: IfNotPresent - name: manager - securityContext: - privileged: true - volumeMounts: - - mountPath: /var/run/docker.sock - name: dockersock - terminationGracePeriodSeconds: 10 - volumes: - - hostPath: - path: /var/run/docker.sock - name: dockersock