diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b2b212dbe..ddff0e75b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ # How to Contribute -MetalKube projects are [Apache 2.0 licensed](LICENSE) and accept contributions via +Metal3 projects are [Apache 2.0 licensed](LICENSE) and accept contributions via GitHub pull requests. ## Certificate of Origin diff --git a/Dockerfile b/Dockerfile index 93476a8d2..a8e1ecf89 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ FROM registry.svc.ci.openshift.org/openshift/release:golang-1.10 AS builder -WORKDIR /go/src/github.com/metalkube/cluster-api-provider-baremetal +WORKDIR /go/src/github.com/metal3-io/cluster-api-provider-baremetal COPY . . RUN go build -o machine-controller-manager ./cmd/manager RUN go build -o manager ./vendor/github.com/openshift/cluster-api/cmd/manager @@ -11,5 +11,5 @@ FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base # yum install -y $INSTALL_PKGS && \ # rpm -V $INSTALL_PKGS && \ # yum clean all -COPY --from=builder /go/src/github.com/metalkube/cluster-api-provider-baremetal/manager / -COPY --from=builder /go/src/github.com/metalkube/cluster-api-provider-baremetal/machine-controller-manager / +COPY --from=builder /go/src/github.com/metal3-io/cluster-api-provider-baremetal/manager / +COPY --from=builder /go/src/github.com/metal3-io/cluster-api-provider-baremetal/machine-controller-manager / diff --git a/Gopkg.lock b/Gopkg.lock index 4a3131904..f6353f14d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -250,14 +250,14 @@ [[projects]] branch = "master" - digest = "1:70f8d672ed0f89f3b1bd121fa264d6ad3cf20ab5e14a0a20c74a63f7f280d884" - name = "github.com/metalkube/baremetal-operator" + digest = "1:e8f7dec9267de068cce426a0167fa5d1ae96dfe627959dfb2eed6c92fc39ad94" + name = "github.com/metal3-io/baremetal-operator" packages = [ "pkg/apis", - "pkg/apis/metalkube/v1alpha1", + "pkg/apis/metal3/v1alpha1", ] pruneopts = "T" - revision = "96ca68ea37e4e8c64ae53d872d14412fa95b9ae0" + 
revision = "9140d7b451b41c2d80c6222991bbc00b0a75313c" [[projects]] digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" @@ -993,8 +993,8 @@ analyzer-version = 1 input-imports = [ "github.com/emicklei/go-restful", - "github.com/metalkube/baremetal-operator/pkg/apis", - "github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1", + "github.com/metal3-io/baremetal-operator/pkg/apis", + "github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1", "github.com/onsi/ginkgo", "github.com/onsi/gomega", "github.com/openshift/cluster-api/pkg/apis", @@ -1031,6 +1031,7 @@ "sigs.k8s.io/controller-runtime/pkg/source", "sigs.k8s.io/controller-tools/cmd/controller-gen", "sigs.k8s.io/testing_frameworks/integration", + "sigs.k8s.io/yaml", ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 2f065f43d..8f857e751 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -21,7 +21,7 @@ required = [ [[constraint]] - name="github.com/metalkube/baremetal-operator" + name="github.com/metal3-io/baremetal-operator" branch="master" # STANZAS BELOW ARE GENERATED AND MAY BE WRITTEN - DO NOT MODIFY BELOW THIS LINE. 
diff --git a/Makefile b/Makefile index 9e3071eb4..e5574ecff 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ unit: manifests # Build manager binary manager: generate fmt vet - go build -o bin/manager github.com/metalkube/cluster-api-provider-baremetal/cmd/manager + go build -o bin/manager github.com/metal3-io/cluster-api-provider-baremetal/cmd/manager # Run against the configured Kubernetes cluster in ~/.kube/config run: generate fmt vet diff --git a/PROJECT b/PROJECT index 229d76c20..abee4e2b7 100644 --- a/PROJECT +++ b/PROJECT @@ -1,3 +1,3 @@ version: "1" domain: cluster.k8s.io -repo: github.com/metalkube/cluster-api-provider-baremetal +repo: github.com/metal3-io/cluster-api-provider-baremetal diff --git a/README.md b/README.md index 8a8eaf573..08b1e099a 100644 --- a/README.md +++ b/README.md @@ -4,9 +4,36 @@ This repository contains a Machine actuator implementation for the Kubernetes [Cluster API](https://github.com/kubernetes-sigs/cluster-api/). For more information about this actuator and related repositories, see -[metalkube.org](http://metalkube.org/). +[metal3.io](http://metal3.io/). ## Development Environment +* [Setting up for tests](docs/dev/setup.md) * Using [Minikube](docs/dev/minikube.md) * Using [OpenShift 4](docs/dev/openshift.md) + +## ProviderSpec + +In order to create a valid Machine resource, you must include a ProviderSpec +that looks like the following example. See the +[type definition](pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types.go) +for details on each field. 
+ +``` +apiVersion: cluster.k8s.io/v1alpha1 +kind: Machine +metadata: + labels: + controller-tools.k8s.io: "1.0" + name: sample0 +spec: + providerSpec: + value: + apiVersion: "baremetal.cluster.k8s.io/v1alpha1" + kind: "BareMetalMachineProviderSpec" + image: + url: "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2" + checksum: "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2.md5sum" + userData: + Name: "worker-user-data" +``` diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 16b27865d..159ec2548 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -22,9 +22,9 @@ import ( "os" "time" - bmoapis "github.com/metalkube/baremetal-operator/pkg/apis" - "github.com/metalkube/cluster-api-provider-baremetal/pkg/apis" - "github.com/metalkube/cluster-api-provider-baremetal/pkg/cloud/baremetal/actuators/machine" + bmoapis "github.com/metal3-io/baremetal-operator/pkg/apis" + "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis" + "github.com/metal3-io/cluster-api-provider-baremetal/pkg/cloud/baremetal/actuators/machine" clusterapis "github.com/openshift/cluster-api/pkg/apis" capimachine "github.com/openshift/cluster-api/pkg/controller/machine" "k8s.io/apimachinery/pkg/runtime/schema" @@ -100,19 +100,19 @@ func waitForAPIs(cfg *rest.Config) error { return err } - metalkubeGV := schema.GroupVersion{ - Group: "metalkube.org", + metal3GV := schema.GroupVersion{ + Group: "metal3.io", Version: "v1alpha1", } for { - err = discovery.ServerSupportsVersion(c, metalkubeGV) + err = discovery.ServerSupportsVersion(c, metal3GV) if err != nil { - log.Info(fmt.Sprintf("Waiting for API group %v to be available: %v", metalkubeGV, err)) + log.Info(fmt.Sprintf("Waiting for API group %v to be available: %v", metal3GV, err)) time.Sleep(time.Second * 10) continue } - log.Info(fmt.Sprintf("Found API group %v", metalkubeGV)) + log.Info(fmt.Sprintf("Found API group %v", metal3GV)) break } diff --git a/config/crds/baremetal_v1alpha1_baremetalmachineproviderspec.yaml 
b/config/crds/baremetal_v1alpha1_baremetalmachineproviderspec.yaml index 272ede197..b4639fc2a 100644 --- a/config/crds/baremetal_v1alpha1_baremetalmachineproviderspec.yaml +++ b/config/crds/baremetal_v1alpha1_baremetalmachineproviderspec.yaml @@ -19,6 +19,19 @@ spec: of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' type: string + image: + description: Image is the image to be provisioned. + properties: + checksum: + description: Checksum is a md5sum value or a URL to retrieve one. + type: string + url: + description: URL is a location of an image to deploy. + type: string + required: + - url + - checksum + type: object kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client @@ -26,10 +39,13 @@ spec: type: string metadata: type: object - spec: - type: object - status: + userData: + description: UserData references the Secret that holds user data needed + by the bare metal operator. The Namespace is optional; it will default + to the Machine's namespace if not specified. 
type: object + required: + - image version: v1alpha1 status: acceptedNames: diff --git a/config/crds/baremetal_v1alpha1_baremetalmachineproviderstatus.yaml b/config/crds/baremetal_v1alpha1_baremetalmachineproviderstatus.yaml index 714223705..336153fa2 100644 --- a/config/crds/baremetal_v1alpha1_baremetalmachineproviderstatus.yaml +++ b/config/crds/baremetal_v1alpha1_baremetalmachineproviderstatus.yaml @@ -26,10 +26,6 @@ spec: type: string metadata: type: object - spec: - type: object - status: - type: object version: v1alpha1 status: acceptedNames: diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml index fcbf39dc6..a093e3a71 100644 --- a/config/default/manager_image_patch.yaml +++ b/config/default/manager_image_patch.yaml @@ -7,6 +7,5 @@ spec: template: spec: containers: - # Change the value of image field below to your controller image URL - - image: IMAGE_URL + - image: quay.io/metal3-io/cluster-api-provider-baremetal:master name: manager diff --git a/config/kustomization.yaml b/config/kustomization.yaml index a00f2f7c5..35f202103 100644 --- a/config/kustomization.yaml +++ b/config/kustomization.yaml @@ -1,5 +1,5 @@ # Adds namespace to all resources. -namespace: cluster-api-provider-baremetal-system +namespace: metal3 # Value of this field is prepended to the # names of all resources, e.g. 
a deployment named diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index b718bb4ec..401bb08a3 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -43,7 +43,7 @@ spec: spec: containers: - command: - - /manager + - /machine-controller-manager image: controller:latest imagePullPolicy: Always name: manager diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml index 027073f95..a7fc14ddc 100644 --- a/config/rbac/auth_proxy_service.yaml +++ b/config/rbac/auth_proxy_service.yaml @@ -8,7 +8,7 @@ metadata: labels: control-plane: controller-manager controller-tools.k8s.io: "1.0" - name: controller-manager-metrics-service + name: controller-manager-metrics-svc namespace: system spec: ports: diff --git a/config/rbac/rbac_role.yaml b/config/rbac/rbac_role.yaml index 0766795cf..39422b64a 100644 --- a/config/rbac/rbac_role.yaml +++ b/config/rbac/rbac_role.yaml @@ -44,6 +44,16 @@ rules: - update - patch - delete +- apiGroups: + - metal3.io + resources: + - baremetalhosts + verbs: + - get + - list + - watch + - update + - patch - apiGroups: - baremetal.k8s.io resources: diff --git a/docs/api.md b/docs/api.md new file mode 100644 index 000000000..a6ff31ea8 --- /dev/null +++ b/docs/api.md @@ -0,0 +1,72 @@ +# API and Resource Definitions + +## Machine + +The `Machine` resource is defined by the +[cluster-api](https://github.com/kubernetes-sigs/cluster-api) project. A +`Machine` includes a `providerSpec` field which includes the data specific to +this `cluster-api` provider. + +## BareMetalMachineProviderSpec + +* **image** -- This includes two sub-fields, `url` and `checksum`, which + include the URL to the image and the URL to a checksum for that image. These + fields are required. The image will be used for provisioning of the + `BareMetalHost` chosen by the `Machine` actuator. 
+ +* **userData** -- This includes two sub-fields, `name` and `namespace`, which + reference a `Secret` that contains base64 encoded user-data to be written to + a config drive on the provisioned `BareMetalHost`. This field is optional. + +## Sample Machine + +```yaml +apiVersion: cluster.k8s.io/v1alpha1 +kind: Machine +metadata: + annotations: + metal3.io/BareMetalHost: metal3/master-0 + creationTimestamp: "2019-05-13T13:00:51Z" + finalizers: + - machine.cluster.k8s.io + generateName: baremetal-machine- + generation: 2 + name: centos + namespace: metal3 + resourceVersion: "1112" + selfLink: /apis/cluster.k8s.io/v1alpha1/namespaces/metal3/machines/centos + uid: 22acee54-757f-11e9-8091-280d3563c053 +spec: + metadata: + creationTimestamp: null + providerSpec: + value: + apiVersion: baremetal.cluster.k8s.io/v1alpha1 + image: + checksum: http://172.22.0.1/images/CentOS-7-x86_64-GenericCloud-1901.qcow2.md5sum + url: http://172.22.0.1/images/CentOS-7-x86_64-GenericCloud-1901.qcow2 + kind: BareMetalMachineProviderSpec + userData: + name: centos-user-data + namespace: metal3 + versions: + kubelet: "" +``` + +## Sample userData Secret + +```yaml +apiVersion: v1 +data: + userData: BASE64_ENCODED_USER_DATA +kind: Secret +metadata: + annotations: + creationTimestamp: 2019-05-13T13:00:51Z + name: centos-user-data + namespace: metal3 + resourceVersion: "1108" + selfLink: /api/v1/namespaces/metal3/secrets/centos-user-data + uid: 22792b3e-757f-11e9-8091-280d3563c053 +type: Opaque +``` diff --git a/docs/dev/minikube.md b/docs/dev/minikube.md index e54503ff7..aff5702d3 100644 --- a/docs/dev/minikube.md +++ b/docs/dev/minikube.md @@ -18,7 +18,7 @@ The actuator also uses the `BareMetalHost` custom resource that’s defined by the `baremetal-operator`. 
```bash -kubectl apply -f vendor/github.com/metalkube/baremetal-operator/deploy/crds/metalkube_v1alpha1_baremetalhost_crd.yaml +kubectl apply -f vendor/github.com/metal3-io/baremetal-operator/deploy/crds/metal3_v1alpha1_baremetalhost_crd.yaml ``` ## Create a BareMetalHost @@ -30,7 +30,7 @@ a dummy `BareMetalHost` object. There’s no requirement to actually run the `baremetal-operator` to test the reconciliation logic of the actuator. Refer to the [baremetal-operator developer -documentation](https://github.com/metalkube/baremetal-operator/blob/master/docs/dev-setup.md) +documentation](https://github.com/metal3-io/baremetal-operator/blob/master/docs/dev-setup.md) for instructions and tools for creating BareMetalHost objects. ## Run the Actuator diff --git a/docs/dev/setup.md b/docs/dev/setup.md new file mode 100644 index 000000000..afd125a5e --- /dev/null +++ b/docs/dev/setup.md @@ -0,0 +1,19 @@ +# Setting up a development environment + +The cluster-api requires two external tools for running the tests +during development. + +## Install kustomize + +```bash +eval $(go env) +export GOPATH +./tools/install_kustomize.sh +``` + +## Install kubebuilder + +```bash +./tools/install_kubebuilder.sh +sudo mv kubebuilder /usr/local +``` diff --git a/pkg/apis/addtoscheme_baremetal_v1alpha1.go b/pkg/apis/addtoscheme_baremetal_v1alpha1.go index 361f6fbc7..5fbf89d01 100644 --- a/pkg/apis/addtoscheme_baremetal_v1alpha1.go +++ b/pkg/apis/addtoscheme_baremetal_v1alpha1.go @@ -17,7 +17,7 @@ limitations under the License. 
package apis import ( - "github.com/metalkube/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1" + "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1" ) func init() { diff --git a/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types.go b/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types.go index 391c0477b..825af9573 100644 --- a/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types.go +++ b/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types.go @@ -17,35 +17,59 @@ limitations under the License. package v1alpha1 import ( + "fmt" + + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -// BareMetalMachineProviderSpecSpec defines the desired state of BareMetalMachineProviderSpec -type BareMetalMachineProviderSpecSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file -} - -// BareMetalMachineProviderSpecStatus defines the observed state of BareMetalMachineProviderSpec -type BareMetalMachineProviderSpecStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// BareMetalMachineProviderSpec is the Schema for the baremetalmachineproviderspecs API +// BareMetalMachineProviderSpec holds data that the actuator needs to provision +// and manage a Machine. 
// +k8s:openapi-gen=true type BareMetalMachineProviderSpec struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec BareMetalMachineProviderSpecSpec `json:"spec,omitempty"` - Status BareMetalMachineProviderSpecStatus `json:"status,omitempty"` + // Image is the image to be provisioned. + Image Image `json:"image"` + + // UserData references the Secret that holds user data needed by the bare metal + // operator. The Namespace is optional; it will default to the Machine's + // namespace if not specified. + UserData *corev1.SecretReference `json:"userData,omitempty"` +} + +// Image holds the details of an image to use during provisioning. +type Image struct { + // URL is a location of an image to deploy. + URL string `json:"url"` + + // Checksum is a md5sum value or a URL to retrieve one. + Checksum string `json:"checksum"` +} + +// IsValid returns an error if the object is not valid, otherwise nil. The +// string representation of the error is suitable for human consumption. 
+func (s *BareMetalMachineProviderSpec) IsValid() error { + missing := []string{} + if s.Image.URL == "" { + missing = append(missing, "Image.URL") + } + if s.Image.Checksum == "" { + missing = append(missing, "Image.Checksum") + } + if s.UserData == nil { + missing = append(missing, "UserData") + } else if s.UserData.Name == "" { + missing = append(missing, "UserData.Name") + } + if len(missing) > 0 { + return fmt.Errorf("Missing fields from ProviderSpec: %v", missing) + } + return nil } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types_test.go b/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types_test.go index 53e8a2957..52a0ef3b0 100644 --- a/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types_test.go +++ b/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderspec_types_test.go @@ -21,10 +21,109 @@ import ( "github.com/onsi/gomega" "golang.org/x/net/context" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) +func TestProviderSpecIsValid(t *testing.T) { + cases := []struct { + Spec BareMetalMachineProviderSpec + ErrorExpected bool + Name string + }{ + { + Spec: BareMetalMachineProviderSpec{}, + ErrorExpected: true, + Name: "empty spec", + }, + { + Spec: BareMetalMachineProviderSpec{ + Image: Image{ + URL: "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2", + Checksum: "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2.md5sum", + }, + UserData: &corev1.SecretReference{ + Name: "worker-user-data", + }, + }, + ErrorExpected: false, + Name: "Valid spec without UserData.Namespace", + }, + { + Spec: BareMetalMachineProviderSpec{ + Image: Image{ + URL: "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2", + Checksum: "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2.md5sum", + }, + UserData: &corev1.SecretReference{ + Name: "worker-user-data", + Namespace: "otherns", + }, + }, + ErrorExpected: 
false, + Name: "Valid spec with UserData.Namespace", + }, + { + Spec: BareMetalMachineProviderSpec{ + Image: Image{ + Checksum: "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2.md5sum", + }, + UserData: &corev1.SecretReference{ + Name: "worker-user-data", + }, + }, + ErrorExpected: true, + Name: "missing Image.URL", + }, + { + Spec: BareMetalMachineProviderSpec{ + Image: Image{ + URL: "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2", + }, + UserData: &corev1.SecretReference{ + Name: "worker-user-data", + }, + }, + ErrorExpected: true, + Name: "missing Image.Checksum", + }, + { + Spec: BareMetalMachineProviderSpec{ + Image: Image{ + URL: "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2", + Checksum: "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2.md5sum", + }, + }, + ErrorExpected: true, + Name: "missing UserData", + }, + { + Spec: BareMetalMachineProviderSpec{ + Image: Image{ + URL: "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2", + Checksum: "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2.md5sum", + }, + UserData: &corev1.SecretReference{ + Namespace: "otherns", + }, + }, + ErrorExpected: true, + Name: "missing UserData.Name", + }, + } + + for _, tc := range cases { + err := tc.Spec.IsValid() + if tc.ErrorExpected && err == nil { + t.Errorf("Did not get error from case \"%v\"", tc.Name) + } + if !tc.ErrorExpected && err != nil { + t.Errorf("Got unexpected error from case \"%v\": %v", tc.Name, err) + } + } +} + func TestStorageBareMetalMachineProviderSpec(t *testing.T) { key := types.NamespacedName{ Name: "foo", @@ -34,7 +133,11 @@ func TestStorageBareMetalMachineProviderSpec(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", - }} + }, + UserData: &corev1.SecretReference{ + Name: "foo", + }, + } g := gomega.NewGomegaWithT(t) // Test Create diff --git a/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderstatus_types.go b/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderstatus_types.go index 1fc46c26b..122ba5f7b 
100644 --- a/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderstatus_types.go +++ b/pkg/apis/baremetal/v1alpha1/baremetalmachineproviderstatus_types.go @@ -20,21 +20,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -// BareMetalMachineProviderStatusSpec defines the desired state of BareMetalMachineProviderStatus -type BareMetalMachineProviderStatusSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file -} - -// BareMetalMachineProviderStatusStatus defines the observed state of BareMetalMachineProviderStatus -type BareMetalMachineProviderStatusStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -43,9 +28,6 @@ type BareMetalMachineProviderStatusStatus struct { type BareMetalMachineProviderStatus struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec BareMetalMachineProviderStatusSpec `json:"spec,omitempty"` - Status BareMetalMachineProviderStatusStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/apis/baremetal/v1alpha1/doc.go b/pkg/apis/baremetal/v1alpha1/doc.go index f10ba10df..e88a2570d 100644 --- a/pkg/apis/baremetal/v1alpha1/doc.go +++ b/pkg/apis/baremetal/v1alpha1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// Package v1alpha1 contains API Schema definitions for the baremetal v1alpha1 API group // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/metalkube/cluster-api-provider-baremetal/pkg/apis/baremetal +// +k8s:conversion-gen=github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal // +k8s:defaulter-gen=TypeMeta // +groupName=baremetal.cluster.k8s.io package v1alpha1 diff --git a/pkg/apis/baremetal/v1alpha1/register.go b/pkg/apis/baremetal/v1alpha1/register.go index ce08fd83f..0d73274ea 100644 --- a/pkg/apis/baremetal/v1alpha1/register.go +++ b/pkg/apis/baremetal/v1alpha1/register.go @@ -19,7 +19,7 @@ limitations under the License. // Package v1alpha1 contains API Schema definitions for the baremetal v1alpha1 API group // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/metalkube/cluster-api-provider-baremetal/pkg/apis/baremetal +// +k8s:conversion-gen=github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal // +k8s:defaulter-gen=TypeMeta // +groupName=baremetal.cluster.k8s.io package v1alpha1 diff --git a/pkg/apis/baremetal/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/baremetal/v1alpha1/zz_generated.deepcopy.go index f481f2b4d..5e1ab76a6 100644 --- a/pkg/apis/baremetal/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/baremetal/v1alpha1/zz_generated.deepcopy.go @@ -20,6 +20,7 @@ limitations under the License. 
package v1alpha1 import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -28,8 +29,12 @@ func (in *BareMetalMachineProviderSpec) DeepCopyInto(out *BareMetalMachineProvid *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + out.Image = in.Image + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(v1.SecretReference) + **out = **in + } return } @@ -84,45 +89,11 @@ func (in *BareMetalMachineProviderSpecList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BareMetalMachineProviderSpecSpec) DeepCopyInto(out *BareMetalMachineProviderSpecSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalMachineProviderSpecSpec. -func (in *BareMetalMachineProviderSpecSpec) DeepCopy() *BareMetalMachineProviderSpecSpec { - if in == nil { - return nil - } - out := new(BareMetalMachineProviderSpecSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BareMetalMachineProviderSpecStatus) DeepCopyInto(out *BareMetalMachineProviderSpecStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalMachineProviderSpecStatus. -func (in *BareMetalMachineProviderSpecStatus) DeepCopy() *BareMetalMachineProviderSpecStatus { - if in == nil { - return nil - } - out := new(BareMetalMachineProviderSpecStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BareMetalMachineProviderStatus) DeepCopyInto(out *BareMetalMachineProviderStatus) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status return } @@ -178,33 +149,17 @@ func (in *BareMetalMachineProviderStatusList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BareMetalMachineProviderStatusSpec) DeepCopyInto(out *BareMetalMachineProviderStatusSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalMachineProviderStatusSpec. -func (in *BareMetalMachineProviderStatusSpec) DeepCopy() *BareMetalMachineProviderStatusSpec { - if in == nil { - return nil - } - out := new(BareMetalMachineProviderStatusSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BareMetalMachineProviderStatusStatus) DeepCopyInto(out *BareMetalMachineProviderStatusStatus) { +func (in *Image) DeepCopyInto(out *Image) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalMachineProviderStatusStatus. -func (in *BareMetalMachineProviderStatusStatus) DeepCopy() *BareMetalMachineProviderStatusStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. 
+func (in *Image) DeepCopy() *Image { if in == nil { return nil } - out := new(BareMetalMachineProviderStatusStatus) + out := new(Image) in.DeepCopyInto(out) return out } diff --git a/pkg/cloud/baremetal/actuators/machine/actuator.go b/pkg/cloud/baremetal/actuators/machine/actuator.go index daea23630..91621b2dc 100644 --- a/pkg/cloud/baremetal/actuators/machine/actuator.go +++ b/pkg/cloud/baremetal/actuators/machine/actuator.go @@ -23,27 +23,25 @@ import ( "math/rand" "time" - bmh "github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1" + bmh "github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1" + bmv1alpha1 "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1" clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" + "github.com/openshift/cluster-api/pkg/apis/machine/common" machinev1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" clustererror "github.com/openshift/cluster-api/pkg/controller/error" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" ) const ( - ProviderName = "solas" + ProviderName = "baremetal" // HostAnnotation is the key for an annotation that should go on a Machine to // reference what BareMetalHost it corresponds to. - HostAnnotation = "metalkube.org/BareMetalHost" - // FIXME(dhellmann): These image values should probably come from - // configuration settings and something that can tell the IP - // address of the web server hosting the image in the ironic pod. 
- instanceImageSource = "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2" - instanceImageChecksumURL = instanceImageSource + ".md5sum" - requeueAfter = time.Second * 30 + HostAnnotation = "metal3.io/BareMetalHost" + requeueAfter = time.Second * 30 ) // Add RBAC rules to access cluster-api resources @@ -51,6 +49,9 @@ const ( //+kubebuilder:rbac:groups=cluster.k8s.io,resources=clusters;clusters/status,verbs=get;list;watch //+kubebuilder:rbac:groups="",resources=nodes;events,verbs=get;list;watch;create;update;patch;delete +// RBAC to access BareMetalHost resources from metal3.io +//+kubebuilder:rbac:groups=metal3.io,resources=baremetalhosts,verbs=get;list;watch;update;patch + // Actuator is responsible for performing machine reconciliation type Actuator struct { client client.Client @@ -71,6 +72,27 @@ func NewActuator(params ActuatorParams) (*Actuator, error) { // Create creates a machine and is invoked by the Machine Controller func (a *Actuator) Create(ctx context.Context, cluster *clusterv1.Cluster, machine *machinev1.Machine) error { log.Printf("Creating machine %v .", machine.Name) + + // load and validate the config + if machine.Spec.ProviderSpec.Value == nil { + return a.setError(ctx, machine, "ProviderSpec is missing") + } + config, err := configFromProviderSpec(machine.Spec.ProviderSpec) + if err != nil { + log.Printf("Error reading ProviderSpec for machine %s: %s", machine.Name, err.Error()) + return err + } + err = config.IsValid() + if err != nil { + return a.setError(ctx, machine, err.Error()) + } + + // clear an error if one was previously set + err = a.clearError(ctx, machine) + if err != nil { + return err + } + // look for associated BMH host, err := a.getHost(ctx, machine) if err != nil { @@ -92,6 +114,11 @@ func (a *Actuator) Create(ctx context.Context, cluster *clusterv1.Cluster, machi log.Printf("Machine %s already associated with host %s", machine.Name, host.Name) } + err = a.setHostSpec(ctx, host, machine, config) + if err != nil { + return 
err + } + err = a.ensureAnnotation(ctx, machine, host) if err != nil { return err @@ -128,6 +155,14 @@ func (a *Actuator) Delete(ctx context.Context, cluster *clusterv1.Cluster, machi // Update updates a machine and is invoked by the Machine Controller func (a *Actuator) Update(ctx context.Context, cluster *clusterv1.Cluster, machine *machinev1.Machine) error { log.Printf("Updating machine %v .", machine.Name) + + // clear any error message that was previously set. This method doesn't set + // error messages yet, so we know that it's incorrect to have one here. + err := a.clearError(ctx, machine) + if err != nil { + return err + } + host, err := a.getHost(ctx, machine) if err != nil { return err @@ -210,9 +245,7 @@ func (a *Actuator) getHost(ctx context.Context, machine *machinev1.Machine) (*bm // chooseHost iterates through known hosts and returns one that can be // associated with the machine. It searches all hosts in case one already has an -// association with this machine. It will add a Machine reference and update the -// host via the kube API before returning the host. Returns nil if a host is not -// available. +// association with this machine. 
func (a *Actuator) chooseHost(ctx context.Context, machine *machinev1.Machine) (*bmh.BareMetalHost, error) { // get list of BMH hosts := bmh.BareMetalHostList{} @@ -231,7 +264,7 @@ func (a *Actuator) chooseHost(ctx context.Context, machine *machinev1.Machine) ( for i, host := range hosts.Items { if host.Available() { availableHosts = append(availableHosts, &hosts.Items[i]) - } else if host.Spec.MachineRef.Name == machine.Name && host.Spec.MachineRef.Namespace == machine.Namespace { + } else if host.Spec.MachineRef != nil && host.Spec.MachineRef.Name == machine.Name && host.Spec.MachineRef.Namespace == machine.Namespace { log.Printf("found host %s with existing MachineRef", host.Name) return &hosts.Items[i], nil } @@ -244,29 +277,31 @@ func (a *Actuator) chooseHost(ctx context.Context, machine *machinev1.Machine) ( // choose a host at random from available hosts rand.Seed(time.Now().Unix()) chosenHost := availableHosts[rand.Intn(len(availableHosts))] - chosenHost.Spec.MachineRef = &corev1.ObjectReference{ + + return chosenHost, nil +} + +// setHostSpec will ensure the host's Spec is set according to the machine's +// details. It will then update the host via the kube API. If UserData does not +// include a Namespace, it will default to the Machine's namespace. +func (a *Actuator) setHostSpec(ctx context.Context, host *bmh.BareMetalHost, machine *machinev1.Machine, + config *bmv1alpha1.BareMetalMachineProviderSpec) error { + + host.Spec.MachineRef = &corev1.ObjectReference{ Name: machine.Name, Namespace: machine.Namespace, } - // FIXME(dhellmann): When we stop using the consts for these - // settings, we need to pass the right values. - chosenHost.Spec.Image = &bmh.Image{ - URL: instanceImageSource, - Checksum: instanceImageChecksumURL, - } - chosenHost.Spec.Online = true - chosenHost.Spec.UserData = &corev1.SecretReference{ - Namespace: machine.Namespace, // is it safe to assume the same namespace? - // FIXME(dhellmann): Is this name openshift-specific? 
- Name: "worker-user-data", + host.Spec.Image = &bmh.Image{ + URL: config.Image.URL, + Checksum: config.Image.Checksum, } - err = a.client.Update(ctx, chosenHost) - if err != nil { - return nil, err + host.Spec.Online = true + host.Spec.UserData = config.UserData + if host.Spec.UserData.Namespace == "" { + host.Spec.UserData.Namespace = machine.Namespace } - - return chosenHost, nil + return a.client.Update(ctx, host) } // ensureAnnotation makes sure the machine has an annotation that references the @@ -292,3 +327,47 @@ func (a *Actuator) ensureAnnotation(ctx context.Context, machine *machinev1.Mach machine.ObjectMeta.SetAnnotations(annotations) return a.client.Update(ctx, machine) } + +// setError sets the ErrorMessage and ErrorReason fields on the machine and logs +// the message. It assumes the reason is invalid configuration, since that is +// currently the only relevant MachineStatusError choice. +func (a *Actuator) setError(ctx context.Context, machine *machinev1.Machine, message string) error { + machine.Status.ErrorMessage = &message + reason := common.InvalidConfigurationMachineError + machine.Status.ErrorReason = &reason + log.Printf("Machine %s: %s", machine.Name, message) + return a.client.Status().Update(ctx, machine) +} + +// clearError removes the ErrorMessage from the machine's Status if set. Returns +// nil if ErrorMessage was already nil. Returns a RequeueAfterError if the +// machine was updated. 
+func (a *Actuator) clearError(ctx context.Context, machine *machinev1.Machine) error { + if machine.Status.ErrorMessage != nil || machine.Status.ErrorReason != nil { + machine.Status.ErrorMessage = nil + machine.Status.ErrorReason = nil + err := a.client.Status().Update(ctx, machine) + if err != nil { + return err + } + log.Printf("Cleared error message from machine %s", machine.Name) + return &clustererror.RequeueAfterError{} + } + return nil +} + +// configFromProviderSpec returns a BareMetalMachineProviderSpec by +// deserializing the contents of a ProviderSpec +func configFromProviderSpec(providerSpec machinev1.ProviderSpec) (*bmv1alpha1.BareMetalMachineProviderSpec, error) { + if providerSpec.Value == nil { + return nil, fmt.Errorf("ProviderSpec missing") + } + + var config bmv1alpha1.BareMetalMachineProviderSpec + err := yaml.UnmarshalStrict(providerSpec.Value.Raw, &config) + if err != nil { + return nil, err + } + + return &config, nil +} diff --git a/pkg/cloud/baremetal/actuators/machine/actuator_test.go b/pkg/cloud/baremetal/actuators/machine/actuator_test.go index c71b738ec..416cb3caa 100644 --- a/pkg/cloud/baremetal/actuators/machine/actuator_test.go +++ b/pkg/cloud/baremetal/actuators/machine/actuator_test.go @@ -4,8 +4,9 @@ import ( "context" "testing" - bmoapis "github.com/metalkube/baremetal-operator/pkg/apis" - bmh "github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1" + bmoapis "github.com/metal3-io/baremetal-operator/pkg/apis" + bmh "github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1" + bmv1alpha1 "github.com/metal3-io/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1" clusterapis "github.com/openshift/cluster-api/pkg/apis" machinev1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" corev1 "k8s.io/api/core/v1" @@ -13,6 +14,14 @@ import ( "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/yaml" +) + 
+const ( + testImageURL = "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2" + testImageChecksumURL = "http://172.22.0.1/images/rhcos-ootpa-latest.qcow2.md5sum" + testUserDataSecretName = "worker-user-data" + testUserDataSecretNamespace = "myns" ) func TestChooseHost(t *testing.T) { @@ -55,6 +64,17 @@ func TestChooseHost(t *testing.T) { Namespace: "someotherns", }, } + discoveredHost := bmh.BareMetalHost{ + ObjectMeta: metav1.ObjectMeta{ + Name: "discoveredHost", + Namespace: "myns", + }, + Status: bmh.BareMetalHostStatus{ + ErrorMessage: "this host is discovered and not usable", + }, + } + + _, providerSpec := newConfig(t, "") testCases := []struct { Machine machinev1.Machine @@ -68,10 +88,24 @@ func TestChooseHost(t *testing.T) { Name: "machine1", Namespace: "myns", }, + Spec: machinev1.MachineSpec{ + ProviderSpec: providerSpec, + }, }, Hosts: []runtime.Object{&host2, &host1}, ExpectedHostName: host2.Name, }, + { + // should ignore discoveredHost and pick host2, which lacks a MachineRef + Machine: machinev1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine1", + Namespace: "myns", + }, + }, + Hosts: []runtime.Object{&discoveredHost, &host2, &host1}, + ExpectedHostName: host2.Name, + }, { // should pick host3, which already has a matching MachineRef Machine: machinev1.Machine{ @@ -79,6 +113,9 @@ func TestChooseHost(t *testing.T) { Name: "machine1", Namespace: "myns", }, + Spec: machinev1.MachineSpec{ + ProviderSpec: providerSpec, + }, }, Hosts: []runtime.Object{&host1, &host3, &host2}, ExpectedHostName: host3.Name, @@ -91,6 +128,9 @@ func TestChooseHost(t *testing.T) { Name: "machine2", Namespace: "myns", }, + Spec: machinev1.MachineSpec{ + ProviderSpec: providerSpec, + }, }, Hosts: []runtime.Object{&host1, &host3, &host4}, ExpectedHostName: "", @@ -116,20 +156,108 @@ func TestChooseHost(t *testing.T) { } if err != nil { t.Errorf("%v", err) - } - if result.Spec.MachineRef.Name != tc.Machine.Name { - t.Errorf("found machine ref %v", result.Spec.MachineRef) 
+ return } if result.Name != tc.ExpectedHostName { t.Errorf("host %s chosen instead of %s", result.Name, tc.ExpectedHostName) } + } +} + +func TestSetHostSpec(t *testing.T) { + for _, tc := range []struct { + UserDataNamespace string + ExpectedUserDataNamespace string + }{ + { + UserDataNamespace: "otherns", + ExpectedUserDataNamespace: "otherns", + }, + { + UserDataNamespace: "", + ExpectedUserDataNamespace: "myns", + }, + } { + + // test data + config, providerSpec := newConfig(t, tc.UserDataNamespace) + host := bmh.BareMetalHost{ + ObjectMeta: metav1.ObjectMeta{ + Name: "host2", + Namespace: "myns", + }, + } + machine := machinev1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine1", + Namespace: "myns", + }, + Spec: machinev1.MachineSpec{ + ProviderSpec: providerSpec, + }, + } + + // test setup + scheme := runtime.NewScheme() + bmoapis.AddToScheme(scheme) + c := fakeclient.NewFakeClientWithScheme(scheme, &host) + + actuator, err := NewActuator(ActuatorParams{ + Client: c, + }) + if err != nil { + t.Errorf("%v", err) + return + } + + // run the function + err = actuator.setHostSpec(context.TODO(), &host, &machine, config) + if err != nil { + t.Errorf("%v", err) + return + } + + // get the saved result savedHost := bmh.BareMetalHost{} - err = c.Get(context.TODO(), client.ObjectKey{Name: result.Name, Namespace: result.Namespace}, &savedHost) + err = c.Get(context.TODO(), client.ObjectKey{Name: host.Name, Namespace: host.Namespace}, &savedHost) if err != nil { t.Errorf("%v", err) + return } + + // validate the result if savedHost.Spec.MachineRef == nil { - t.Errorf("machine ref %v not saved to host", result.Spec.MachineRef) + t.Errorf("MachineRef not set") + return + } + if savedHost.Spec.MachineRef.Name != machine.Name { + t.Errorf("found machine ref %v", savedHost.Spec.MachineRef) + } + if savedHost.Spec.MachineRef.Namespace != machine.Namespace { + t.Errorf("found machine ref %v", savedHost.Spec.MachineRef) + } + if savedHost.Spec.Online != true { + 
t.Errorf("host not set to Online") + } + if savedHost.Spec.Image == nil { + t.Errorf("Image not set") + return + } + if savedHost.Spec.Image.URL != testImageURL { + t.Errorf("expected ImageURL %s, got %s", testImageURL, savedHost.Spec.Image.URL) + } + if savedHost.Spec.Image.Checksum != testImageChecksumURL { + t.Errorf("expected ImageChecksumURL %s, got %s", testImageChecksumURL, savedHost.Spec.Image.Checksum) + } + if savedHost.Spec.UserData == nil { + t.Errorf("UserData not set") + return + } + if savedHost.Spec.UserData.Namespace != tc.ExpectedUserDataNamespace { + t.Errorf("expected Userdata.Namespace %s, got %s", tc.ExpectedUserDataNamespace, savedHost.Spec.UserData.Namespace) + } + if savedHost.Spec.UserData.Name != testUserDataSecretName { + t.Errorf("expected Userdata.Name %s, got %s", testUserDataSecretName, savedHost.Spec.UserData.Name) } } } @@ -501,3 +629,58 @@ func TestDelete(t *testing.T) { } } } + +func TestConfigFromProviderSpec(t *testing.T) { + ps := machinev1.ProviderSpec{ + Value: &runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"baremetal.cluster.k8s.io/v1alpha1","userData":{"Name":"worker-user-data","Namespace":"myns"},"image":{"Checksum":"http://172.22.0.1/images/rhcos-ootpa-latest.qcow2.md5sum","URL":"http://172.22.0.1/images/rhcos-ootpa-latest.qcow2"},"kind":"BareMetalMachineProviderSpec"}`), + }, + } + config, err := configFromProviderSpec(ps) + if err != nil { + t.Errorf("Error: %s", err.Error()) + return + } + if config == nil { + t.Errorf("got a nil config") + return + } + + if config.Image.URL != testImageURL { + t.Errorf("expected Image.URL %s, got %s", testImageURL, config.Image.URL) + } + if config.Image.Checksum != testImageChecksumURL { + t.Errorf("expected Image.Checksum %s, got %s", testImageChecksumURL, config.Image.Checksum) + } + if config.UserData == nil { + t.Errorf("UserData not set") + return + } + if config.UserData.Name != testUserDataSecretName { + t.Errorf("expected UserData.Name %s, got %s", 
testUserDataSecretName, config.UserData.Name) + } + if config.UserData.Namespace != testUserDataSecretNamespace { + t.Errorf("expected UserData.Namespace %s, got %s", testUserDataSecretNamespace, config.UserData.Namespace) + } +} + +func newConfig(t *testing.T, UserDataNamespace string) (*bmv1alpha1.BareMetalMachineProviderSpec, machinev1.ProviderSpec) { + config := bmv1alpha1.BareMetalMachineProviderSpec{ + Image: bmv1alpha1.Image{ + URL: testImageURL, + Checksum: testImageChecksumURL, + }, + UserData: &corev1.SecretReference{ + Name: testUserDataSecretName, + Namespace: UserDataNamespace, + }, + } + out, err := yaml.Marshal(&config) + if err != nil { + t.Logf("could not marshal BareMetalMachineProviderSpec: %v", err) + t.FailNow() + } + return &config, machinev1.ProviderSpec{ + Value: &runtime.RawExtension{Raw: out}, + } +} diff --git a/pkg/controller/add_machine.go b/pkg/controller/add_machine.go index 116baed8a..f3abe9e18 100644 --- a/pkg/controller/add_machine.go +++ b/pkg/controller/add_machine.go @@ -17,7 +17,7 @@ limitations under the License. 
package controller import ( - "github.com/metalkube/cluster-api-provider-baremetal/pkg/cloud/baremetal/actuators/machine" + "github.com/metal3-io/cluster-api-provider-baremetal/pkg/cloud/baremetal/actuators/machine" capimachine "github.com/openshift/cluster-api/pkg/controller/machine" "sigs.k8s.io/controller-runtime/pkg/manager" ) diff --git a/vendor/github.com/metalkube/baremetal-operator/.gitignore b/vendor/github.com/metal3-io/baremetal-operator/.gitignore similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/.gitignore rename to vendor/github.com/metal3-io/baremetal-operator/.gitignore diff --git a/vendor/github.com/metalkube/baremetal-operator/.travis.yml b/vendor/github.com/metal3-io/baremetal-operator/.travis.yml similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/.travis.yml rename to vendor/github.com/metal3-io/baremetal-operator/.travis.yml diff --git a/vendor/github.com/metalkube/baremetal-operator/CONTRIBUTING.md b/vendor/github.com/metal3-io/baremetal-operator/CONTRIBUTING.md similarity index 80% rename from vendor/github.com/metalkube/baremetal-operator/CONTRIBUTING.md rename to vendor/github.com/metal3-io/baremetal-operator/CONTRIBUTING.md index b2b212dbe..ddff0e75b 100644 --- a/vendor/github.com/metalkube/baremetal-operator/CONTRIBUTING.md +++ b/vendor/github.com/metal3-io/baremetal-operator/CONTRIBUTING.md @@ -1,6 +1,6 @@ # How to Contribute -MetalKube projects are [Apache 2.0 licensed](LICENSE) and accept contributions via +Metal3 projects are [Apache 2.0 licensed](LICENSE) and accept contributions via GitHub pull requests. 
## Certificate of Origin diff --git a/vendor/github.com/metalkube/baremetal-operator/DCO b/vendor/github.com/metal3-io/baremetal-operator/DCO similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/DCO rename to vendor/github.com/metal3-io/baremetal-operator/DCO diff --git a/vendor/github.com/metalkube/baremetal-operator/Gopkg.lock b/vendor/github.com/metal3-io/baremetal-operator/Gopkg.lock similarity index 99% rename from vendor/github.com/metalkube/baremetal-operator/Gopkg.lock rename to vendor/github.com/metal3-io/baremetal-operator/Gopkg.lock index 0b2e4d708..3abe57636 100644 --- a/vendor/github.com/metalkube/baremetal-operator/Gopkg.lock +++ b/vendor/github.com/metal3-io/baremetal-operator/Gopkg.lock @@ -203,7 +203,7 @@ [[projects]] branch = "master" - digest = "1:d76e352fd12815ad6093fee6712149803c0ac7a13d3abce04b627372d0838039" + digest = "1:d363d138539cbd0db190e08657de2f29e28077f211abcfeaad871cf537216cbe" name = "github.com/gophercloud/gophercloud" packages = [ ".", @@ -213,7 +213,7 @@ "pagination", ] pruneopts = "NT" - revision = "954aa14363ced787c28efcfcd15ae6945eb862fb" + revision = "fe629955684802399c46e2a8e9aeecc4459d2923" [[projects]] branch = "master" diff --git a/vendor/github.com/metalkube/baremetal-operator/Gopkg.toml b/vendor/github.com/metal3-io/baremetal-operator/Gopkg.toml similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/Gopkg.toml rename to vendor/github.com/metal3-io/baremetal-operator/Gopkg.toml diff --git a/vendor/github.com/metalkube/baremetal-operator/LICENSE b/vendor/github.com/metal3-io/baremetal-operator/LICENSE similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/LICENSE rename to vendor/github.com/metal3-io/baremetal-operator/LICENSE diff --git a/vendor/github.com/metalkube/baremetal-operator/Makefile b/vendor/github.com/metal3-io/baremetal-operator/Makefile similarity index 79% rename from vendor/github.com/metalkube/baremetal-operator/Makefile 
rename to vendor/github.com/metal3-io/baremetal-operator/Makefile index 0350d29b6..66cf5a774 100644 --- a/vendor/github.com/metalkube/baremetal-operator/Makefile +++ b/vendor/github.com/metal3-io/baremetal-operator/Makefile @@ -1,9 +1,15 @@ TEST_NAMESPACE = operator-test -RUN_NAMESPACE = metalkube +RUN_NAMESPACE = metal3 GO_TEST_FLAGS = $(VERBOSE) DEBUG = --debug SETUP = --no-setup +# Set some variables the operator expects to have in order to work +export OPERATOR_NAME=baremetal-operator +export DEPLOY_KERNEL_URL=http://172.22.0.1/images/ironic-python-agent.kernel +export DEPLOY_RAMDISK_URL=http://172.22.0.1/images/ironic-python-agent.initramfs +export IRONIC_ENDPOINT=http://localhost:6385/v1/ + .PHONY: help help: @echo "Targets:" @@ -58,15 +64,13 @@ dep: .PHONY: run run: - OPERATOR_NAME=baremetal-operator \ - operator-sdk up local \ + operator-sdk up local \ --namespace=$(RUN_NAMESPACE) \ --operator-flags="-dev" .PHONY: demo demo: - OPERATOR_NAME=baremetal-operator \ - operator-sdk up local \ + operator-sdk up local \ --namespace=$(RUN_NAMESPACE) \ --operator-flags="-dev -demo-mode" diff --git a/vendor/github.com/metalkube/baremetal-operator/README.md b/vendor/github.com/metal3-io/baremetal-operator/README.md similarity index 83% rename from vendor/github.com/metalkube/baremetal-operator/README.md rename to vendor/github.com/metal3-io/baremetal-operator/README.md index 028089a93..6969aa7c7 100644 --- a/vendor/github.com/metalkube/baremetal-operator/README.md +++ b/vendor/github.com/metal3-io/baremetal-operator/README.md @@ -3,5 +3,6 @@ Bare Metal Operator * [API documentation](docs/api.md) * [Setup Development Environment](docs/dev-setup.md) +* [Configuration](docs/configuration.md) * [Testing](docs/testing.md) * [Publishing Images](docs/publishing-images.md) diff --git a/vendor/github.com/metalkube/baremetal-operator/build/Dockerfile b/vendor/github.com/metal3-io/baremetal-operator/build/Dockerfile similarity index 53% rename from 
vendor/github.com/metalkube/baremetal-operator/build/Dockerfile rename to vendor/github.com/metal3-io/baremetal-operator/build/Dockerfile index 076bfa4a3..9f978e323 100644 --- a/vendor/github.com/metalkube/baremetal-operator/build/Dockerfile +++ b/vendor/github.com/metal3-io/baremetal-operator/build/Dockerfile @@ -1,11 +1,11 @@ FROM registry.svc.ci.openshift.org/openshift/release:golang-1.10 AS builder -WORKDIR /go/src/github.com/metalkube/baremetal-operator +WORKDIR /go/src/github.com/metal3-io/baremetal-operator COPY . . RUN go build -o build/_output/bin/baremetal-operator cmd/manager/main.go -FROM quay.io/metalkube/base-image +FROM quay.io/metal3-io/base-image -COPY --from=builder /go/src/github.com/metalkube/baremetal-operator/build/_output/bin/baremetal-operator / +COPY --from=builder /go/src/github.com/metal3-io/baremetal-operator/build/_output/bin/baremetal-operator / RUN if ! rpm -q genisoimage; \ then yum install -y genisoimage && \ @@ -13,5 +13,5 @@ RUN if ! rpm -q genisoimage; \ rm -rf /var/cache/yum/*; \ fi -LABEL io.k8s.display-name="MetalKube BareMetal Operator" \ - io.k8s.description="This is the image for the MetalKube BareMetal Operator." +LABEL io.k8s.display-name="Metal3 BareMetal Operator" \ + io.k8s.description="This is the image for the Metal3 BareMetal Operator." 
diff --git a/vendor/github.com/metalkube/baremetal-operator/cmd/make-bm-worker/main.go b/vendor/github.com/metal3-io/baremetal-operator/cmd/make-bm-worker/main.go similarity index 63% rename from vendor/github.com/metalkube/baremetal-operator/cmd/make-bm-worker/main.go rename to vendor/github.com/metal3-io/baremetal-operator/cmd/make-bm-worker/main.go index afbac4a0c..a00008aae 100644 --- a/vendor/github.com/metalkube/baremetal-operator/cmd/make-bm-worker/main.go +++ b/vendor/github.com/metal3-io/baremetal-operator/cmd/make-bm-worker/main.go @@ -20,7 +20,7 @@ data: password: {{ .EncodedPassword }} --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: {{ .Name }} @@ -29,14 +29,22 @@ spec: bmc: address: {{ .BMCAddress }} credentialsName: {{ .Name }}-bmc-secret +{{- if .WithMachine }} + machineRef: + name: {{ .Machine }} + namespace: {{ .MachineNamespace }} +{{- end }} ` // TemplateArgs holds the arguments to pass to the template. type TemplateArgs struct { - Name string - BMCAddress string - EncodedUsername string - EncodedPassword string + Name string + BMCAddress string + EncodedUsername string + EncodedPassword string + WithMachine bool + Machine string + MachineNamespace string } func encodeToSecret(input string) string { @@ -48,6 +56,10 @@ func main() { var password = flag.String("password", "", "password for BMC") var bmcAddress = flag.String("address", "", "address URL for BMC") var verbose = flag.Bool("v", false, "turn on verbose output") + var machine = flag.String( + "machine", "", "specify name of a related, existing, machine to link") + var machineNamespace = flag.String( + "machine-namespace", "", "specify namespace of a related, existing, machine to link") flag.Parse() @@ -70,10 +82,15 @@ func main() { } args := TemplateArgs{ - Name: strings.Replace(hostName, "_", "-", -1), - BMCAddress: *bmcAddress, - EncodedUsername: encodeToSecret(*username), - EncodedPassword: encodeToSecret(*password), + Name: 
strings.Replace(hostName, "_", "-", -1), + BMCAddress: *bmcAddress, + EncodedUsername: encodeToSecret(*username), + EncodedPassword: encodeToSecret(*password), + Machine: strings.TrimSpace(*machine), + MachineNamespace: strings.TrimSpace(*machineNamespace), + } + if args.Machine != "" { + args.WithMachine = true } if *verbose { fmt.Fprintf(os.Stderr, "%v", args) diff --git a/vendor/github.com/metalkube/baremetal-operator/cmd/make-virt-host/main.go b/vendor/github.com/metal3-io/baremetal-operator/cmd/make-virt-host/main.go similarity index 99% rename from vendor/github.com/metalkube/baremetal-operator/cmd/make-virt-host/main.go rename to vendor/github.com/metal3-io/baremetal-operator/cmd/make-virt-host/main.go index d6da7d094..a257417fa 100644 --- a/vendor/github.com/metalkube/baremetal-operator/cmd/make-virt-host/main.go +++ b/vendor/github.com/metal3-io/baremetal-operator/cmd/make-virt-host/main.go @@ -62,7 +62,7 @@ data: password: cGFzc3dvcmQ= --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: {{ .Domain }} diff --git a/vendor/github.com/metalkube/baremetal-operator/cmd/manager/main.go b/vendor/github.com/metal3-io/baremetal-operator/cmd/manager/main.go similarity index 95% rename from vendor/github.com/metalkube/baremetal-operator/cmd/manager/main.go rename to vendor/github.com/metal3-io/baremetal-operator/cmd/manager/main.go index ca9da75a1..cfa09a8cc 100644 --- a/vendor/github.com/metalkube/baremetal-operator/cmd/manager/main.go +++ b/vendor/github.com/metal3-io/baremetal-operator/cmd/manager/main.go @@ -7,8 +7,8 @@ import ( "os" "runtime" - "github.com/metalkube/baremetal-operator/pkg/apis" - "github.com/metalkube/baremetal-operator/pkg/controller" + "github.com/metal3-io/baremetal-operator/pkg/apis" + "github.com/metal3-io/baremetal-operator/pkg/controller" "github.com/operator-framework/operator-sdk/pkg/k8sutil" "github.com/operator-framework/operator-sdk/pkg/leader" sdkVersion 
"github.com/operator-framework/operator-sdk/version" diff --git a/vendor/github.com/metalkube/baremetal-operator/deploy/crds/demo-hosts.yaml b/vendor/github.com/metal3-io/baremetal-operator/deploy/crds/demo-hosts.yaml similarity index 74% rename from vendor/github.com/metalkube/baremetal-operator/deploy/crds/demo-hosts.yaml rename to vendor/github.com/metal3-io/baremetal-operator/deploy/crds/demo-hosts.yaml index 74b3c0913..7f381512e 100644 --- a/vendor/github.com/metalkube/baremetal-operator/deploy/crds/demo-hosts.yaml +++ b/vendor/github.com/metal3-io/baremetal-operator/deploy/crds/demo-hosts.yaml @@ -10,11 +10,44 @@ data: username: YWRtaW4= password: MWYyZDFlMmU2N2Rm --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: demo-discovered +--- +apiVersion: machine.openshift.io/v1beta1 +kind: Machine +metadata: + labels: + sigs.k8s.io/cluster-api-cluster: ostest + sigs.k8s.io/cluster-api-machine-role: worker + sigs.k8s.io/cluster-api-machine-type: worker + name: demo-ostest-worker + namespace: openshift-machine-api +--- +apiVersion: v1 +kind: Secret +metadata: + name: demo-externally-provisioned-secret +type: Opaque +data: + username: YWRtaW4= + password: MWYyZDFlMmU2N2Rm +--- +apiVersion: metal3.io/v1alpha1 +kind: BareMetalHost +metadata: + name: demo-externally-provisioned +spec: + online: false + bmc: + address: ipmi://192.168.122.1:6233 + credentialsName: demo-externally-provisioned-secret + machineRef: + name: demo-ostest-worker + namespace: openshift-machine-api + --- apiVersion: v1 kind: Secret @@ -25,12 +58,12 @@ data: username: YWRtaW4= password: MWYyZDFlMmU2N2Rm --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: demo-registration-error labels: - metalkubedemo: "" + metal3demo: "" spec: online: true bmc: @@ -47,12 +80,12 @@ data: username: YWRtaW4= password: MWYyZDFlMmU2N2Rm --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 
kind: BareMetalHost metadata: name: demo-registering labels: - metalkubedemo: "" + metal3demo: "" spec: online: true bmc: @@ -69,12 +102,12 @@ data: username: YWRtaW4= password: MWYyZDFlMmU2N2Rm --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: demo-ready labels: - metalkubedemo: "" + metal3demo: "" spec: online: true bmc: @@ -91,12 +124,12 @@ data: username: YWRtaW4= password: MWYyZDFlMmU2N2Rm --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: demo-inspecting labels: - metalkubedemo: "" + metal3demo: "" spec: online: true bmc: @@ -113,12 +146,12 @@ data: username: YWRtaW4= password: MWYyZDFlMmU2N2Rm --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: demo-provisioning labels: - metalkubedemo: "" + metal3demo: "" spec: online: true bmc: @@ -142,12 +175,12 @@ data: username: YWRtaW4= password: MWYyZDFlMmU2N2Rm --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: demo-provisioned labels: - metalkubedemo: "" + metal3demo: "" spec: online: true bmc: @@ -171,12 +204,12 @@ data: username: YWRtaW4= password: MWYyZDFlMmU2N2Rm --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: demo-validation-error labels: - metalkubedemo: "" + metal3demo: "" spec: online: true bmc: diff --git a/vendor/github.com/metalkube/baremetal-operator/deploy/crds/example-host-bad-credentials.yaml b/vendor/github.com/metal3-io/baremetal-operator/deploy/crds/example-host-bad-credentials.yaml similarity index 93% rename from vendor/github.com/metalkube/baremetal-operator/deploy/crds/example-host-bad-credentials.yaml rename to vendor/github.com/metal3-io/baremetal-operator/deploy/crds/example-host-bad-credentials.yaml index 4069e7948..c495a517c 100644 --- 
a/vendor/github.com/metalkube/baremetal-operator/deploy/crds/example-host-bad-credentials.yaml +++ b/vendor/github.com/metal3-io/baremetal-operator/deploy/crds/example-host-bad-credentials.yaml @@ -19,7 +19,7 @@ data: password: "" --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: example-baremetalhost diff --git a/vendor/github.com/metalkube/baremetal-operator/deploy/crds/example-host.yaml b/vendor/github.com/metal3-io/baremetal-operator/deploy/crds/example-host.yaml similarity index 90% rename from vendor/github.com/metalkube/baremetal-operator/deploy/crds/example-host.yaml rename to vendor/github.com/metal3-io/baremetal-operator/deploy/crds/example-host.yaml index 366aef367..5551c86db 100644 --- a/vendor/github.com/metalkube/baremetal-operator/deploy/crds/example-host.yaml +++ b/vendor/github.com/metal3-io/baremetal-operator/deploy/crds/example-host.yaml @@ -9,7 +9,7 @@ data: password: MWYyZDFlMmU2N2Rm --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: example-baremetalhost diff --git a/vendor/github.com/metalkube/baremetal-operator/deploy/crds/metalkube_v1alpha1_baremetalhost_crd.yaml b/vendor/github.com/metal3-io/baremetal-operator/deploy/crds/metal3_v1alpha1_baremetalhost_crd.yaml similarity index 69% rename from vendor/github.com/metalkube/baremetal-operator/deploy/crds/metalkube_v1alpha1_baremetalhost_crd.yaml rename to vendor/github.com/metal3-io/baremetal-operator/deploy/crds/metal3_v1alpha1_baremetalhost_crd.yaml index cb4b55b6d..5b02d4281 100644 --- a/vendor/github.com/metalkube/baremetal-operator/deploy/crds/metalkube_v1alpha1_baremetalhost_crd.yaml +++ b/vendor/github.com/metal3-io/baremetal-operator/deploy/crds/metal3_v1alpha1_baremetalhost_crd.yaml @@ -1,9 +1,9 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: baremetalhosts.metalkube.org + name: baremetalhosts.metal3.io spec: - group: 
metalkube.org + group: metal3.io names: kind: BareMetalHost listKind: BareMetalHostList @@ -23,13 +23,21 @@ spec: name: Provisioning Status type: string - JSONPath: .spec.machineRef.name - description: Machine - name: Machine using this host + description: Machine using this host + name: Machine type: string - JSONPath: .spec.bmc.address description: Address of management controler name: BMC type: string + - JSONPath: .status.hardwareProfile + description: The type of hardware detected + name: Hardware Profile + type: string + - JSONPath: .spec.online + description: Whether the host is online or not + name: Online + type: string - JSONPath: .status.errorMessage description: Most recent error name: Error diff --git a/vendor/github.com/metalkube/baremetal-operator/deploy/crds/worker-0.yaml b/vendor/github.com/metal3-io/baremetal-operator/deploy/crds/worker-0.yaml similarity index 93% rename from vendor/github.com/metalkube/baremetal-operator/deploy/crds/worker-0.yaml rename to vendor/github.com/metal3-io/baremetal-operator/deploy/crds/worker-0.yaml index 16013b850..d1b28cf22 100644 --- a/vendor/github.com/metalkube/baremetal-operator/deploy/crds/worker-0.yaml +++ b/vendor/github.com/metal3-io/baremetal-operator/deploy/crds/worker-0.yaml @@ -9,7 +9,7 @@ data: password: cGFzc3dvcmQ= --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: worker-0 diff --git a/vendor/github.com/metalkube/baremetal-operator/deploy/operator.yaml b/vendor/github.com/metal3-io/baremetal-operator/deploy/operator.yaml similarity index 65% rename from vendor/github.com/metalkube/baremetal-operator/deploy/operator.yaml rename to vendor/github.com/metal3-io/baremetal-operator/deploy/operator.yaml index 491441268..19d1eff7b 100644 --- a/vendor/github.com/metalkube/baremetal-operator/deploy/operator.yaml +++ b/vendor/github.com/metal3-io/baremetal-operator/deploy/operator.yaml @@ -1,21 +1,21 @@ apiVersion: apps/v1 kind: Deployment metadata: - 
name: metalkube-baremetal-operator + name: metal3-baremetal-operator spec: replicas: 1 selector: matchLabels: - name: metalkube-baremetal-operator + name: metal3-baremetal-operator template: metadata: labels: - name: metalkube-baremetal-operator + name: metal3-baremetal-operator spec: - serviceAccountName: metalkube-baremetal-operator + serviceAccountName: metal3-baremetal-operator containers: - name: baremetal-operator - image: quay.io/metalkube/baremetal-operator + image: quay.io/metal3-io/baremetal-operator ports: - containerPort: 60000 name: metrics @@ -33,6 +33,12 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME value: "baremetal-operator" + - name: DEPLOY_KERNEL_URL + value: "http://172.22.0.1/images/ironic-python-agent.kernel" + - name: DEPLOY_RAMDISK_URL + value: "http://172.22.0.1/images/ironic-python-agent.initramfs" + - name: IRONIC_ENDPOINT + value: "http://localhost:6385/v1/" # Temporary workaround to talk to an external Ironic process until Ironic is running in this pod. 
- name: ironic-proxy image: alpine/socat diff --git a/vendor/github.com/metalkube/baremetal-operator/deploy/role.yaml b/vendor/github.com/metal3-io/baremetal-operator/deploy/role.yaml similarity index 91% rename from vendor/github.com/metalkube/baremetal-operator/deploy/role.yaml rename to vendor/github.com/metal3-io/baremetal-operator/deploy/role.yaml index e1a1b5139..9d842135b 100644 --- a/vendor/github.com/metalkube/baremetal-operator/deploy/role.yaml +++ b/vendor/github.com/metal3-io/baremetal-operator/deploy/role.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: creationTimestamp: null - name: metalkube-baremetal-operator + name: metal3-baremetal-operator rules: - apiGroups: - "" @@ -39,7 +39,7 @@ rules: - get - create - apiGroups: - - metalkube.org + - metal3.io resources: - '*' verbs: diff --git a/vendor/github.com/metalkube/baremetal-operator/deploy/role_binding.yaml b/vendor/github.com/metal3-io/baremetal-operator/deploy/role_binding.yaml similarity index 59% rename from vendor/github.com/metalkube/baremetal-operator/deploy/role_binding.yaml rename to vendor/github.com/metal3-io/baremetal-operator/deploy/role_binding.yaml index dd09aec54..82c2860ee 100644 --- a/vendor/github.com/metalkube/baremetal-operator/deploy/role_binding.yaml +++ b/vendor/github.com/metal3-io/baremetal-operator/deploy/role_binding.yaml @@ -1,14 +1,14 @@ kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: metalkube-baremetal-operator - namespace: metalkube + name: metal3-baremetal-operator + namespace: metal3 subjects: - kind: ServiceAccount - name: metalkube-baremetal-operator + name: metal3-baremetal-operator - kind: User name: developer roleRef: kind: Role - name: metalkube-baremetal-operator + name: metal3-baremetal-operator apiGroup: rbac.authorization.k8s.io diff --git a/vendor/github.com/metalkube/baremetal-operator/deploy/service_account.yaml b/vendor/github.com/metal3-io/baremetal-operator/deploy/service_account.yaml 
similarity index 55% rename from vendor/github.com/metalkube/baremetal-operator/deploy/service_account.yaml rename to vendor/github.com/metal3-io/baremetal-operator/deploy/service_account.yaml index b215ec158..b5000fe51 100644 --- a/vendor/github.com/metalkube/baremetal-operator/deploy/service_account.yaml +++ b/vendor/github.com/metal3-io/baremetal-operator/deploy/service_account.yaml @@ -1,4 +1,4 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: metalkube-baremetal-operator + name: metal3-baremetal-operator diff --git a/vendor/github.com/metalkube/baremetal-operator/docs/BaremetalHost_ProvisioningState.dot b/vendor/github.com/metal3-io/baremetal-operator/docs/BaremetalHost_ProvisioningState.dot similarity index 90% rename from vendor/github.com/metalkube/baremetal-operator/docs/BaremetalHost_ProvisioningState.dot rename to vendor/github.com/metal3-io/baremetal-operator/docs/BaremetalHost_ProvisioningState.dot index b4a685754..656b2d036 100644 --- a/vendor/github.com/metalkube/baremetal-operator/docs/BaremetalHost_ProvisioningState.dot +++ b/vendor/github.com/metal3-io/baremetal-operator/docs/BaremetalHost_ProvisioningState.dot @@ -2,6 +2,7 @@ digraph BaremetalHost { Created [shape=house] Created -> Discovered [label="BMC.* == \"\""] Created -> Registering [label="BMC.* != \"\""] + Created -> ExternallyProvisioned [label="image.URL = '' && machineRef != nil"] Discovered [shape=doublecircle] Discovered -> Registering [label="BMC.* != \"\""] @@ -44,6 +45,9 @@ digraph BaremetalHost { Provisioned -> Deprovisioning [label="NeedsDeprovisioning()"] Provisioned -> Deprovisioning [label="!DeletionTimestamp.IsZero()"] + ExternallyProvisioned [shape=doublecircle] + ExternallyProvisioned -> Deleted [label="!DeletionTimestamp.IsZero()"] + Deprovisioning -> Provisioning [label="NeedsProvisioning()"] Deprovisioning -> Ready [label="!NeedsProvisioning()"] Deprovisioning -> Deleted [label="!DeletionTimestamp.IsZero()"] diff --git 
a/vendor/github.com/metal3-io/baremetal-operator/docs/BaremetalHost_ProvisioningState.png b/vendor/github.com/metal3-io/baremetal-operator/docs/BaremetalHost_ProvisioningState.png new file mode 100644 index 000000000..3a25d68ca Binary files /dev/null and b/vendor/github.com/metal3-io/baremetal-operator/docs/BaremetalHost_ProvisioningState.png differ diff --git a/vendor/github.com/metalkube/baremetal-operator/docs/api.md b/vendor/github.com/metal3-io/baremetal-operator/docs/api.md similarity index 86% rename from vendor/github.com/metalkube/baremetal-operator/docs/api.md rename to vendor/github.com/metal3-io/baremetal-operator/docs/api.md index 7801d31ea..a1d01df23 100644 --- a/vendor/github.com/metalkube/baremetal-operator/docs/api.md +++ b/vendor/github.com/metal3-io/baremetal-operator/docs/api.md @@ -48,7 +48,7 @@ data: password: cGFzc3dvcmQ= --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: openshift-worker-1 @@ -98,20 +98,20 @@ the host. *hardware.cpus.speed* -- The speed in GHz of the CPU. ``` -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: creationTimestamp: 2019-02-08T20:10:32Z finalizers: - - baremetalhost.metalkube.org + - baremetalhost.metal3.io generation: 9 labels: - metalkube.org/hardware-profile: unknown - metalkube.org/operational-status: online + metal3.io/hardware-profile: unknown + metal3.io/operational-status: online name: example-baremetalhost namespace: bmo-project resourceVersion: "1750818" - selfLink: /apis/metalkube.org/v1alpha1/namespaces/bmo-project/baremetalhosts/example-baremetalhost + selfLink: /apis/metal3.io/v1alpha1/namespaces/bmo-project/baremetalhosts/example-baremetalhost uid: 96837048-2bdd-11e9-8df7-525400f68198 spec: bmc: @@ -135,12 +135,12 @@ status: The BareMetalHost operator manages several labels with host status and settings to make it easier to find specific hosts. 
-*metalkube.org/hardware-profile* -- The name of the hardware profile +*metal3.io/hardware-profile* -- The name of the hardware profile that matches the hardware discovered on the host. Details about the hardware are saved to the *hardware* section of the status. If the hardware does not match a known profile, the value "unknown" is used. -*metalkube.org/operational-status* -- The status of the server. +*metal3.io/operational-status* -- The status of the server. *online* -- The server is powered on and running. @@ -158,10 +158,7 @@ Several conditions must be met in order to initiate provisioning. 1. The host `spec.image.url` field must contain a URL for a valid image file that is visible from within the cluster and from the host receiving the image. -2. The host must not have an image provisioned, as reflected by the - `status.provisioning.image.URL` field being empty. To reuse an - existing host with a different image, deprovision the host first. -3. The host must have `online` set to `true` so that the operator will +2. The host must have `online` set to `true` so that the operator will keep the host powered on. To initiate deprovisioning, clear the image URL from the host spec. diff --git a/vendor/github.com/metal3-io/baremetal-operator/docs/baremetalhost-states.md b/vendor/github.com/metal3-io/baremetal-operator/docs/baremetalhost-states.md new file mode 100644 index 000000000..ab4968fc5 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/docs/baremetalhost-states.md @@ -0,0 +1,67 @@ +# BaremetalHost Provisioning States + +The following diagram shows the possible Provisioning State transitions for the BaremetalHost object: + +![BaremetalHost ProvisioningState transitions](BaremetalHost_ProvisioningState.png) + +## Created + +Newly created hosts move immediately to Discovered or Registering. No +host stays in the Created state while the operator is working +properly. 
+ +## Discovered + +A Discovered host is missing either the BMC address or credentials +secret name, and does not have enough information to access the BMC +for registration. + +## Externally Provisioned + +An Externally Provisioned host was deployed using another tool and +then a host object was created with a link to an existing Machine +object and without an Image setting. Hosts in this state are +monitored, and only their power status is managed. + +## Registering + +The host will stay in the Registering state while the BMC access +details are being validated. + +## Inspecting + +After the host is registered, an agent image will be booted on it +using a ramdisk. The agent collects information about the available +hardware components, and this process is called "inspection." The host +will stay in the Inspecting state until this process is completed. + +## Ready + +A host in the Ready state is available to be provisioned. + +## Provisioning + +While an image is being copied to the host and it is being configured +to run the image the host will be in the Provisioning state. + +## Provisioned + +After an image is copied to the host and the host is running the +image, it will be in the Provisioned state. + +## Deprovisioning + +When the previously provisioned image is being removed from the host, +it will be in the Deprovisioning state. + +## Error + +If an error occurs during one of the processing states (Registering, +Inspecting, Provisioning, Deprovisioning) the host will enter the +Error state. + +## Deleted + +When the host is marked to be deleted, it will move from its current +state to Deleted, at which point the resource record is deleted from +kubernetes. 
diff --git a/vendor/github.com/metal3-io/baremetal-operator/docs/configuration.md b/vendor/github.com/metal3-io/baremetal-operator/docs/configuration.md new file mode 100644 index 000000000..3cfe4edf7 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/docs/configuration.md @@ -0,0 +1,14 @@ +Configuration Settings +====================== + +The operator supports several configuration options for controlling +its interaction with Ironic. + +`DEPLOY_RAMDISK_URL` -- The URL for the ramdisk of the image +containing the Ironic agent. + +`DEPLOY_KERNEL_URL` -- The URL for the kernel to go with the deploy +ramdisk. + +`IRONIC_ENDPOINT` -- The URL for the operator to use when talking to +Ironic. diff --git a/vendor/github.com/metalkube/baremetal-operator/docs/dev-setup.md b/vendor/github.com/metal3-io/baremetal-operator/docs/dev-setup.md similarity index 82% rename from vendor/github.com/metalkube/baremetal-operator/docs/dev-setup.md rename to vendor/github.com/metal3-io/baremetal-operator/docs/dev-setup.md index 92d4c1a06..2704f0889 100644 --- a/vendor/github.com/metalkube/baremetal-operator/docs/dev-setup.md +++ b/vendor/github.com/metal3-io/baremetal-operator/docs/dev-setup.md @@ -16,28 +16,28 @@ install the operator-sdk tools. 3. Create a namespace to host the operator ``` - kubectl create namespace metalkube + kubectl create namespace metal3 ``` 4. 
Install operator-sdk ``` eval $(go env) - mkdir -p $GOPATH/src/github.com/metalkube - cd $GOPATH/src/github.com/metalkube - git clone https://github.com/metalkube/baremetal-operator.git + mkdir -p $GOPATH/src/github.com/metal3-io + cd $GOPATH/src/github.com/metal3-io + git clone https://github.com/metal3-io/baremetal-operator.git cd baremetal-operator kubectl apply -f deploy/service_account.yaml kubectl apply -f deploy/role.yaml kubectl apply -f deploy/role_binding.yaml - kubectl apply -f deploy/crds/metalkube_v1alpha1_baremetalhost_crd.yaml + kubectl apply -f deploy/crds/metal3_v1alpha1_baremetalhost_crd.yaml ``` 5. Launch the operator locally ``` export OPERATOR_NAME=baremetal-operator - operator-sdk up local --namespace=metalkube + operator-sdk up local --namespace=metal3 ``` 6. Create the CR @@ -57,6 +57,17 @@ operator when launching it. operator-sdk up local --operator-flags "-test-mode" ``` +## Running a local instance of Ironic + +There is a script available that will run a set of containers locally using +`podman` to stand up Ironic for development and testing. + +See `tools/run_local_ironic.sh`. + +Note that this script may need customizations to some of the `podman run` +commands, to include environment variables that configure the containers for +your environment. + ## Using libvirt VMs with Ironic In order to use VMs as hosts, they need to be connected to [vbmc](https://docs.openstack.org/tripleo-docs/latest/install/environments/virtualbmc.html) and @@ -66,7 +77,7 @@ network interface that will PXE boot. 
For example: ```yaml -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: worker-0 @@ -96,7 +107,7 @@ data: password: cGFzc3dvcmQ= --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: openshift-worker-1 @@ -131,7 +142,7 @@ data: password: cGFzc3dvcmQ= --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: openshift-master-1 @@ -164,7 +175,7 @@ data: password: cGFzc3dvcmQ= --- -apiVersion: metalkube.org/v1alpha1 +apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: worker-99 diff --git a/vendor/github.com/metalkube/baremetal-operator/docs/publishing-images.md b/vendor/github.com/metal3-io/baremetal-operator/docs/publishing-images.md similarity index 94% rename from vendor/github.com/metalkube/baremetal-operator/docs/publishing-images.md rename to vendor/github.com/metal3-io/baremetal-operator/docs/publishing-images.md index 7579dd354..d56f62b17 100644 --- a/vendor/github.com/metalkube/baremetal-operator/docs/publishing-images.md +++ b/vendor/github.com/metal3-io/baremetal-operator/docs/publishing-images.md @@ -2,12 +2,12 @@ Publishing Images ================= Images for changes merged into master are automatically built through -the [MetalKube org on -quay.io](https://quay.io/repository/metalkube/baremetal-operator). It +the [metal3-io org on +quay.io](https://quay.io/repository/metal3-io/baremetal-operator). It is also easy to set up your own builds to test images from branches in your development fork. -1. Fork `metalkube/baremetal-operator` on GitHub. +1. Fork `metal3-io/baremetal-operator` on GitHub. 2. Set up your account on [quay.io](https://quay.io). 3. Link your repository from step 1 to quay.io by following the instructions to "Create New Repository" from @@ -49,7 +49,7 @@ your development fork. build because the UI seems to cache pretty aggressively. 5. 
Create a dev deployment file that uses your image instead of the - one from the metalkube organization. + one from the metal3-io organization. 1. Copy `deploy/operator.yaml` to `deploy/dev-operator.yaml`. 2. Edit `deploy/dev-operator.yaml` and change the `image` setting diff --git a/vendor/github.com/metalkube/baremetal-operator/docs/testing.md b/vendor/github.com/metal3-io/baremetal-operator/docs/testing.md similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/docs/testing.md rename to vendor/github.com/metal3-io/baremetal-operator/docs/testing.md diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/apis/addtoscheme_metalkube_v1alpha1.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/addtoscheme_metal3_v1alpha1.go similarity index 75% rename from vendor/github.com/metalkube/baremetal-operator/pkg/apis/addtoscheme_metalkube_v1alpha1.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/apis/addtoscheme_metal3_v1alpha1.go index 2c1f474f2..085e3f214 100644 --- a/vendor/github.com/metalkube/baremetal-operator/pkg/apis/addtoscheme_metalkube_v1alpha1.go +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/addtoscheme_metal3_v1alpha1.go @@ -1,7 +1,7 @@ package apis import ( - "github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1" + "github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1" ) func init() { diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/apis/apis.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/apis.go similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/pkg/apis/apis.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/apis/apis.go diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1/baremetalhost_types.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/baremetalhost_types.go similarity index 94% rename from 
vendor/github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1/baremetalhost_types.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/baremetalhost_types.go index 9de57e3a7..4a78d2bbd 100644 --- a/vendor/github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1/baremetalhost_types.go +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/baremetalhost_types.go @@ -15,7 +15,7 @@ const ( // BareMetalHostFinalizer is the name of the finalizer added to // hosts to block delete operations until the physical host can be // deprovisioned. - BareMetalHostFinalizer string = "baremetalhost.metalkube.org" + BareMetalHostFinalizer string = "baremetalhost.metal3.io" ) // OperationalStatus represents the state of the host @@ -74,6 +74,10 @@ const ( // disk(s) StateProvisioned ProvisioningState = "provisioned" + // StateExternallyProvisioned means something else is managing the + // image on the host + StateExternallyProvisioned ProvisioningState = "externally provisioned" + // StateDeprovisioning means we are removing an image from the // host's disk(s) StateDeprovisioning ProvisioningState = "deprovisioning" @@ -114,6 +118,11 @@ type BareMetalHostSpec struct { // How do we connect to the BMC? BMC BMCDetails `json:"bmc"` + // What is the name of the hardware profile for this host? It + // should only be necessary to set this when inspection cannot + // automatically determine the profile. + HardwareProfile string `json:"hardwareProfile"` + // Which MAC address will PXE boot? This is optional for some // types, but required for libvirt VMs driven by vbmc. BootMACAddress string `json:"bootMACAddress"` @@ -424,8 +433,6 @@ func (host *BareMetalHost) NeedsProvisioning() bool { // We have an image set, but not provisioned. return true } - // FIXME(dhellmann): Compare the provisioned image against the one - // we are supposed to have to make sure they match. 
return false } @@ -439,6 +446,15 @@ func (host *BareMetalHost) WasProvisioned() bool { return false } +// WasExternallyProvisioned returns true when we think something else +// is managing the image running on the host. +func (host *BareMetalHost) WasExternallyProvisioned() bool { + if host.Spec.Image == nil && host.Spec.MachineRef != nil { + return true + } + return false +} + // NeedsDeprovisioning compares the settings with the provisioning // status and returns true when the host should be deprovisioned. func (host *BareMetalHost) NeedsDeprovisioning() bool { @@ -448,7 +464,7 @@ func (host *BareMetalHost) NeedsDeprovisioning() bool { if host.Spec.Image == nil { return true } - if host.Spec.Image.URL == "" { + if host.Spec.Image.URL != host.Status.Provisioning.Image.URL { return true } return false @@ -484,13 +500,13 @@ func (host *BareMetalHost) NewEvent(reason, message string) corev1.Event { Reason: reason, Message: message, Source: corev1.EventSource{ - Component: "metalkube-baremetal-controller", + Component: "metal3-baremetal-controller", }, FirstTimestamp: t, LastTimestamp: t, Count: 1, Type: corev1.EventTypeNormal, - ReportingController: "metalkube.org/baremetal-controller", + ReportingController: "metal3.io/baremetal-controller", Related: host.Spec.MachineRef, } } diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1/doc.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/doc.go similarity index 69% rename from vendor/github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1/doc.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/doc.go index 859075d19..8e47c2938 100644 --- a/vendor/github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1/doc.go +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/doc.go @@ -1,4 +1,4 @@ -// Package v1alpha1 contains API Schema definitions for the metalkube v1alpha1 API group +// 
Package v1alpha1 contains API Schema definitions for the metal3 v1alpha1 API group // +k8s:deepcopy-gen=package,register -// +groupName=metalkube.org +// +groupName=metal3.io package v1alpha1 diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1/register.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/register.go similarity index 77% rename from vendor/github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1/register.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/register.go index ddc3ddc3c..f50d5a53a 100644 --- a/vendor/github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1/register.go +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/register.go @@ -1,8 +1,8 @@ // NOTE: Boilerplate only. Ignore this file. -// Package v1alpha1 contains API Schema definitions for the metalkube v1alpha1 API group +// Package v1alpha1 contains API Schema definitions for the metal3 v1alpha1 API group // +k8s:deepcopy-gen=package,register -// +groupName=metalkube.org +// +groupName=metal3.io package v1alpha1 import ( @@ -12,7 +12,7 @@ import ( var ( // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "metalkube.org", Version: "v1alpha1"} + SchemeGroupVersion = schema.GroupVersion{Group: "metal3.io", Version: "v1alpha1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/zz_generated.deepcopy.go similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1/zz_generated.deepcopy.go rename to 
vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/zz_generated.deepcopy.go diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1/zz_generated.defaults.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/zz_generated.defaults.go similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1/zz_generated.defaults.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/zz_generated.defaults.go diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/bmc/access.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/access.go similarity index 91% rename from vendor/github.com/metalkube/baremetal-operator/pkg/bmc/access.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/access.go index 37506ca7a..f6b0e5561 100644 --- a/vendor/github.com/metalkube/baremetal-operator/pkg/bmc/access.go +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/access.go @@ -1,7 +1,6 @@ package bmc import ( - "fmt" "net" "net/url" "strings" @@ -34,18 +33,6 @@ type AccessDetails interface { DriverInfo(bmcCreds Credentials) map[string]interface{} } -// UnknownBMCTypeError is returned when the provided BMC address cannot be -// mapped to a driver. 
-type UnknownBMCTypeError struct { - address string - bmcType string -} - -func (e UnknownBMCTypeError) Error() string { - return fmt.Sprintf("Unknown BMC type '%s' for address %s", - e.bmcType, e.address) -} - func getTypeHostPort(address string) (bmcType, host, port, path string, err error) { // Start by assuming "type://host:port" parsedURL, err := url.Parse(address) diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/credentials.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/credentials.go new file mode 100644 index 000000000..cbf28a50f --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/credentials.go @@ -0,0 +1,18 @@ +package bmc + +// Credentials holds the information for authenticating with the BMC. +type Credentials struct { + Username string + Password string +} + +// Validate returns an error if the credentials are invalid +func (creds Credentials) Validate() error { + if creds.Username == "" { + return &CredentialsValidationError{message: "Missing BMC connection detail 'username' in credentials"} + } + if creds.Password == "" { + return &CredentialsValidationError{message: "Missing BMC connection details 'password' in credentials"} + } + return nil +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/errors.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/errors.go new file mode 100644 index 000000000..e018fa66b --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/errors.go @@ -0,0 +1,28 @@ +package bmc + +import ( + "fmt" +) + +// UnknownBMCTypeError is returned when the provided BMC address cannot be +// mapped to a driver. +type UnknownBMCTypeError struct { + address string + bmcType string +} + +func (e UnknownBMCTypeError) Error() string { + return fmt.Sprintf("Unknown BMC type '%s' for address %s", + e.bmcType, e.address) +} + +// CredentialsValidationError is returned when the provided BMC credentials +// are invalid (e.g. 
null) +type CredentialsValidationError struct { + message string +} + +func (e CredentialsValidationError) Error() string { + return fmt.Sprintf("Validation error with BMC credentials: %s", + e.message) +} diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/bmc/idrac.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/idrac.go similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/pkg/bmc/idrac.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/idrac.go diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/bmc/ipmi.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/ipmi.go similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/pkg/bmc/ipmi.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/bmc/ipmi.go diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/controller/add_baremetalhost.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/controller/add_baremetalhost.go similarity index 77% rename from vendor/github.com/metalkube/baremetal-operator/pkg/controller/add_baremetalhost.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/controller/add_baremetalhost.go index 0bc93c8f5..fe56b37ca 100644 --- a/vendor/github.com/metalkube/baremetal-operator/pkg/controller/add_baremetalhost.go +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/controller/add_baremetalhost.go @@ -1,7 +1,7 @@ package controller import ( - "github.com/metalkube/baremetal-operator/pkg/controller/baremetalhost" + "github.com/metal3-io/baremetal-operator/pkg/controller/baremetalhost" ) func init() { diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/controller/baremetalhost/baremetalhost_controller.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/controller/baremetalhost/baremetalhost_controller.go similarity index 67% rename from vendor/github.com/metalkube/baremetal-operator/pkg/controller/baremetalhost/baremetalhost_controller.go 
rename to vendor/github.com/metal3-io/baremetal-operator/pkg/controller/baremetalhost/baremetalhost_controller.go index 941914f5b..623bc1aaf 100644 --- a/vendor/github.com/metalkube/baremetal-operator/pkg/controller/baremetalhost/baremetalhost_controller.go +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/controller/baremetalhost/baremetalhost_controller.go @@ -4,17 +4,19 @@ import ( "context" "flag" "fmt" + "strings" "time" "github.com/pkg/errors" - metalkubev1alpha1 "github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1" - "github.com/metalkube/baremetal-operator/pkg/bmc" - "github.com/metalkube/baremetal-operator/pkg/provisioner" - "github.com/metalkube/baremetal-operator/pkg/provisioner/demo" - "github.com/metalkube/baremetal-operator/pkg/provisioner/fixture" - "github.com/metalkube/baremetal-operator/pkg/provisioner/ironic" - "github.com/metalkube/baremetal-operator/pkg/utils" + metal3v1alpha1 "github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1" + "github.com/metal3-io/baremetal-operator/pkg/bmc" + "github.com/metal3-io/baremetal-operator/pkg/hardware" + "github.com/metal3-io/baremetal-operator/pkg/provisioner" + "github.com/metal3-io/baremetal-operator/pkg/provisioner/demo" + "github.com/metal3-io/baremetal-operator/pkg/provisioner/fixture" + "github.com/metal3-io/baremetal-operator/pkg/provisioner/ironic" + "github.com/metal3-io/baremetal-operator/pkg/utils" "github.com/go-logr/logr" @@ -80,14 +82,14 @@ func newReconciler(mgr manager.Manager) reconcile.Reconciler { // add adds a new Controller to mgr with r as the reconcile.Reconciler func add(mgr manager.Manager, r reconcile.Reconciler) error { // Create a new controller - c, err := controller.New("metalkube-baremetalhost-controller", mgr, + c, err := controller.New("metal3-baremetalhost-controller", mgr, controller.Options{Reconciler: r}) if err != nil { return err } // Watch for changes to primary resource BareMetalHost - err = c.Watch(&source.Kind{Type: 
&metalkubev1alpha1.BareMetalHost{}}, + err = c.Watch(&source.Kind{Type: &metal3v1alpha1.BareMetalHost{}}, &handler.EnqueueRequestForObject{}) if err != nil { return err @@ -97,7 +99,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { err = c.Watch(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForOwner{ IsController: true, - OwnerType: &metalkubev1alpha1.BareMetalHost{}, + OwnerType: &metal3v1alpha1.BareMetalHost{}, }) return err } @@ -117,7 +119,7 @@ type ReconcileBareMetalHost struct { // hold them in a context type reconcileInfo struct { log logr.Logger - host *metalkubev1alpha1.BareMetalHost + host *metal3v1alpha1.BareMetalHost request reconcile.Request bmcCredsSecret *corev1.Secret events []corev1.Event @@ -157,7 +159,7 @@ func (r *ReconcileBareMetalHost) Reconcile(request reconcile.Request) (result re reqLogger.Info("Reconciling BareMetalHost") // Fetch the BareMetalHost - host := &metalkubev1alpha1.BareMetalHost{} + host := &metal3v1alpha1.BareMetalHost{} err = r.client.Get(context.TODO(), request.NamespacedName, host) if err != nil { if k8serrors.IsNotFound(err) { @@ -181,10 +183,10 @@ func (r *ReconcileBareMetalHost) Reconcile(request reconcile.Request) (result re reqLogger.Info( "adding finalizer", "existingFinalizers", host.Finalizers, - "newValue", metalkubev1alpha1.BareMetalHostFinalizer, + "newValue", metal3v1alpha1.BareMetalHostFinalizer, ) host.Finalizers = append(host.Finalizers, - metalkubev1alpha1.BareMetalHostFinalizer) + metal3v1alpha1.BareMetalHostFinalizer) err := r.client.Update(context.TODO(), host) if err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to add finalizer") @@ -198,78 +200,92 @@ func (r *ReconcileBareMetalHost) Reconcile(request reconcile.Request) (result re return result, err } - // Check for a "discovered" host vs. one that we have all the info for. 
- if host.Spec.BMC.Address == "" { - reqLogger.Info(bmc.MissingAddressMsg) - dirty := host.SetOperationalStatus(metalkubev1alpha1.OperationalStatusDiscovered) - if dirty { - err = r.saveStatus(host) - if err != nil { + // Retrieve the BMC details from the host spec and validate host + // BMC details and build the credentials for talking to the + // management controller. + bmcCreds, bmcCredsSecret, err := r.buildAndValidateBMCCredentials(request, host) + if err != nil { + switch err.(type) { + // We treat an empty bmc address and empty bmc credentials fields as a + // trigger the host needs to be put into a discovered status. We also set + // an error message (but not an error state) on the host so we can understand + // what we may be waiting on. Editing the host to set these values will + // cause the host to be reconciled again so we do not Requeue. + case *EmptyBMCAddressError, *EmptyBMCSecretError: + dirty := host.SetOperationalStatus(metal3v1alpha1.OperationalStatusDiscovered) + if dirty { + // Set the host error message directly + // as we cannot use SetErrorCondition which + // overwrites our discovered state + host.Status.ErrorMessage = err.Error() + saveErr := r.saveStatus(host) + if saveErr != nil { + return reconcile.Result{Requeue: true}, saveErr + } // Only publish the event if we do not have an error // after saving so that we only publish one time. r.publishEvent(request, - host.NewEvent("Discovered", "Discovered host without BMC address")) + host.NewEvent("Discovered", fmt.Sprintf("Discovered host with unusable BMC details: %s", err.Error()))) } - // Without the address we can't do any more so we return here - // without checking for an error. 
- return reconcile.Result{Requeue: true}, err - } - reqLogger.Info("nothing to do for discovered host without BMC address") - return reconcile.Result{}, nil - } - if host.Spec.BMC.CredentialsName == "" { - reqLogger.Info(bmc.MissingCredentialsMsg) - dirty := host.SetOperationalStatus(metalkubev1alpha1.OperationalStatusDiscovered) - if dirty { - err = r.saveStatus(host) - if err != nil { - // Only publish the event if we do not have an error - // after saving so that we only publish one time. - r.publishEvent(request, - host.NewEvent("Discovered", "Discovered host without BMC credentials")) + return reconcile.Result{}, nil + // In the event a credential secret is defined, but we cannot find it + // we requeue the host as we will not know if they create the secret + // at some point in the future. + case *ResolveBMCSecretRefError: + saveErr := r.setErrorCondition(request, host, err.Error()) + if saveErr != nil { + return reconcile.Result{Requeue: true}, saveErr + } + // Only publish the event if we do not have an error + // after saving so that we only publish one time. + r.publishEvent(request, host.NewEvent("BMCCredentialError", err.Error())) + return reconcile.Result{Requeue: true, RequeueAfter: hostErrorRetryDelay}, nil + // If we have found the secret but it is missing the required fields + // or the BMC address is defined but malformed we set the + // host into an error state but we do not Requeue it + // as fixing the secret or the host BMC info will trigger + // the host to be reconciled again + case *bmc.CredentialsValidationError, *bmc.UnknownBMCTypeError: + saveErr := r.setErrorCondition(request, host, err.Error()) + if saveErr != nil { + return reconcile.Result{Requeue: true}, saveErr } - // Without any credentials we can't do any more so we return - // here without checking for an error. - return reconcile.Result{Requeue: true}, err + // Only publish the event if we do not have an error + // after saving so that we only publish one time. 
+ r.publishEvent(request, host.NewEvent("BMCCredentialError", err.Error())) + return reconcile.Result{}, nil + default: + return reconcile.Result{}, errors.Wrap(err, "An unhandled failure occurred with the BMC secret") } - reqLogger.Info("nothing to do for discovered host without BMC credentials") - return reconcile.Result{}, nil - } - - // Load the credentials for talking to the management controller. - bmcCreds, bmcCredsSecret, err := r.getValidBMCCredentials(request, host) - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "BMC credentials are invalid") - } - if bmcCreds == nil { - // We do not have valid credentials, but did not encounter a - // retriable error in determining that. Reconciliation is - // complete until something about the secrets change. - return reconcile.Result{}, nil } // Pick the action to perform - var actionName metalkubev1alpha1.ProvisioningState + var actionName metal3v1alpha1.ProvisioningState switch { + case host.WasExternallyProvisioned(): + actionName = metal3v1alpha1.StateExternallyProvisioned case host.CredentialsNeedValidation(*bmcCredsSecret): - actionName = metalkubev1alpha1.StateRegistering + actionName = metal3v1alpha1.StateRegistering case host.NeedsHardwareInspection(): - actionName = metalkubev1alpha1.StateInspecting - case host.HardwareProfile() == "": - actionName = metalkubev1alpha1.StateMatchProfile + actionName = metal3v1alpha1.StateInspecting + case host.NeedsHardwareProfile(): + actionName = metal3v1alpha1.StateMatchProfile case host.NeedsProvisioning(): - actionName = metalkubev1alpha1.StateProvisioning + actionName = metal3v1alpha1.StateProvisioning case host.NeedsDeprovisioning(): - actionName = metalkubev1alpha1.StateDeprovisioning + actionName = metal3v1alpha1.StateDeprovisioning case host.WasProvisioned(): - actionName = metalkubev1alpha1.StateProvisioned + actionName = metal3v1alpha1.StateProvisioned default: - actionName = metalkubev1alpha1.StateReady + actionName = metal3v1alpha1.StateReady } 
if actionName != host.Status.Provisioning.State { + reqLogger.Info("changing provisioning state", + "old", host.Status.Provisioning.State, + "new", actionName, + ) host.Status.Provisioning.State = actionName - reqLogger.Info(fmt.Sprintf("setting provisioning state to %q", actionName)) if err := r.saveStatus(host); err != nil { return reconcile.Result{}, errors.Wrap(err, fmt.Sprintf("failed to save host status after handling %q", actionName)) @@ -278,7 +294,7 @@ func (r *ReconcileBareMetalHost) Reconcile(request reconcile.Request) (result re } info := &reconcileInfo{ - log: reqLogger.WithValues("actionName", actionName), + log: reqLogger.WithValues("provisioningState", actionName), host: host, request: request, bmcCredsSecret: bmcCredsSecret, @@ -289,19 +305,21 @@ func (r *ReconcileBareMetalHost) Reconcile(request reconcile.Request) (result re } switch actionName { - case metalkubev1alpha1.StateRegistering: + case metal3v1alpha1.StateRegistering: result, err = r.actionRegistering(prov, info) - case metalkubev1alpha1.StateInspecting: + case metal3v1alpha1.StateInspecting: result, err = r.actionInspecting(prov, info) - case metalkubev1alpha1.StateMatchProfile: + case metal3v1alpha1.StateMatchProfile: result, err = r.actionMatchProfile(prov, info) - case metalkubev1alpha1.StateProvisioning: + case metal3v1alpha1.StateProvisioning: result, err = r.actionProvisioning(prov, info) - case metalkubev1alpha1.StateDeprovisioning: + case metal3v1alpha1.StateDeprovisioning: result, err = r.actionDeprovisioning(prov, info) - case metalkubev1alpha1.StateProvisioned: + case metal3v1alpha1.StateProvisioned: + result, err = r.actionManageHostPower(prov, info) + case metal3v1alpha1.StateReady: result, err = r.actionManageHostPower(prov, info) - case metalkubev1alpha1.StateReady: + case metal3v1alpha1.StateExternallyProvisioned: result, err = r.actionManageHostPower(prov, info) default: // Probably a provisioning error state? 
@@ -334,7 +352,7 @@ func (r *ReconcileBareMetalHost) Reconcile(request reconcile.Request) (result re // We have tried to do something that failed in a way we // assume is not retryable, so do not proceed to any other // steps. - info.log.Info("stopping on host error") + info.log.Info("stopping on host error", "message", host.Status.ErrorMessage) return reconcile.Result{}, nil } @@ -346,7 +364,7 @@ func (r *ReconcileBareMetalHost) Reconcile(request reconcile.Request) (result re } // Handle all delete cases -func (r *ReconcileBareMetalHost) deleteHost(request reconcile.Request, host *metalkubev1alpha1.BareMetalHost) (result reconcile.Result, err error) { +func (r *ReconcileBareMetalHost) deleteHost(request reconcile.Request, host *metal3v1alpha1.BareMetalHost) (result reconcile.Result, err error) { reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) @@ -357,18 +375,18 @@ func (r *ReconcileBareMetalHost) deleteHost(request reconcile.Request, host *met ) // no-op if finalizer has been removed. - if !utils.StringInList(host.Finalizers, metalkubev1alpha1.BareMetalHostFinalizer) { + if !utils.StringInList(host.Finalizers, metal3v1alpha1.BareMetalHostFinalizer) { reqLogger.Info("ready to be deleted") // There is nothing to save and no reason to requeue since we // are being deleted. return reconcile.Result{}, nil } - bmcCreds, _, err := r.getValidBMCCredentials(request, host) - // We ignore the error, because we are deleting this host anyway. - if bmcCreds == nil { - // There are no valid credentials, so create an empty - // credentials object to give to the provisioner. + // Retrieve the BMC secret from Kubernetes for this host and + // try and build credentials. 
If we fail, resort to an empty + // credentials object to give the provisioner + bmcCreds, _, err := r.buildAndValidateBMCCredentials(request, host) + if err != nil || bmcCreds == nil { bmcCreds = &bmc.Credentials{} } @@ -398,7 +416,7 @@ func (r *ReconcileBareMetalHost) deleteHost(request reconcile.Request, host *met // Remove finalizer to allow deletion host.Finalizers = utils.FilterStringFromList( - host.Finalizers, metalkubev1alpha1.BareMetalHostFinalizer) + host.Finalizers, metal3v1alpha1.BareMetalHostFinalizer) reqLogger.Info("cleanup is complete, removed finalizer", "remaining", host.Finalizers) if err := r.client.Update(context.Background(), host); err != nil { @@ -420,9 +438,11 @@ func (r *ReconcileBareMetalHost) actionRegistering(prov provisioner.Provisioner, } if provResult.ErrorMessage != "" { - info.host.Status.Provisioning.State = metalkubev1alpha1.StateRegistrationError - info.host.SetErrorMessage(provResult.ErrorMessage) - info.publishEvent("RegistrationError", provResult.ErrorMessage) + info.host.Status.Provisioning.State = metal3v1alpha1.StateRegistrationError + if info.host.SetErrorMessage(provResult.ErrorMessage) { + info.publishEvent("RegistrationError", provResult.ErrorMessage) + result.Requeue = true + } return result, nil } @@ -460,7 +480,7 @@ func (r *ReconcileBareMetalHost) actionInspecting(prov provisioner.Provisioner, } if provResult.ErrorMessage != "" { - info.host.Status.Provisioning.State = metalkubev1alpha1.StateRegistrationError + info.host.Status.Provisioning.State = metal3v1alpha1.StateRegistrationError info.host.SetErrorMessage(provResult.ErrorMessage) info.publishEvent("RegistrationError", provResult.ErrorMessage) return result, nil @@ -487,8 +507,39 @@ func (r *ReconcileBareMetalHost) actionInspecting(prov provisioner.Provisioner, func (r *ReconcileBareMetalHost) actionMatchProfile(prov provisioner.Provisioner, info *reconcileInfo) (result reconcile.Result, err error) { - // FIXME(dhellmann): Insert logic to match hardware 
profiles here. - hardwareProfile := "unknown" + var hardwareProfile string + + info.log.Info("determining hardware profile") + + // Start by looking for an override value from the user + if info.host.Spec.HardwareProfile != "" { + info.log.Info("using spec value for profile name", + "name", info.host.Spec.HardwareProfile) + hardwareProfile = info.host.Spec.HardwareProfile + _, err = hardware.GetProfile(hardwareProfile) + if err != nil { + info.log.Info("invalid hardware profile", "profile", hardwareProfile) + return result, err + } + } + + // Now do a bit of matching. + // + // FIXME(dhellmann): Insert more robust logic to match + // hardware profiles here. + if hardwareProfile == "" { + if strings.HasPrefix(info.host.Spec.BMC.Address, "libvirt") { + hardwareProfile = "libvirt" + info.log.Info("determining from BMC address", "name", hardwareProfile) + } + } + + // Now default to a value just in case there is no match + if hardwareProfile == "" { + hardwareProfile = hardware.DefaultProfileName + info.log.Info("using the default", "name", hardwareProfile) + } + if info.host.SetHardwareProfile(hardwareProfile) { info.log.Info("updating hardware profile", "profile", hardwareProfile) info.publishEvent("ProfileSet", fmt.Sprintf("Hardware profile set: %s", hardwareProfile)) @@ -508,6 +559,10 @@ func (r *ReconcileBareMetalHost) actionProvisioning(prov provisioner.Provisioner var provResult provisioner.Result getUserData := func() (string, error) { + if info.host.Spec.UserData == nil { + info.log.Info("no user data for host") + return "", nil + } info.log.Info("fetching user data before provisioning") userDataSecret := &corev1.Secret{} key := types.NamespacedName{ @@ -531,7 +586,7 @@ func (r *ReconcileBareMetalHost) actionProvisioning(prov provisioner.Provisioner if provResult.ErrorMessage != "" { info.log.Info("handling provisioning error in controller") - info.host.Status.Provisioning.State = metalkubev1alpha1.StateProvisioningError + info.host.Status.Provisioning.State = 
metal3v1alpha1.StateProvisioningError if info.host.SetErrorMessage(provResult.ErrorMessage) { info.publishEvent("ProvisioningError", provResult.ErrorMessage) result.Requeue = true @@ -572,7 +627,7 @@ func (r *ReconcileBareMetalHost) actionDeprovisioning(prov provisioner.Provision } if provResult.ErrorMessage != "" { - info.host.Status.Provisioning.State = metalkubev1alpha1.StateProvisioningError + info.host.Status.Provisioning.State = metal3v1alpha1.StateProvisioningError if info.host.SetErrorMessage(provResult.ErrorMessage) { info.publishEvent("ProvisioningError", provResult.ErrorMessage) result.Requeue = true @@ -589,7 +644,7 @@ func (r *ReconcileBareMetalHost) actionDeprovisioning(prov provisioner.Provision // After the provisioner is done, clear the image settings so we // transition to the next state. - info.host.Status.Provisioning.Image = metalkubev1alpha1.Image{} + info.host.Status.Provisioning.Image = metal3v1alpha1.Image{} // After deprovisioning we always requeue to ensure we enter the // "ready" state and start monitoring power status. 
@@ -608,7 +663,7 @@ func (r *ReconcileBareMetalHost) actionManageHostPower(prov provisioner.Provisio } if provResult.ErrorMessage != "" { - info.host.Status.Provisioning.State = metalkubev1alpha1.StatePowerManagementError + info.host.Status.Provisioning.State = metal3v1alpha1.StatePowerManagementError if info.host.SetErrorMessage(provResult.ErrorMessage) { info.publishEvent("PowerManagementError", provResult.ErrorMessage) result.Requeue = true @@ -646,7 +701,7 @@ func (r *ReconcileBareMetalHost) actionManageHostPower(prov provisioner.Provisio } if provResult.ErrorMessage != "" { - info.host.Status.Provisioning.State = metalkubev1alpha1.StatePowerManagementError + info.host.Status.Provisioning.State = metal3v1alpha1.StatePowerManagementError if info.host.SetErrorMessage(provResult.ErrorMessage) { info.publishEvent("PowerManagementError", provResult.ErrorMessage) result.Requeue = true @@ -671,13 +726,13 @@ func (r *ReconcileBareMetalHost) actionManageHostPower(prov provisioner.Provisio } -func (r *ReconcileBareMetalHost) saveStatus(host *metalkubev1alpha1.BareMetalHost) error { +func (r *ReconcileBareMetalHost) saveStatus(host *metal3v1alpha1.BareMetalHost) error { t := metav1.Now() host.Status.LastUpdated = &t return r.client.Status().Update(context.TODO(), host) } -func (r *ReconcileBareMetalHost) setErrorCondition(request reconcile.Request, host *metalkubev1alpha1.BareMetalHost, message string) error { +func (r *ReconcileBareMetalHost) setErrorCondition(request reconcile.Request, host *metal3v1alpha1.BareMetalHost, message string) error { reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) @@ -694,53 +749,75 @@ func (r *ReconcileBareMetalHost) setErrorCondition(request reconcile.Request, ho return nil } -// Make sure the credentials for the management controller look -// right. This does not actually try to use the credentials. 
-func (r *ReconcileBareMetalHost) getValidBMCCredentials(request reconcile.Request, host *metalkubev1alpha1.BareMetalHost) (bmcCreds *bmc.Credentials, bmcCredsSecret *corev1.Secret, err error) { - reqLogger := log.WithValues("Request.Namespace", - request.Namespace, "Request.Name", request.Name) +// Retrieve the secret containing the credentials for talking to the BMC. +func (r *ReconcileBareMetalHost) getBMCSecretAndSetOwner(request reconcile.Request, host *metal3v1alpha1.BareMetalHost) (bmcCredsSecret *corev1.Secret, err error) { - // Load the secret containing the credentials for talking to the - // BMC. This assumes we have a reference to the secret, otherwise - // Reconcile() should not have let us be called. + if host.Spec.BMC.CredentialsName == "" { + return nil, &EmptyBMCSecretError{message: "The BMC secret reference is empty"} + } secretKey := host.CredentialsKey() bmcCredsSecret = &corev1.Secret{} err = r.client.Get(context.TODO(), secretKey, bmcCredsSecret) if err != nil { - return nil, nil, errors.Wrap(err, - "failed to fetch BMC credentials from secret reference") + if k8serrors.IsNotFound(err) { + return nil, &ResolveBMCSecretRefError{message: fmt.Sprintf("The BMC secret %s does not exist", secretKey)} + } + return nil, err } - bmcCreds = &bmc.Credentials{ - Username: string(bmcCredsSecret.Data["username"]), - Password: string(bmcCredsSecret.Data["password"]), + + // Make sure the secret has the correct owner as soon as we can. + // This can return an SaveBMCSecretOwnerError + // which isn't handled causing us to immediately try again + // which seems fine as we expect this to be a transient failure + err = r.setBMCCredentialsSecretOwner(request, host, bmcCredsSecret) + if err != nil { + return bmcCredsSecret, err } - // Verify that the secret contains the expected info. 
- if validCreds, reason := bmcCreds.AreValid(); !validCreds { - reqLogger.Info("invalid BMC Credentials", "reason", reason) - err := r.setErrorCondition(request, host, reason) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to set error condition") - } + return bmcCredsSecret, nil +} - // Only publish the event if we do not have an error - // after saving so that we only publish one time. - r.publishEvent(request, host.NewEvent("BMCCredentialError", reason)) +// Make sure the credentials for the management controller look +// right and manufacture bmc.Credentials. This does not actually try +// to use the credentials. +func (r *ReconcileBareMetalHost) buildAndValidateBMCCredentials(request reconcile.Request, host *metal3v1alpha1.BareMetalHost) (bmcCreds *bmc.Credentials, bmcCredsSecret *corev1.Secret, err error) { - // This is not an error we can retry from, so stop reconciling. - return nil, nil, nil + // Retrieve the BMC secret from Kubernetes for this host + bmcCredsSecret, err = r.getBMCSecretAndSetOwner(request, host) + if err != nil { + return nil, nil, err } - // Make sure the secret has the correct owner. - if err = r.setBMCCredentialsSecretOwner(request, host, bmcCredsSecret); err != nil { - return nil, nil, errors.Wrap(err, - "failed to update owner of credentials secret") + // Check for a "discovered" host vs. 
one that we have all the info for + // and find empty Address or CredentialsName fields + if host.Spec.BMC.Address == "" { + return nil, nil, &EmptyBMCAddressError{message: "Missing BMC connection detail 'Address'"} + } + + // pass the bmc address to bmc.NewAccessDetails which will do + // more in-depth checking on the url to ensure it is + // a valid bmc address, returning a bmc.UnknownBMCTypeError + // if it is not conformant + _, err = bmc.NewAccessDetails(host.Spec.BMC.Address) + if err != nil { + return nil, nil, err + } + + bmcCreds = &bmc.Credentials{ + Username: string(bmcCredsSecret.Data["username"]), + Password: string(bmcCredsSecret.Data["password"]), + } + + // Verify that the secret contains the expected info. + err = bmcCreds.Validate() + if err != nil { + return nil, bmcCredsSecret, err } return bmcCreds, bmcCredsSecret, nil } -func (r *ReconcileBareMetalHost) setBMCCredentialsSecretOwner(request reconcile.Request, host *metalkubev1alpha1.BareMetalHost, secret *corev1.Secret) (err error) { +func (r *ReconcileBareMetalHost) setBMCCredentialsSecretOwner(request reconcile.Request, host *metal3v1alpha1.BareMetalHost, secret *corev1.Secret) (err error) { reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) if metav1.IsControlledBy(secret, host) { @@ -749,11 +826,11 @@ func (r *ReconcileBareMetalHost) setBMCCredentialsSecretOwner(request reconcile. 
reqLogger.Info("updating owner of secret") err = controllerutil.SetControllerReference(host, secret, r.scheme) if err != nil { - return errors.Wrap(err, "failed to set owner") + return &SaveBMCSecretOwnerError{message: fmt.Sprintf("cannot set owner: %q", err.Error())} } err = r.client.Update(context.TODO(), secret) if err != nil { - return errors.Wrap(err, "failed to save owner") + return &SaveBMCSecretOwnerError{message: fmt.Sprintf("cannot save owner: %q", err.Error())} } return nil } @@ -770,6 +847,6 @@ func (r *ReconcileBareMetalHost) publishEvent(request reconcile.Request, event c return } -func hostHasFinalizer(host *metalkubev1alpha1.BareMetalHost) bool { - return utils.StringInList(host.Finalizers, metalkubev1alpha1.BareMetalHostFinalizer) +func hostHasFinalizer(host *metal3v1alpha1.BareMetalHost) bool { + return utils.StringInList(host.Finalizers, metal3v1alpha1.BareMetalHostFinalizer) } diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/controller/baremetalhost/errors.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/controller/baremetalhost/errors.go new file mode 100644 index 000000000..b09864e28 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/controller/baremetalhost/errors.go @@ -0,0 +1,49 @@ +package baremetalhost + +import ( + "fmt" +) + +// EmptyBMCAddressError is returned when the BMC address field +// for a host is empty +type EmptyBMCAddressError struct { + message string +} + +func (e EmptyBMCAddressError) Error() string { + return fmt.Sprintf("Empty BMC address %s", + e.message) +} + +// EmptyBMCSecretError is returned when the BMC secret +// for a host is empty +type EmptyBMCSecretError struct { + message string +} + +func (e EmptyBMCSecretError) Error() string { + return fmt.Sprintf("No BMC CredentialsName defined %s", + e.message) +} + +// ResolveBMCSecretRefError is returned when the BMC secret +// for a host is defined but cannot be found +type ResolveBMCSecretRefError struct { + message string +} 
+ +func (e ResolveBMCSecretRefError) Error() string { + return fmt.Sprintf("BMC CredentialsName secret doesn't exist %s", + e.message) +} + +// SaveBMCSecretOwnerError is returned when we +// fail to set the owner of a secret +type SaveBMCSecretOwnerError struct { + message string +} + +func (e SaveBMCSecretOwnerError) Error() string { + return fmt.Sprintf("Failed to set owner of BMC secret %s", + e.message) +} diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/controller/controller.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/controller/controller.go similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/pkg/controller/controller.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/controller/controller.go diff --git a/vendor/github.com/metal3-io/baremetal-operator/pkg/hardware/profile.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardware/profile.go new file mode 100644 index 000000000..2bcc17ce1 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/hardware/profile.go @@ -0,0 +1,88 @@ +package hardware + +import ( + "fmt" +) + +const ( + // DefaultProfileName is the default hardware profile to use when + // no other profile matches. + DefaultProfileName string = "unknown" +) + +// Profile holds the settings for a class of hardware. +type Profile struct { + // Name holds the profile name + Name string + + // RootDeviceHints holds the suggestions for placing the storage + // for the root filesystem. + RootDeviceHints RootDeviceHints + + // RootGB is the size of the root volume in GB + RootGB int + + // LocalGB is the size of something(?) + LocalGB int + + // CPUArch is the architecture of the CPU. + CPUArch string +} + +// RootDeviceHints holds the hints for specifying the storage location +// for the root filesystem for the image. 
+// +// NOTE(dhellmann): Valid ironic hints are: "vendor, +// wwn_vendor_extension, wwn_with_extension, by_path, serial, wwn, +// size, rotational, name, hctl, model" +type RootDeviceHints struct { + // A device name like "/dev/vda" + DeviceName string + + // A SCSI bus address like 0:0:0:0 + HCTL string +} + +var profiles = make(map[string]Profile) + +func init() { + profiles[DefaultProfileName] = Profile{ + Name: DefaultProfileName, + RootDeviceHints: RootDeviceHints{ + DeviceName: "/dev/sda", + }, + RootGB: 10, + LocalGB: 50, + CPUArch: "x86_64", + } + + profiles["libvirt"] = Profile{ + Name: "libvirt", + RootDeviceHints: RootDeviceHints{ + DeviceName: "/dev/vda", + }, + RootGB: 10, + LocalGB: 50, + CPUArch: "x86_64", + } + + profiles["dell"] = Profile{ + Name: "dell", + RootDeviceHints: RootDeviceHints{ + HCTL: "0:0:0:0", + }, + RootGB: 10, + LocalGB: 50, + CPUArch: "x86_64", + } + +} + +// GetProfile returns the named profile +func GetProfile(name string) (Profile, error) { + profile, ok := profiles[name] + if !ok { + return Profile{}, fmt.Errorf("No hardware profile named %q", name) + } + return profile, nil +} diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/provisioner/demo/demo.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/provisioner/demo/demo.go similarity index 77% rename from vendor/github.com/metalkube/baremetal-operator/pkg/provisioner/demo/demo.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/provisioner/demo/demo.go index 74b68f650..8745e85cd 100644 --- a/vendor/github.com/metalkube/baremetal-operator/pkg/provisioner/demo/demo.go +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/provisioner/demo/demo.go @@ -1,15 +1,14 @@ package demo import ( - "fmt" "time" "github.com/go-logr/logr" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" - metalkubev1alpha1 "github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1" - "github.com/metalkube/baremetal-operator/pkg/bmc" - 
"github.com/metalkube/baremetal-operator/pkg/provisioner" + metal3v1alpha1 "github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1" + "github.com/metal3-io/baremetal-operator/pkg/bmc" + "github.com/metal3-io/baremetal-operator/pkg/provisioner" ) var log = logf.Log.WithName("demo") @@ -17,20 +16,37 @@ var deprovisionRequeueDelay = time.Second * 10 var provisionRequeueDelay = time.Second * 10 const ( - registrationErrorHost string = "demo-registration-error" - registeringHost string = "demo-registering" - readyHost string = "demo-ready" - inspectingHost string = "demo-inspecting" - validationErrorHost string = "demo-validation-error" - provisioningHost string = "demo-provisioning" - provisionedHost string = "demo-provisioned" + // RegistrationErrorHost is a host that fails the registration + // process. + RegistrationErrorHost string = "demo-registration-error" + + // RegisteringHost is a host that is in the process of being + // registered. + RegisteringHost string = "demo-registering" + + // ReadyHost is a host that is ready to be used. + ReadyHost string = "demo-ready" + + // InspectingHost is a host that is having its hardware scanned. + InspectingHost string = "demo-inspecting" + + // ValidationErrorHost is a host that started provisioning but + // failed validation. + ValidationErrorHost string = "demo-validation-error" + + // ProvisioningHost is a host that is in the middle of + // provisioning. + ProvisioningHost string = "demo-provisioning" + + // ProvisionedHost is a host that has had an image provisioned. + ProvisionedHost string = "demo-provisioned" ) // Provisioner implements the provisioning.Provisioner interface // and uses Ironic to manage the host. 
type demoProvisioner struct { // the host to be managed by this provisioner - host *metalkubev1alpha1.BareMetalHost + host *metal3v1alpha1.BareMetalHost // the bmc credentials bmcCreds bmc.Credentials // a logger configured for this host @@ -40,7 +56,7 @@ type demoProvisioner struct { } // New returns a new Ironic Provisioner -func New(host *metalkubev1alpha1.BareMetalHost, bmcCreds bmc.Credentials, publisher provisioner.EventPublisher) (provisioner.Provisioner, error) { +func New(host *metal3v1alpha1.BareMetalHost, bmcCreds bmc.Credentials, publisher provisioner.EventPublisher) (provisioner.Provisioner, error) { p := &demoProvisioner{ host: host, bmcCreds: bmcCreds, @@ -59,15 +75,12 @@ func (p *demoProvisioner) ValidateManagementAccess() (result provisioner.Result, switch hostName { - case registrationErrorHost: - if p.host.SetErrorMessage("failed to register new host") { - p.log.Info("setting registration error") - p.publisher("RegistrationError", "Failed to register new host") - result.Dirty = true - } + case RegistrationErrorHost: // We have set an error, so Reconcile() will stop + result.ErrorMessage = "failed to register new host" + p.log.Info("setting registration error") - case registeringHost: + case RegisteringHost: // Always mark the host as dirty so it never moves past this // point. 
result.Dirty = true @@ -79,7 +92,6 @@ func (p *demoProvisioner) ValidateManagementAccess() (result provisioner.Result, p.log.Info("setting provisioning id", "provisioningID", p.host.Status.Provisioning.ID) result.Dirty = true - p.publisher("Registered", "Registered new host") } } @@ -95,8 +107,8 @@ func (p *demoProvisioner) InspectHardware() (result provisioner.Result, err erro hostName := p.host.ObjectMeta.Name - if hostName == inspectingHost { - p.host.Status.Provisioning.State = metalkubev1alpha1.StateInspecting + if hostName == InspectingHost { + p.host.Status.Provisioning.State = metal3v1alpha1.StateInspecting // set dirty so we don't allow the host to progress past this // state in Reconcile() result.Dirty = true @@ -111,10 +123,10 @@ func (p *demoProvisioner) InspectHardware() (result provisioner.Result, err erro if p.host.Status.HardwareDetails == nil { p.log.Info("continuing inspection by setting details") p.host.Status.HardwareDetails = - &metalkubev1alpha1.HardwareDetails{ + &metal3v1alpha1.HardwareDetails{ RAMGiB: 128, - NIC: []metalkubev1alpha1.NIC{ - metalkubev1alpha1.NIC{ + NIC: []metal3v1alpha1.NIC{ + metal3v1alpha1.NIC{ Name: "nic-1", Model: "virt-io", Network: "Pod Networking", @@ -122,7 +134,7 @@ func (p *demoProvisioner) InspectHardware() (result provisioner.Result, err erro IP: "192.168.100.1", SpeedGbps: 1, }, - metalkubev1alpha1.NIC{ + metal3v1alpha1.NIC{ Name: "nic-2", Model: "e1000", Network: "Pod Networking", @@ -131,29 +143,29 @@ func (p *demoProvisioner) InspectHardware() (result provisioner.Result, err erro SpeedGbps: 1, }, }, - Storage: []metalkubev1alpha1.Storage{ - metalkubev1alpha1.Storage{ + Storage: []metal3v1alpha1.Storage{ + metal3v1alpha1.Storage{ Name: "disk-1 (boot)", Type: "SSD", SizeGiB: 1024 * 93, Model: "Dell CFJ61", }, - metalkubev1alpha1.Storage{ + metal3v1alpha1.Storage{ Name: "disk-2", Type: "SSD", SizeGiB: 1024 * 93, Model: "Dell CFJ61", }, }, - CPUs: []metalkubev1alpha1.CPU{ - metalkubev1alpha1.CPU{ + CPUs: 
[]metal3v1alpha1.CPU{ + metal3v1alpha1.CPU{ Type: "x86", SpeedGHz: 3, }, }, } p.publisher("InspectionComplete", "Hardware inspection completed") - p.host.SetOperationalStatus(metalkubev1alpha1.OperationalStatusOK) + p.host.SetOperationalStatus(metal3v1alpha1.OperationalStatusOK) result.Dirty = true return result, nil } @@ -182,22 +194,17 @@ func (p *demoProvisioner) Provision(getUserData provisioner.UserDataSource) (res switch hostName { - case validationErrorHost: - p.log.Info("validation error host") - p.publisher("HostValidationError", "validation failed") - p.host.SetErrorMessage("validation failed") - result.Dirty = true + case ValidationErrorHost: + p.log.Info("setting validation error") + result.ErrorMessage = "validation failed" - case provisioningHost: + case ProvisioningHost: p.log.Info("provisioning host") result.Dirty = true result.RequeueAfter = time.Second * 5 default: - p.publisher("ProvisioningComplete", - fmt.Sprintf("Image provisioning completed for %s", p.host.Spec.Image.URL)) p.log.Info("finished provisioning") - result.Dirty = true } return result, nil diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/provisioner/fixture/fixture.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/provisioner/fixture/fixture.go similarity index 90% rename from vendor/github.com/metalkube/baremetal-operator/pkg/provisioner/fixture/fixture.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/provisioner/fixture/fixture.go index 2c073b0d1..d70d229ad 100644 --- a/vendor/github.com/metalkube/baremetal-operator/pkg/provisioner/fixture/fixture.go +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/provisioner/fixture/fixture.go @@ -6,9 +6,9 @@ import ( "github.com/go-logr/logr" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" - metalkubev1alpha1 "github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1" - "github.com/metalkube/baremetal-operator/pkg/bmc" - "github.com/metalkube/baremetal-operator/pkg/provisioner" + 
metal3v1alpha1 "github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1" + "github.com/metal3-io/baremetal-operator/pkg/bmc" + "github.com/metal3-io/baremetal-operator/pkg/provisioner" ) var log = logf.Log.WithName("fixture") @@ -19,7 +19,7 @@ var provisionRequeueDelay = time.Second * 10 // and uses Ironic to manage the host. type fixtureProvisioner struct { // the host to be managed by this provisioner - host *metalkubev1alpha1.BareMetalHost + host *metal3v1alpha1.BareMetalHost // the bmc credentials bmcCreds bmc.Credentials // a logger configured for this host @@ -29,7 +29,7 @@ type fixtureProvisioner struct { } // New returns a new Ironic Provisioner -func New(host *metalkubev1alpha1.BareMetalHost, bmcCreds bmc.Credentials, publisher provisioner.EventPublisher) (provisioner.Provisioner, error) { +func New(host *metal3v1alpha1.BareMetalHost, bmcCreds bmc.Credentials, publisher provisioner.EventPublisher) (provisioner.Provisioner, error) { p := &fixtureProvisioner{ host: host, bmcCreds: bmcCreds, @@ -75,10 +75,10 @@ func (p *fixtureProvisioner) InspectHardware() (result provisioner.Result, err e if p.host.Status.HardwareDetails == nil { p.log.Info("continuing inspection by setting details") p.host.Status.HardwareDetails = - &metalkubev1alpha1.HardwareDetails{ + &metal3v1alpha1.HardwareDetails{ RAMGiB: 128, - NIC: []metalkubev1alpha1.NIC{ - metalkubev1alpha1.NIC{ + NIC: []metal3v1alpha1.NIC{ + metal3v1alpha1.NIC{ Name: "nic-1", Model: "virt-io", Network: "Pod Networking", @@ -86,7 +86,7 @@ func (p *fixtureProvisioner) InspectHardware() (result provisioner.Result, err e IP: "192.168.100.1", SpeedGbps: 1, }, - metalkubev1alpha1.NIC{ + metal3v1alpha1.NIC{ Name: "nic-2", Model: "e1000", Network: "Pod Networking", @@ -95,22 +95,22 @@ func (p *fixtureProvisioner) InspectHardware() (result provisioner.Result, err e SpeedGbps: 1, }, }, - Storage: []metalkubev1alpha1.Storage{ - metalkubev1alpha1.Storage{ + Storage: []metal3v1alpha1.Storage{ + 
metal3v1alpha1.Storage{ Name: "disk-1 (boot)", Type: "SSD", SizeGiB: 1024 * 93, Model: "Dell CFJ61", }, - metalkubev1alpha1.Storage{ + metal3v1alpha1.Storage{ Name: "disk-2", Type: "SSD", SizeGiB: 1024 * 93, Model: "Dell CFJ61", }, }, - CPUs: []metalkubev1alpha1.CPU{ - metalkubev1alpha1.CPU{ + CPUs: []metal3v1alpha1.CPU{ + metal3v1alpha1.CPU{ Type: "x86", SpeedGHz: 3, }, diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/provisioner/ironic/ironic.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/provisioner/ironic/ironic.go similarity index 80% rename from vendor/github.com/metalkube/baremetal-operator/pkg/provisioner/ironic/ironic.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/provisioner/ironic/ironic.go index 73dca4ab4..be35f9022 100644 --- a/vendor/github.com/metalkube/baremetal-operator/pkg/provisioner/ironic/ironic.go +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/provisioner/ironic/ironic.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "net/http" "net/url" + "os" "strings" "time" @@ -20,31 +21,54 @@ import ( "github.com/go-logr/logr" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" - metalkubev1alpha1 "github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1" - "github.com/metalkube/baremetal-operator/pkg/bmc" - "github.com/metalkube/baremetal-operator/pkg/provisioner" + metal3v1alpha1 "github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1" + "github.com/metal3-io/baremetal-operator/pkg/bmc" + "github.com/metal3-io/baremetal-operator/pkg/hardware" + "github.com/metal3-io/baremetal-operator/pkg/provisioner" ) var log = logf.Log.WithName("baremetalhost_ironic") var deprovisionRequeueDelay = time.Second * 10 var provisionRequeueDelay = time.Second * 10 var powerRequeueDelay = time.Second * 10 +var deployKernelURL string +var deployRamdiskURL string +var ironicEndpoint string const ( - ironicEndpoint = "http://localhost:6385/v1/" // See nodes.Node.PowerState for details powerOn = "power on" 
powerOff = "power off" powerNone = "None" ) +func init() { + // NOTE(dhellmann): Use Fprintf() to report errors instead of + // logging, because logging is not configured yet in init(). + deployKernelURL = os.Getenv("DEPLOY_KERNEL_URL") + if deployKernelURL == "" { + fmt.Fprintf(os.Stderr, "Cannot start: No DEPLOY_KERNEL_URL variable set\n") + os.Exit(1) + } + deployRamdiskURL = os.Getenv("DEPLOY_RAMDISK_URL") + if deployRamdiskURL == "" { + fmt.Fprintf(os.Stderr, "Cannot start: No DEPLOY_RAMDISK_URL variable set\n") + os.Exit(1) + } + ironicEndpoint = os.Getenv("IRONIC_ENDPOINT") + if ironicEndpoint == "" { + fmt.Fprintf(os.Stderr, "Cannot start: No IRONIC_ENDPOINT variable set\n") + os.Exit(1) + } +} + // Provisioner implements the provisioning.Provisioner interface // and uses Ironic to manage the host. type ironicProvisioner struct { // the host to be managed by this provisioner - host *metalkubev1alpha1.BareMetalHost + host *metal3v1alpha1.BareMetalHost // a shorter path to the provisioning status data structure - status *metalkubev1alpha1.ProvisionStatus + status *metal3v1alpha1.ProvisionStatus // access parameters for the BMC bmcAccess bmc.AccessDetails // credentials to log in to the BMC @@ -57,8 +81,14 @@ type ironicProvisioner struct { publisher provisioner.EventPublisher } -// New returns a new Ironic Provisioner -func New(host *metalkubev1alpha1.BareMetalHost, bmcCreds bmc.Credentials, publisher provisioner.EventPublisher) (provisioner.Provisioner, error) { +// A private function to construct an ironicProvisioner (rather than a +// Provisioner interface) in a consistent way for tests. 
+func newProvisioner(host *metal3v1alpha1.BareMetalHost, bmcCreds bmc.Credentials, publisher provisioner.EventPublisher) (*ironicProvisioner, error) { + log.Info("ironic settings", + "endpoint", ironicEndpoint, + "deployKernelURL", deployKernelURL, + "deployRamdiskURL", deployRamdiskURL, + ) client, err := noauth.NewBareMetalNoAuth(noauth.EndpointOpts{ IronicEndpoint: ironicEndpoint, }) @@ -84,6 +114,11 @@ func New(host *metalkubev1alpha1.BareMetalHost, bmcCreds bmc.Credentials, publis return p, nil } +// New returns a new Ironic Provisioner +func New(host *metal3v1alpha1.BareMetalHost, bmcCreds bmc.Credentials, publisher provisioner.EventPublisher) (provisioner.Provisioner, error) { + return newProvisioner(host, bmcCreds, publisher) +} + func (p *ironicProvisioner) validateNode(ironicNode *nodes.Node) (errorMessage string, err error) { var validationErrors []string @@ -169,8 +204,8 @@ func (p *ironicProvisioner) ValidateManagementAccess() (result provisioner.Resul // // FIXME(dhellmann): We need to get our IP on the // provisioning network from somewhere. 
- driverInfo["deploy_kernel"] = "http://172.22.0.1/images/ironic-python-agent.kernel" - driverInfo["deploy_ramdisk"] = "http://172.22.0.1/images/ironic-python-agent.initramfs" + driverInfo["deploy_kernel"] = deployKernelURL + driverInfo["deploy_ramdisk"] = deployRamdiskURL ironicNode, err = nodes.Create( p.client, @@ -334,10 +369,10 @@ func (p *ironicProvisioner) InspectHardware() (result provisioner.Result, err er if p.host.Status.HardwareDetails == nil { p.log.Info("continuing inspection by setting details") p.host.Status.HardwareDetails = - &metalkubev1alpha1.HardwareDetails{ + &metal3v1alpha1.HardwareDetails{ RAMGiB: 128, - NIC: []metalkubev1alpha1.NIC{ - metalkubev1alpha1.NIC{ + NIC: []metal3v1alpha1.NIC{ + metal3v1alpha1.NIC{ Name: "nic-1", Model: "virt-io", Network: "Pod Networking", @@ -345,7 +380,7 @@ func (p *ironicProvisioner) InspectHardware() (result provisioner.Result, err er IP: "192.168.100.1", SpeedGbps: 1, }, - metalkubev1alpha1.NIC{ + metal3v1alpha1.NIC{ Name: "nic-2", Model: "e1000", Network: "Pod Networking", @@ -354,22 +389,22 @@ func (p *ironicProvisioner) InspectHardware() (result provisioner.Result, err er SpeedGbps: 1, }, }, - Storage: []metalkubev1alpha1.Storage{ - metalkubev1alpha1.Storage{ + Storage: []metal3v1alpha1.Storage{ + metal3v1alpha1.Storage{ Name: "disk-1 (boot)", Type: "SSD", SizeGiB: 1024 * 93, Model: "Dell CFJ61", }, - metalkubev1alpha1.Storage{ + metal3v1alpha1.Storage{ Name: "disk-2", Type: "SSD", SizeGiB: 1024 * 93, Model: "Dell CFJ61", }, }, - CPUs: []metalkubev1alpha1.CPU{ - metalkubev1alpha1.CPU{ + CPUs: []metal3v1alpha1.CPU{ + metal3v1alpha1.CPU{ Type: "x86", SpeedGHz: 3, }, @@ -454,79 +489,170 @@ func (p *ironicProvisioner) getImageChecksum() (string, error) { return checksum, nil } -func (p *ironicProvisioner) startProvisioning(ironicNode *nodes.Node, checksum string, getUserData provisioner.UserDataSource) (result provisioner.Result, err error) { +func (p *ironicProvisioner) getUpdateOptsForNode(ironicNode 
*nodes.Node, checksum string) (updates nodes.UpdateOpts, err error) { - // Ensure the instance_info properties for the host are set to - // tell Ironic where to get the image to be provisioned. - ironicHasSameImage := (ironicNode.InstanceInfo["image_source"] == p.host.Spec.Image.URL && - ironicNode.InstanceInfo["image_checksum"] == checksum) + hwProf, err := hardware.GetProfile(p.host.HardwareProfile()) + if err != nil { + return updates, errors.Wrap(err, + fmt.Sprintf("Could not start provisioning with bad hardware profile %s", + p.host.HardwareProfile())) + } + + // image_source var op nodes.UpdateOp if _, ok := ironicNode.InstanceInfo["image_source"]; !ok { - // no source, need to add op = nodes.AddOp - p.log.Info("adding host settings in ironic") - } else if !ironicHasSameImage { - // have a different source or checksum, need to update + p.log.Info("adding image_source") + } else { op = nodes.ReplaceOp - p.log.Info("updating host settings in ironic") + p.log.Info("updating image_source") + } + updates = append( + updates, + nodes.UpdateOperation{ + Op: op, + Path: "/instance_info/image_source", + Value: p.host.Spec.Image.URL, + }, + ) + + // image_checksum + if _, ok := ironicNode.InstanceInfo["image_checksum"]; !ok { + op = nodes.AddOp + p.log.Info("adding image_checksum") } else { - p.log.Info("not making any change to host settings", - "ok", ok, "same", ironicHasSameImage) - } + op = nodes.ReplaceOp + p.log.Info("updating image_checksum") + } + updates = append( + updates, + nodes.UpdateOperation{ + Op: op, + Path: "/instance_info/image_checksum", + Value: checksum, + }, + ) - if op != "" { - _, err = nodes.Update( - p.client, - ironicNode.UUID, - nodes.UpdateOpts{ - nodes.UpdateOperation{ - Op: op, - Path: "/instance_info/image_source", - Value: p.host.Spec.Image.URL, - }, - nodes.UpdateOperation{ - Op: op, - Path: "/instance_info/image_checksum", - Value: checksum, - }, - // FIXME(dhellmann): We have to provide something for - // the disk size until - // 
https://storyboard.openstack.org/#!/story/2005165 - // is fixed in ironic. - nodes.UpdateOperation{ - Op: op, - Path: "/instance_info/root_gb", - Value: 10, - }, - // NOTE(dhellmann): We must fill in *some* value so - // that Ironic will monitor the host. We don't have a - // nova instance at all, so just give the node it's - // UUID again. - nodes.UpdateOperation{ - Op: op, - Path: "/instance_uuid", - Value: p.host.Status.Provisioning.ID, - }, - // FIXME(dhellmann): We need to specify the root - // device to receive the image. That should come from - // some combination of inspecting the host to see what - // is available and the hardware profile to give us - // instructions. - // nodes.UpdateOperation{ - // Op: nodes.AddOp, - // Path: "/properties/root_device", - // Value: map[string]interface{}, - // }, - }).Extract() - switch err.(type) { - case nil: - case gophercloud.ErrDefault409: - p.log.Info("could not update host settings in ironic, busy") - result.Dirty = true - return result, nil - default: - return result, errors.Wrap(err, "failed to update host settings in ironic") - } + // instance_uuid + // + // NOTE(dhellmann): We must fill in *some* value so that Ironic + // will monitor the host. We don't have a nova instance at all, so + // just give the node it's UUID again. + p.log.Info("setting instance_uuid") + updates = append( + updates, + nodes.UpdateOperation{ + Op: nodes.ReplaceOp, + Path: "/instance_uuid", + Value: p.host.Status.Provisioning.ID, + }, + ) + + // root_gb + // + // FIXME(dhellmann): We have to provide something for the disk + // size until https://storyboard.openstack.org/#!/story/2005165 is + // fixed in ironic. 
+ if _, ok := ironicNode.InstanceInfo["root_gb"]; !ok { + op = nodes.AddOp + p.log.Info("adding root_gb") + } else if ironicNode.InstanceInfo["root_gb"] != 10 { + op = nodes.ReplaceOp + p.log.Info("updating root_gb") + } + updates = append( + updates, + nodes.UpdateOperation{ + Op: op, + Path: "/instance_info/root_gb", + Value: hwProf.RootGB, + }, + ) + + // root_device + // + // FIXME(dhellmann): We need to specify the root device to receive + // the image. That should come from some combination of inspecting + // the host to see what is available and the hardware profile to + // give us instructions. + if _, ok := ironicNode.Properties["root_device"]; !ok { + op = nodes.AddOp + p.log.Info("adding root_device") + } else { + op = nodes.ReplaceOp + p.log.Info("updating root_device") + } + hints := map[string]string{} + switch { + case hwProf.RootDeviceHints.DeviceName != "": + hints["name"] = hwProf.RootDeviceHints.DeviceName + case hwProf.RootDeviceHints.HCTL != "": + hints["hctl"] = hwProf.RootDeviceHints.HCTL + } + p.log.Info("using root device", "hints", hints) + updates = append( + updates, + nodes.UpdateOperation{ + Op: op, + Path: "/properties/root_device", + Value: hints, + }, + ) + + // cpu_arch + // + // FIXME(dhellmann): This should come from inspecting the + // host. 
+ if _, ok := ironicNode.Properties["cpu_arch"]; !ok { + op = nodes.AddOp + p.log.Info("adding cpu_arch") + } else { + op = nodes.ReplaceOp + p.log.Info("updating cpu_arch") + } + updates = append( + updates, + nodes.UpdateOperation{ + Op: op, + Path: "/properties/cpu_arch", + Value: hwProf.CPUArch, + }, + ) + + // local_gb + if _, ok := ironicNode.Properties["local_gb"]; !ok { + op = nodes.AddOp + p.log.Info("adding local_gb") + } else { + op = nodes.ReplaceOp + p.log.Info("updating local_gb") + } + updates = append( + updates, + nodes.UpdateOperation{ + Op: op, + Path: "/properties/local_gb", + Value: hwProf.LocalGB, + }, + ) + + return updates, nil +} + +func (p *ironicProvisioner) startProvisioning(ironicNode *nodes.Node, checksum string, getUserData provisioner.UserDataSource) (result provisioner.Result, err error) { + + p.log.Info("starting provisioning") + + updates, err := p.getUpdateOptsForNode(ironicNode, checksum) + _, err = nodes.Update(p.client, ironicNode.UUID, updates).Extract() + switch err.(type) { + case nil: + case gophercloud.ErrDefault409: + p.log.Info("could not update host settings in ironic, busy") + result.Dirty = true + return result, nil + default: + return result, errors.Wrap(err, "failed to update host settings in ironic") } p.log.Info("validating host settings") @@ -597,7 +723,8 @@ func (p *ironicProvisioner) Provision(getUserData provisioner.UserDataSource) (r p.log.Info("checking image settings", "source", ironicNode.InstanceInfo["image_source"], "checksum", checksum, - "same", ironicHasSameImage) + "same", ironicHasSameImage, + "provisionState", ironicNode.ProvisionState) result.RequeueAfter = provisionRequeueDelay diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/provisioner/provisioner.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/provisioner/provisioner.go similarity index 90% rename from vendor/github.com/metalkube/baremetal-operator/pkg/provisioner/provisioner.go rename to 
vendor/github.com/metal3-io/baremetal-operator/pkg/provisioner/provisioner.go index 8fa6e76a1..d61743a95 100644 --- a/vendor/github.com/metalkube/baremetal-operator/pkg/provisioner/provisioner.go +++ b/vendor/github.com/metal3-io/baremetal-operator/pkg/provisioner/provisioner.go @@ -3,8 +3,8 @@ package provisioner import ( "time" - metalkubev1alpha1 "github.com/metalkube/baremetal-operator/pkg/apis/metalkube/v1alpha1" - "github.com/metalkube/baremetal-operator/pkg/bmc" + metal3v1alpha1 "github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1" + "github.com/metal3-io/baremetal-operator/pkg/bmc" ) /* @@ -16,7 +16,7 @@ Package provisioning defines the API for talking to the provisioning backend. type EventPublisher func(reason, message string) // Factory is the interface for creating new Provisioner objects. -type Factory func(host *metalkubev1alpha1.BareMetalHost, bmcCreds bmc.Credentials, publish EventPublisher) (Provisioner, error) +type Factory func(host *metal3v1alpha1.BareMetalHost, bmcCreds bmc.Credentials, publish EventPublisher) (Provisioner, error) // UserDataSource is the interface for a function to retrieve user // data for a host being provisioned. 
diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/utils/stringlist.go b/vendor/github.com/metal3-io/baremetal-operator/pkg/utils/stringlist.go similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/pkg/utils/stringlist.go rename to vendor/github.com/metal3-io/baremetal-operator/pkg/utils/stringlist.go diff --git a/vendor/github.com/metalkube/baremetal-operator/test/e2e/role_binding.yaml b/vendor/github.com/metal3-io/baremetal-operator/test/e2e/role_binding.yaml similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/test/e2e/role_binding.yaml rename to vendor/github.com/metal3-io/baremetal-operator/test/e2e/role_binding.yaml diff --git a/vendor/github.com/metal3-io/baremetal-operator/tools/clean_demo_hosts.sh b/vendor/github.com/metal3-io/baremetal-operator/tools/clean_demo_hosts.sh new file mode 100755 index 000000000..0e3334a36 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/tools/clean_demo_hosts.sh @@ -0,0 +1,3 @@ +#!/bin/bash -x + +oc delete baremetalhost -l metal3demo diff --git a/vendor/github.com/metalkube/baremetal-operator/tools/clean_host.sh b/vendor/github.com/metal3-io/baremetal-operator/tools/clean_host.sh similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/tools/clean_host.sh rename to vendor/github.com/metal3-io/baremetal-operator/tools/clean_host.sh diff --git a/vendor/github.com/metal3-io/baremetal-operator/tools/run_local_ironic.sh b/vendor/github.com/metal3-io/baremetal-operator/tools/run_local_ironic.sh new file mode 100755 index 000000000..9b25acef5 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/tools/run_local_ironic.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +set -ex + +IRONIC_IMAGE=${IRONIC_IMAGE:-"quay.io/metal3-io/ironic"} +IRONIC_INSPECTOR_IMAGE=${IRONIC_INSPECTOR_IMAGE:-"quay.io/metal3-io/ironic-inspector"} +IRONIC_DATA_DIR="$PWD/ironic" + +sudo podman pull $IRONIC_IMAGE +sudo podman pull $IRONIC_INSPECTOR_IMAGE + 
+mkdir -p "$IRONIC_DATA_DIR/html/images" +pushd $IRONIC_DATA_DIR/html/images + +# The images directory should contain images and an associated md5sum. +# - image.qcow2 +# - image.qcow2.md5sum + +for name in ironic ironic-inspector dnsmasq httpd mariadb; do + sudo podman ps | grep -w "$name$" && sudo podman kill $name + sudo podman ps --all | grep -w "$name$" && sudo podman rm $name -f +done + +# Remove existing pod +if sudo podman pod exists ironic-pod ; then + sudo podman pod rm ironic-pod -f +fi + +# set password for mariadb +mariadb_password=$(echo $(date;hostname)|sha256sum |cut -c-20) + +# Create pod +sudo podman pod create -n ironic-pod + +# Start dnsmasq, http, mariadb, and ironic containers using same image + +# See this file for env vars you can set, like IP, DHCP_RANGE, INTERFACE +# https://github.com/metal3-io/ironic/blob/master/rundnsmasq.sh +sudo podman run -d --net host --privileged --name dnsmasq --pod ironic-pod \ + -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/rundnsmasq ${IRONIC_IMAGE} + +# For available env vars, see: +# https://github.com/metal3-io/ironic/blob/master/runhttpd.sh +sudo podman run -d --net host --privileged --name httpd --pod ironic-pod \ + -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/runhttpd ${IRONIC_IMAGE} + +# https://github.com/metal3-io/ironic/blob/master/runmariadb.sh +sudo podman run -d --net host --privileged --name mariadb --pod ironic-pod \ + -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/runmariadb \ + --env MARIADB_PASSWORD=$mariadb_password ${IRONIC_IMAGE} + +# See this file for additional env vars you may want to pass, like IP and INTERFACE +# https://github.com/metal3-io/ironic/blob/master/runironic.sh +sudo podman run -d --net host --privileged --name ironic --pod ironic-pod \ + --env MARIADB_PASSWORD=$mariadb_password \ + -v $IRONIC_DATA_DIR:/shared ${IRONIC_IMAGE} + +# Start Ironic Inspector +sudo podman run -d --net host --privileged --name ironic-inspector --pod ironic-pod "${IRONIC_INSPECTOR_IMAGE}" diff 
--git a/vendor/github.com/metalkube/baremetal-operator/tools/show_host_status.sh b/vendor/github.com/metal3-io/baremetal-operator/tools/show_host_status.sh similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/tools/show_host_status.sh rename to vendor/github.com/metal3-io/baremetal-operator/tools/show_host_status.sh diff --git a/vendor/github.com/metalkube/baremetal-operator/version/version.go b/vendor/github.com/metal3-io/baremetal-operator/version/version.go similarity index 100% rename from vendor/github.com/metalkube/baremetal-operator/version/version.go rename to vendor/github.com/metal3-io/baremetal-operator/version/version.go diff --git a/vendor/github.com/metalkube/baremetal-operator/docs/BaremetalHost_ProvisioningState.png b/vendor/github.com/metalkube/baremetal-operator/docs/BaremetalHost_ProvisioningState.png deleted file mode 100644 index 0611258f5..000000000 Binary files a/vendor/github.com/metalkube/baremetal-operator/docs/BaremetalHost_ProvisioningState.png and /dev/null differ diff --git a/vendor/github.com/metalkube/baremetal-operator/docs/baremetalhost-states.md b/vendor/github.com/metalkube/baremetal-operator/docs/baremetalhost-states.md deleted file mode 100644 index a98432ec6..000000000 --- a/vendor/github.com/metalkube/baremetal-operator/docs/baremetalhost-states.md +++ /dev/null @@ -1,5 +0,0 @@ -# BaremetalHost Provisioning States - -The following diagram shows the possible Provisioning State transitions for the BaremetalHost object: - -![BaremetalHost ProvisioningState transitions](BaremetalHost_ProvisioningState.png) diff --git a/vendor/github.com/metalkube/baremetal-operator/pkg/bmc/credentials.go b/vendor/github.com/metalkube/baremetal-operator/pkg/bmc/credentials.go deleted file mode 100644 index e8360f274..000000000 --- a/vendor/github.com/metalkube/baremetal-operator/pkg/bmc/credentials.go +++ /dev/null @@ -1,37 +0,0 @@ -package bmc - -const ( - // MissingCredentialsMsg is returned as a validation failure - 
// reason when there are no credentials at all. - MissingCredentialsMsg string = "Missing BMC connection details: Credentials" - - // MissingAddressMsg is returned as a validation failure - // reason when there is no address for the BMC. - MissingAddressMsg string = "Missing BMC connection details: Address" - - // MissingUsernameMsg is returned as a validation failure reason - // when the credentials do not include a "username" field. - MissingUsernameMsg string = "Missing BMC connection details: 'username' in credentials" - - // MissingPasswordMsg is returned as a validation failure reason - // when the credentials do not include a "password" field. - MissingPasswordMsg string = "Missing BMC connection details: 'password' in credentials" -) - -// Credentials holds the information for authenticating with the BMC. -type Credentials struct { - Username string - Password string -} - -// AreValid returns a boolean indicating whether the credentials are -// valid, and if false a string explaining why not. -func (creds Credentials) AreValid() (bool, string) { - if creds.Username == "" { - return false, MissingUsernameMsg - } - if creds.Password == "" { - return false, MissingPasswordMsg - } - return true, "" -} diff --git a/vendor/github.com/metalkube/baremetal-operator/tools/clean_demo_hosts.sh b/vendor/github.com/metalkube/baremetal-operator/tools/clean_demo_hosts.sh deleted file mode 100755 index 0a7183057..000000000 --- a/vendor/github.com/metalkube/baremetal-operator/tools/clean_demo_hosts.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -x - -oc delete baremetalhost -l metalkubedemo diff --git a/vendor/github.com/openshift/cluster-api/CONTRIBUTING.md b/vendor/github.com/openshift/cluster-api/CONTRIBUTING.md index cc52280e0..b0be9a45f 100644 --- a/vendor/github.com/openshift/cluster-api/CONTRIBUTING.md +++ b/vendor/github.com/openshift/cluster-api/CONTRIBUTING.md @@ -26,6 +26,10 @@ All changes must be code reviewed. 
Coding conventions and standards are explaine Cluster API maintainers may add "LGTM" (Looks Good To Me) or an equivalent comment to indicate that a PR is acceptable. Any change requires at least one LGTM. No pull requests can be merged until at least one Cluster API maintainer signs off with an LGTM. +### Google Doc Viewing Permissions + +To gain viewing permissions to google docs in this project, please join either the [kubernetes-dev](https://groups.google.com/forum/#!forum/kubernetes-dev) or [kubernetes-sig-cluster-lifecycle](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle) google group. + ## Cloud Provider Developer Guide ### Overview diff --git a/vendor/github.com/openshift/cluster-api/Gopkg.lock b/vendor/github.com/openshift/cluster-api/Gopkg.lock index d979caf4c..4f8d3e273 100644 --- a/vendor/github.com/openshift/cluster-api/Gopkg.lock +++ b/vendor/github.com/openshift/cluster-api/Gopkg.lock @@ -1098,6 +1098,14 @@ pruneopts = "UT" revision = "3a2206dd6a78497deceb3ae058417fdeb2036c7e" +[[projects]] + branch = "master" + digest = "1:dc54d24e166e75a89145883b6080306f5684db2e8ab7a068625f010dee604c6e" + name = "k8s.io/component-base" + packages = ["cli/flag"] + pruneopts = "UT" + revision = "fd5d14dd6d20b9f3b0c066e2702d877b0b8c049f" + [[projects]] branch = "master" digest = "1:28514fabca4356625720ffb012408790a9d00d31963a9bd9daf7b5ccd894c301" @@ -1115,12 +1123,12 @@ revision = "f8a0810f38afb8478882b3835a615aebfda39afa" [[projects]] - digest = "1:e2999bf1bb6eddc2a6aa03fe5e6629120a53088926520ca3b4765f77d7ff7eab" + digest = "1:c696379ad201c1e86591785579e16bf6cf886c362e9a7534e8eb0d1028b20582" name = "k8s.io/klog" packages = ["."] pruneopts = "UT" - revision = "a5bc97fbc634d635061f3146511332c7e313a55a" - version = "v0.1.0" + revision = "e531227889390a39d9533dde61f590fe9f4b0035" + version = "v0.3.0" [[projects]] branch = "master" @@ -1275,6 +1283,7 @@ "k8s.io/code-generator/cmd/deepcopy-gen", "k8s.io/code-generator/cmd/informer-gen", 
"k8s.io/code-generator/cmd/lister-gen", + "k8s.io/component-base/cli/flag", "k8s.io/klog", "sigs.k8s.io/controller-runtime/pkg/client", "sigs.k8s.io/controller-runtime/pkg/client/config", diff --git a/vendor/github.com/openshift/cluster-api/Makefile b/vendor/github.com/openshift/cluster-api/Makefile index d72c81957..51183e885 100644 --- a/vendor/github.com/openshift/cluster-api/Makefile +++ b/vendor/github.com/openshift/cluster-api/Makefile @@ -21,9 +21,18 @@ export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT ?=60s export KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT ?=60s +# This option is for running docker manifest command +export DOCKER_CLI_EXPERIMENTAL := enabled + # Image URL to use all building/pushing image targets -export CONTROLLER_IMG ?= gcr.io/k8s-cluster-api/cluster-api-controller:latest -export EXAMPLE_PROVIDER_IMG ?= gcr.io/k8s-cluster-api/example-provider-controller:latest +REGISTRY ?= gcr.io/k8s-cluster-api +CONTROLLER_IMG ?= $(REGISTRY)/cluster-api-controller +EXAMPLE_PROVIDER_IMG ?= $(REGISTRY)/example-provider-controller + +TAG ?= latest + +ARCH?=amd64 +ALL_ARCH = amd64 arm arm64 ppc64le s390x all: test manager clusterctl @@ -109,25 +118,49 @@ clientset: ## Generate a typed clientset clean: ## Remove all generated files rm -f bazel-* +.PHONY: all-docker-build +all-docker-build: $(addprefix sub-docker-build-,$(ALL_ARCH)) + @echo "updating kustomize image patch file for manager resource" + hack/sed.sh -i.tmp -e 's@image: .*@image: '"$(CONTROLLER_IMG):$(TAG)"'@' ./config/default/manager_image_patch.yaml + +sub-docker-build-%: + $(MAKE) ARCH=$* docker-build + .PHONY: docker-build docker-build: generate fmt vet manifests ## Build the docker image for controller-manager - docker build . -t ${CONTROLLER_IMG} + docker build --build-arg ARCH=$(ARCH) . 
-t $(CONTROLLER_IMG)-$(ARCH):$(TAG) @echo "updating kustomize image patch file for manager resource" - sed -i.tmp -e 's@image: .*@image: '"${CONTROLLER_IMG}"'@' ./config/default/manager_image_patch.yaml + hack/sed.sh -i.tmp -e 's@image: .*@image: '"${CONTROLLER_IMG}-$(ARCH):$(TAG)"'@' ./config/default/manager_image_patch.yaml + +.PHONY:all-push ## Push all the architecture docker images and fat manifest docker image +all-push: all-docker-push push-manifest + +.PHONY:all-docker-push ## Push all the architecture docker images +all-docker-push: $(addprefix sub-docker-push-,$(ALL_ARCH)) + +sub-docker-push-%: + $(MAKE) ARCH=$* docker-push + +.PHONY: push-manifest +push-manifest: ## Push the fat manifest docker image. TODO: Update bazel build to push manifest once https://github.com/bazelbuild/rules_docker/issues/300 get merged + ## Minimum docker version 18.06.0 is required for creating and pushing manifest images + docker manifest create --amend $(CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CONTROLLER_IMG)\-&:$(TAG)~g") + @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${CONTROLLER_IMG}:${TAG} ${CONTROLLER_IMG}-$${arch}:${TAG}; done + docker manifest push --purge ${CONTROLLER_IMG}:${TAG} .PHONY: docker-push docker-push: docker-build ## Push the docker image - docker push "$(CONTROLLER_IMG)" + docker push $(CONTROLLER_IMG)-$(ARCH):$(TAG) .PHONY: docker-build-ci docker-build-ci: generate fmt vet manifests ## Build the docker image for example provider - docker build . -f ./pkg/provider/example/container/Dockerfile -t ${EXAMPLE_PROVIDER_IMG} + docker build --build-arg ARCH=$(ARCH) . 
-f ./pkg/provider/example/container/Dockerfile -t $(EXAMPLE_PROVIDER_IMG)-$(ARCH):$(TAG) @echo "updating kustomize image patch file for ci" - sed -i.tmp -e 's@image: .*@image: '"${EXAMPLE_PROVIDER_IMG}"'@' ./config/ci/manager_image_patch.yaml + hack/sed.sh -i.tmp -e 's@image: .*@image: '"${EXAMPLE_PROVIDER_IMG}-$(ARCH):$(TAG)"'@' ./config/ci/manager_image_patch.yaml .PHONY: docker-push-ci docker-push-ci: docker-build-ci ## Build the docker image for ci - docker push "$(EXAMPLE_PROVIDER_IMG)" + docker push "$(EXAMPLE_PROVIDER_IMG)-$(ARCH):$(TAG)" .PHONY: verify verify: diff --git a/vendor/github.com/openshift/cluster-api/OWNERS_ALIASES b/vendor/github.com/openshift/cluster-api/OWNERS_ALIASES index d8f6e3cea..e7059b003 100644 --- a/vendor/github.com/openshift/cluster-api/OWNERS_ALIASES +++ b/vendor/github.com/openshift/cluster-api/OWNERS_ALIASES @@ -3,16 +3,13 @@ aliases: sig-cluster-lifecycle-leads: - luxas - - roberthbailey + - justinsb - timothysc cluster-api-admins: - justinsb - - kris-nova - - krousey - - luxas - - roberthbailey + - detiber + - davidewatson cluster-api-maintainers: - justinsb - - krousey - - roberthbailey + - detiber - vincepri diff --git a/vendor/github.com/openshift/cluster-api/SECURITY_CONTACTS b/vendor/github.com/openshift/cluster-api/SECURITY_CONTACTS index 686757663..7da23c405 100644 --- a/vendor/github.com/openshift/cluster-api/SECURITY_CONTACTS +++ b/vendor/github.com/openshift/cluster-api/SECURITY_CONTACTS @@ -10,7 +10,7 @@ # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE # INSTRUCTIONS AT https://kubernetes.io/security/ -lukemarsden +detiber +justinsb luxas -roberthbailey timothysc diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/README.md b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/README.md index 9499ef26b..5b3147030 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/README.md +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/README.md @@ -2,7 
+2,7 @@ `clusterctl` is the SIG-cluster-lifecycle sponsored tool that implements the Cluster API. -Read the [experience doc here](https://docs.google.com/document/d/1-sYb3EdkRga49nULH1kSwuQFf1o6GvAw_POrsNo5d8c/edit#). +Read the [experience doc here](https://docs.google.com/document/d/1-sYb3EdkRga49nULH1kSwuQFf1o6GvAw_POrsNo5d8c/edit#). To gain viewing permissions, please join either the [kubernetes-dev](https://groups.google.com/forum/#!forum/kubernetes-dev) or [kubernetes-sig-cluster-lifecycle](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle) google group. ## Getting Started @@ -13,8 +13,11 @@ this repository.** ### Prerequisites -1. Install [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) -2. Install a [driver](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md) for minikube. For Linux, we recommend kvm2. For MacOS, we recommend VirtualBox. +1. Cluster API runs its operations in Kubernetes. A pre-existing or temporary bootstrap cluster is required. Currently, we support multiple methods to bootstrap Cluster API: `kind` (preferred), `minikube` or any pre-existing cluster. + - If you want to use container, install [kind](https://github.com/kubernetes-sigs/kind#installation-and-usage). This is preferred. + - If you want to use VM, install [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/), version 0.30.0 or greater. + - If you want to use existing Kubernetes cluster, prepare your kubeconfig. +2. If you are using `kind` or existing Kubernetes cluster, go to step 3. If you are using `minikube`, install a [driver](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md). For Linux, we recommend `kvm2`. For MacOS, we recommend VirtualBox. 2. Build the `clusterctl` tool ```bash @@ -40,20 +43,39 @@ https://github.com/kubernetes-sigs/cluster-api/issues/158 and https://github.com 1. 
Create a cluster: + - __Bootstrap Cluster__: Use `bootstrap-type`, currently only `kind` and `minikube` are supported. + ```shell - ./clusterctl create cluster --provider -c cluster.yaml -m machines.yaml -p provider-components.yaml -a addons.yaml + ./clusterctl create cluster --provider --bootstrap-type -c cluster.yaml \ + -m machines.yaml -p provider-components.yaml -a addons.yaml ``` -To choose a specific minikube driver, please use the `--vm-driver` command line parameter. For example to use the kvm2 driver with clusterctl you would add `--vm-driver kvm2` + If you are using minikube, to choose a specific minikube driver, please use the `--bootstrap-flags vm-driver=xxx` command line parameter. For example to use the kvm2 driver with clusterctl you woud add `--bootstrap-flags vm-driver=kvm2`. + + - __Existing Cluster__: Use `bootstrap-cluster-kubeconfig`. This flag is used when you have an existing Kubernetes cluster. + + ```shell + ./clusterctl create cluster --provider --bootstrap-cluster-kubeconfig \ + -c cluster.yaml -m machines.yaml -p provider-components.yaml -a addons.yaml + ``` Additional advanced flags can be found via help. +Also, some environment variables are supported: +`CLUSTER_API_MACHINE_READY_TIMEOUT`: set this value to adjust the timeout value in minutes for a machine to become ready, The default timeout is currently 30 minutes, `export CLUSTER_API_MACHINE_READY_TIMEOUT=45` will extend the timeout value to 45 minutes. 
+ ```shell ./clusterctl create cluster --help ``` ### Interacting with your cluster +If you are using kind, set the `KUBECONFIG` environment variable first before using kubectl: + +``` +export KUBECONFIG="$(kind get kubeconfig-path --name="clusterapi")" +``` + Once you have created a cluster, you can interact with the cluster and machine resources using kubectl: @@ -63,6 +85,8 @@ $ kubectl --kubeconfig kubeconfig get machines $ kubectl --kubeconfig kubeconfig get machines -o yaml ``` +**NOTE:** There is no need to specify `--kubeconfig` if your `kubeconfig` was located in the default directory under `$HOME/.kube/config` or if you have already exposed env variable `KUBECONFIG`. + #### Scaling your cluster You can scale your cluster by adding additional individual Machines, or by adding a MachineSet or MachineDeployment diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go index cf2ea4d83..f37214df5 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go @@ -55,6 +55,10 @@ const ( machineClusterLabelName = "cluster.k8s.io/cluster-name" ) +const ( + TimeoutMachineReady = "CLUSTER_API_MACHINE_READY_TIMEOUT" +) + // Provides interaction with a cluster type Client interface { Apply(string) error @@ -919,7 +923,7 @@ func (c *client) waitForKubectlApply(manifest string) error { klog.V(2).Infof("Waiting for kubectl apply...") err := c.kubectlApply(manifest) if err != nil { - if strings.Contains(err.Error(), io.EOF.Error()) || strings.Contains(err.Error(), "refused") { + if strings.Contains(err.Error(), io.EOF.Error()) || strings.Contains(err.Error(), "refused") || strings.Contains(err.Error(), "no such host") { // Connection was refused, probably 
because the API server is not ready yet. klog.V(4).Infof("Waiting for kubectl apply... server not yet available: %v", err) return false, nil @@ -968,7 +972,17 @@ func waitForClusterResourceReady(cs clientset.Interface) error { } func waitForMachineReady(cs clientset.Interface, machine *clusterv1.Machine) error { - err := util.PollImmediate(retryIntervalResourceReady, timeoutMachineReady, func() (bool, error) { + timeout := timeoutMachineReady + if p := os.Getenv(TimeoutMachineReady); p != "" { + t, err := strconv.Atoi(p) + if err == nil { + // only valid value will be used + timeout = time.Duration(t) * time.Minute + klog.V(4).Info("Setting wait for machine timeout value to ", timeout) + } + } + + err := util.PollImmediate(retryIntervalResourceReady, timeout, func() (bool, error) { klog.V(2).Infof("Waiting for Machine %v to become ready...", machine.Name) m, err := cs.ClusterV1alpha1().Machines(machine.Namespace).Get(machine.Name, metav1.GetOptions{}) if err != nil { @@ -1019,17 +1033,16 @@ func GetClusterAPIObject(client Client, clusterName, namespace string) (*cluster return nil, nil, nil, errors.Wrapf(err, "unable to fetch cluster %s/%s", namespace, clusterName) } - controlPlane, nodes, err := ExtractControlPlaneMachine(machines) + controlPlane, nodes, err := ExtractControlPlaneMachines(machines) if err != nil { return nil, nil, nil, errors.Wrapf(err, "unable to fetch control plane machine in cluster %s/%s", namespace, clusterName) } - return cluster, controlPlane, nodes, nil + return cluster, controlPlane[0], nodes, nil } -// ExtractControlPlaneMachine separates the machines running the control plane (singular) from the incoming machines. +// ExtractControlPlaneMachines separates the machines running the control plane from the incoming machines. // This is currently done by looking at which machine specifies the control plane version. -// TODO: Cleanup. 
-func ExtractControlPlaneMachine(machines []*clusterv1.Machine) (*clusterv1.Machine, []*clusterv1.Machine, error) { +func ExtractControlPlaneMachines(machines []*clusterv1.Machine) ([]*clusterv1.Machine, []*clusterv1.Machine, error) { nodes := []*clusterv1.Machine{} controlPlaneMachines := []*clusterv1.Machine{} for _, machine := range machines { @@ -1039,8 +1052,8 @@ func ExtractControlPlaneMachine(machines []*clusterv1.Machine) (*clusterv1.Machi nodes = append(nodes, machine) } } - if len(controlPlaneMachines) != 1 { - return nil, nil, errors.Errorf("expected one control plane machine, got: %v", len(controlPlaneMachines)) + if len(controlPlaneMachines) < 1 { + return nil, nil, errors.Errorf("expected one or more control plane machines, got: %v", len(controlPlaneMachines)) } - return controlPlaneMachines[0], nodes, nil + return controlPlaneMachines, nodes, nil } diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go index 10cb8750e..5df4b8d8f 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go @@ -57,7 +57,7 @@ func New( // Create the cluster from the provided cluster definition and machine list. 
func (d *ClusterDeployer) Create(cluster *clusterv1.Cluster, machines []*clusterv1.Machine, provider provider.Deployer, kubeconfigOutput string, providerComponentsStoreFactory provider.ComponentsStoreFactory) error { - controlPlaneMachine, nodes, err := clusterclient.ExtractControlPlaneMachine(machines) + controlPlaneMachines, nodes, err := clusterclient.ExtractControlPlaneMachines(machines) if err != nil { return errors.Wrap(err, "unable to separate control plane machines from node machines") } @@ -89,12 +89,12 @@ func (d *ClusterDeployer) Create(cluster *clusterv1.Cluster, machines []*cluster cluster.Namespace = bootstrapClient.GetContextNamespace() } - klog.Infof("Creating control plane %v in namespace %q", controlPlaneMachine.Name, cluster.Namespace) - if err := phases.ApplyMachines(bootstrapClient, cluster.Namespace, []*clusterv1.Machine{controlPlaneMachine}); err != nil { + klog.Infof("Creating control plane %v in namespace %q", controlPlaneMachines[0].Name, cluster.Namespace) + if err := phases.ApplyMachines(bootstrapClient, cluster.Namespace, []*clusterv1.Machine{controlPlaneMachines[0]}); err != nil { return errors.Wrap(err, "unable to create control plane machine") } - klog.Infof("Updating bootstrap cluster object for cluster %v in namespace %q with control plane endpoint running on %s", cluster.Name, cluster.Namespace, controlPlaneMachine.Name) + klog.Infof("Updating bootstrap cluster object for cluster %v in namespace %q with control plane endpoint running on %s", cluster.Name, cluster.Namespace, controlPlaneMachines[0].Name) if err := d.updateClusterEndpoint(bootstrapClient, provider, cluster.Name, cluster.Namespace); err != nil { return errors.Wrap(err, "unable to update bootstrap cluster endpoint") } @@ -130,11 +130,22 @@ func (d *ClusterDeployer) Create(cluster *clusterv1.Cluster, machines []*cluster // For some reason, endpoint doesn't get updated in bootstrap cluster sometimes. So we // update the target cluster endpoint as well to be sure. 
- klog.Infof("Updating target cluster object with control plane endpoint running on %s", controlPlaneMachine.Name) + klog.Infof("Updating target cluster object with control plane endpoint running on %s", controlPlaneMachines[0].Name) if err := d.updateClusterEndpoint(targetClient, provider, cluster.Name, cluster.Namespace); err != nil { return errors.Wrap(err, "unable to update target cluster endpoint") } + if len(controlPlaneMachines) > 1 { + // TODO(h0tbird) Done serially until kubernetes/kubeadm#1097 is resolved and all + // supported versions of k8s we are deploying (using kubeadm) have the fix. + klog.Info("Creating additional control plane machines in target cluster.") + for _, controlPlaneMachine := range controlPlaneMachines[1:] { + if err := phases.ApplyMachines(targetClient, cluster.Namespace, []*clusterv1.Machine{controlPlaneMachine}); err != nil { + return errors.Wrap(err, "unable to create additional control plane machines") + } + } + } + klog.Info("Creating node machines in target cluster.") if err := phases.ApplyMachines(targetClient, cluster.Namespace, nodes); err != nil { return errors.Wrap(err, "unable to create node machines") diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/cmd/BUILD.bazel b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/cmd/BUILD.bazel index ba60291d0..ab8feff62 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/cmd/BUILD.bazel +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/cmd/BUILD.bazel @@ -43,6 +43,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", + "//vendor/k8s.io/component-base/cli/flag:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client/config:go_default_library", 
diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/cmd/root.go b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/cmd/root.go index 8266a6030..b584efb16 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/cmd/root.go +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/cmd/root.go @@ -22,6 +22,7 @@ import ( "os" "github.com/spf13/cobra" + cliflag "k8s.io/component-base/cli/flag" "k8s.io/klog" ) @@ -54,6 +55,7 @@ func exitWithHelp(cmd *cobra.Command, err string) { func init() { klog.InitFlags(flag.CommandLine) flag.CommandLine.Set("logtostderr", "true") + RootCmd.SetGlobalNormalizationFunc(cliflag.WordSepNormalizeFunc) RootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine) InitLogs() } diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/phases/createbootstrapcluster.go b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/phases/createbootstrapcluster.go index 766da316c..1ef06233b 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/phases/createbootstrapcluster.go +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/phases/createbootstrapcluster.go @@ -24,7 +24,7 @@ import ( ) func CreateBootstrapCluster(provisioner bootstrap.ClusterProvisioner, cleanupBootstrapCluster bool, clientFactory clusterclient.Factory) (clusterclient.Client, func(), error) { - klog.Info("Creating bootstrap cluster") + klog.Info("Preparing bootstrap cluster") cleanupFn := func() {} if err := provisioner.Create(); err != nil { diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args-invalid-flag.golden b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args-invalid-flag.golden index d1dae5312..4e89d69d1 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args-invalid-flag.golden +++ 
b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args-invalid-flag.golden @@ -19,15 +19,17 @@ Flags: Global Flags: --alsologtostderr log to standard error as well as files --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_file string If non-empty, use this log file --logtostderr log to standard error instead of files (default true) --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. 
- --skip_headers If true, avoid header prefixes in the log messages + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when openning log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs + -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging unknown flag: --invalid-flag diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args.golden b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args.golden index 520a03db6..c3b2f2d26 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args.golden +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args.golden @@ -19,15 +19,17 @@ Flags: Global Flags: --alsologtostderr log to standard error as well as files --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_file string If non-empty, use this log file --logtostderr log to standard error instead of files (default true) --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. 
- --skip_headers If true, avoid header prefixes in the log messages + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when openning log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs + -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging required flag(s) "cluster", "machines", "provider", "provider-components" not set diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-no-args-invalid-flag.golden b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-no-args-invalid-flag.golden index c13e2242f..b6ca97cd1 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-no-args-invalid-flag.golden +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-no-args-invalid-flag.golden @@ -11,15 +11,17 @@ Flags: Global Flags: --alsologtostderr log to standard error as well as files --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_file string If non-empty, use this log file --logtostderr log to standard error instead of files (default true) --master string The address of the Kubernetes API server. 
Overrides any value in kubeconfig. Only required if out-of-cluster. - --skip_headers If true, avoid header prefixes in the log messages + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when openning log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs + -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging Use "clusterctl create [command] --help" for more information about a command. diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-no-args.golden b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-no-args.golden index 436766ad1..5369b32f3 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-no-args.golden +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/create-no-args.golden @@ -12,15 +12,17 @@ Flags: Global Flags: --alsologtostderr log to standard error as well as files --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. 
(default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_file string If non-empty, use this log file --logtostderr log to standard error instead of files (default true) --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. - --skip_headers If true, avoid header prefixes in the log messages + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when openning log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs + -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging Use "clusterctl create [command] --help" for more information about a command. diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args-invalid-flag.golden b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args-invalid-flag.golden index dc7a15fb3..4c2c40b13 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args-invalid-flag.golden +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args-invalid-flag.golden @@ -16,15 +16,17 @@ Flags: Global Flags: --alsologtostderr log to standard error as well as files --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_file string If non-empty, use this log file --logtostderr log to standard error instead of files (default true) --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. - --skip_headers If true, avoid header prefixes in the log messages + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when openning log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs + -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging unknown flag: --invalid-flag diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args.golden b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args.golden index 3eaea4c89..82e0add99 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args.golden +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args.golden @@ -18,13 +18,15 @@ Flags: Global Flags: --alsologtostderr log to standard error as well as files --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_file string If non-empty, use this log file --logtostderr log to standard error instead of files (default true) --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. - --skip_headers If true, avoid header prefixes in the log messages + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when openning log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs + -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-no-args-invalid-flag.golden b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-no-args-invalid-flag.golden index 2641ea0f9..54ed0e0eb 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-no-args-invalid-flag.golden +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-no-args-invalid-flag.golden @@ -11,15 +11,17 @@ Flags: Global Flags: --alsologtostderr log to standard error as well as files --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_file string If non-empty, use this log file --logtostderr log to standard error instead of files (default true) --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. - --skip_headers If true, avoid header prefixes in the log messages + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when openning log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs + -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging Use "clusterctl delete [command] --help" for more information about a command. diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-no-args.golden b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-no-args.golden index a0f13986c..86cc1d355 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-no-args.golden +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/delete-no-args.golden @@ -12,15 +12,17 @@ Flags: Global Flags: --alsologtostderr log to standard error as well as files --kubeconfig string Paths to a kubeconfig. 
Only required if out-of-cluster. + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_file string If non-empty, use this log file --logtostderr log to standard error instead of files (default true) --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. - --skip_headers If true, avoid header prefixes in the log messages + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when openning log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs + -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging Use "clusterctl delete [command] --help" for more information about a command. 
diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/no-args-invalid-flag.golden b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/no-args-invalid-flag.golden index d96b83c83..cbe4f8e4e 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/no-args-invalid-flag.golden +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/no-args-invalid-flag.golden @@ -14,15 +14,17 @@ Flags: --alsologtostderr log to standard error as well as files -h, --help help for clusterctl --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_file string If non-empty, use this log file --logtostderr log to standard error instead of files (default true) --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. 
- --skip_headers If true, avoid header prefixes in the log messages + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when openning log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs + -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging Use "clusterctl [command] --help" for more information about a command. diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/no-args.golden b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/no-args.golden index 3394e22f2..005a7c359 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/no-args.golden +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/no-args.golden @@ -15,15 +15,17 @@ Flags: --alsologtostderr log to standard error as well as files -h, --help help for clusterctl --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_file string If non-empty, use this log file --logtostderr log to standard error instead of files (default true) --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. 
- --skip_headers If true, avoid header prefixes in the log messages + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when openning log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs + -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging Use "clusterctl [command] --help" for more information about a command. diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/validate-cluster-no-args-invalid-flag.golden b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/validate-cluster-no-args-invalid-flag.golden index 75243978b..9ca44dee7 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/validate-cluster-no-args-invalid-flag.golden +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/validate-cluster-no-args-invalid-flag.golden @@ -11,15 +11,17 @@ Flags: Global Flags: --alsologtostderr log to standard error as well as files --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. 
(default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_file string If non-empty, use this log file --logtostderr log to standard error instead of files (default true) --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. - --skip_headers If true, avoid header prefixes in the log messages + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when openning log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs + -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging unknown flag: --invalid-flag diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/validate-no-args-invalid-flag.golden b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/validate-no-args-invalid-flag.golden index fe1fd2036..943573942 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/validate-no-args-invalid-flag.golden +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/validate-no-args-invalid-flag.golden @@ -11,15 +11,17 @@ Flags: Global Flags: --alsologtostderr log to standard error as well as files --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. 
If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_file string If non-empty, use this log file --logtostderr log to standard error instead of files (default true) --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. - --skip_headers If true, avoid header prefixes in the log messages + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when openning log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs + -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging Use "clusterctl validate [command] --help" for more information about a command. diff --git a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/validate-no-args.golden b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/validate-no-args.golden index ba2421051..e88396b03 100644 --- a/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/validate-no-args.golden +++ b/vendor/github.com/openshift/cluster-api/cmd/clusterctl/testdata/validate-no-args.golden @@ -12,15 +12,17 @@ Flags: Global Flags: --alsologtostderr log to standard error as well as files --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. 
Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_file string If non-empty, use this log file --logtostderr log to standard error instead of files (default true) --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. - --skip_headers If true, avoid header prefixes in the log messages + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when openning log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) - -v, --v Level log level for V logs + -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging Use "clusterctl validate [command] --help" for more information about a command. 
diff --git a/vendor/github.com/openshift/cluster-api/config/crds/cluster_v1alpha1_machine.yaml b/vendor/github.com/openshift/cluster-api/config/crds/cluster_v1alpha1_machine.yaml index d513c9c1a..ecc0f00fa 100644 --- a/vendor/github.com/openshift/cluster-api/config/crds/cluster_v1alpha1_machine.yaml +++ b/vendor/github.com/openshift/cluster-api/config/crds/cluster_v1alpha1_machine.yaml @@ -6,6 +6,20 @@ metadata: controller-tools.k8s.io: "1.0" name: machines.cluster.k8s.io spec: + additionalPrinterColumns: + - JSONPath: .spec.providerID + description: Provider ID + name: ProviderID + type: string + - JSONPath: .status.phase + description: Machine status such as Terminating/Pending/Running/Failed etc + name: Phase + type: string + - JSONPath: .status.nodeRef.name + description: Node name associated with this machine + name: NodeName + priority: 1 + type: string group: cluster.k8s.io names: kind: Machine @@ -47,16 +61,16 @@ spec: by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster - autoscaler with cluster-api as provider. Clean-up login in the autoscaler - compares machines v/s nodes to find out machines at provider which + autoscaler with cluster-api as provider. Clean-up logic in the autoscaler + compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. - Another list of nodes is queries from the k8s apiserver and then comparison - is done to find out unregistered machines and are marked for delete. - This field will be set by the actuators and consumed by higher level - entities like autoscaler who will be interfacing with cluster-api - as generic provider. 
+ Another list of nodes is queried from the k8s apiserver and then a + comparison is done to find out unregistered machines and are marked + for delete. This field will be set by the actuators and consumed by + higher level entities like autoscaler that will be interfacing with + cluster-api as generic provider. type: string providerSpec: description: ProviderSpec details Provider-specific configuration to @@ -84,9 +98,12 @@ spec: type: object type: object taints: - description: Taints is the full, authoritative list of taints to apply - to the corresponding Node. This list will overwrite any modifications - made to the Node on an ongoing basis. + description: The list of the taints to be applied to the corresponding + Node in additive manner. This list will not overwrite any other taints + added to the Node on an ongoing basis by other entities. These taints + should be actively reconciled e.g. if you ask the machine controller + to apply a taint and then manually remove the taint the machine controller + will put it back) but not have the machine controller remove any taints items: type: object type: array diff --git a/vendor/github.com/openshift/cluster-api/config/crds/cluster_v1alpha1_machinedeployment.yaml b/vendor/github.com/openshift/cluster-api/config/crds/cluster_v1alpha1_machinedeployment.yaml index f0d5bf730..853225344 100644 --- a/vendor/github.com/openshift/cluster-api/config/crds/cluster_v1alpha1_machinedeployment.yaml +++ b/vendor/github.com/openshift/cluster-api/config/crds/cluster_v1alpha1_machinedeployment.yaml @@ -142,16 +142,16 @@ spec: ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as - provider. Clean-up login in the autoscaler compares machines - v/s nodes to find out machines at provider which could not + provider. 
Clean-up logic in the autoscaler compares machines + to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view - of the list of machines. Another list of nodes is queries - from the k8s apiserver and then comparison is done to find + of the list of machines. Another list of nodes is queried + from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher - level entities like autoscaler who will be interfacing with + level entities like autoscaler that will be interfacing with cluster-api as generic provider. type: string providerSpec: @@ -181,9 +181,13 @@ spec: type: object type: object taints: - description: Taints is the full, authoritative list of taints - to apply to the corresponding Node. This list will overwrite - any modifications made to the Node on an ongoing basis. + description: The list of the taints to be applied to the corresponding + Node in additive manner. This list will not overwrite any + other taints added to the Node on an ongoing basis by other + entities. These taints should be actively reconciled e.g. 
+ if you ask the machine controller to apply a taint and then + manually remove the taint the machine controller will put + it back) but not have the machine controller remove any taints items: type: object type: array diff --git a/vendor/github.com/openshift/cluster-api/config/crds/cluster_v1alpha1_machineset.yaml b/vendor/github.com/openshift/cluster-api/config/crds/cluster_v1alpha1_machineset.yaml index dcc20f00d..73fda8c33 100644 --- a/vendor/github.com/openshift/cluster-api/config/crds/cluster_v1alpha1_machineset.yaml +++ b/vendor/github.com/openshift/cluster-api/config/crds/cluster_v1alpha1_machineset.yaml @@ -34,6 +34,15 @@ spec: type: object spec: properties: + deletePolicy: + description: DeletePolicy defines the policy used to identify nodes + to delete when downscaling. Defaults to "Random". Valid values are + "Random, "Newest", "Oldest" + enum: + - Random + - Newest + - Oldest + type: string minReadySeconds: description: MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will @@ -82,16 +91,16 @@ spec: ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as - provider. Clean-up login in the autoscaler compares machines - v/s nodes to find out machines at provider which could not + provider. Clean-up logic in the autoscaler compares machines + to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view - of the list of machines. Another list of nodes is queries - from the k8s apiserver and then comparison is done to find + of the list of machines. 
Another list of nodes is queried + from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher - level entities like autoscaler who will be interfacing with + level entities like autoscaler that will be interfacing with cluster-api as generic provider. type: string providerSpec: @@ -121,9 +130,13 @@ spec: type: object type: object taints: - description: Taints is the full, authoritative list of taints - to apply to the corresponding Node. This list will overwrite - any modifications made to the Node on an ongoing basis. + description: The list of the taints to be applied to the corresponding + Node in additive manner. This list will not overwrite any + other taints added to the Node on an ongoing basis by other + entities. These taints should be actively reconciled e.g. + if you ask the machine controller to apply a taint and then + manually remove the taint the machine controller will put + it back) but not have the machine controller remove any taints items: type: object type: array diff --git a/vendor/github.com/openshift/cluster-api/config/rbac/rbac_role.yaml b/vendor/github.com/openshift/cluster-api/config/rbac/rbac_role.yaml index 19fa4a3fe..100571871 100644 --- a/vendor/github.com/openshift/cluster-api/config/rbac/rbac_role.yaml +++ b/vendor/github.com/openshift/cluster-api/config/rbac/rbac_role.yaml @@ -29,6 +29,7 @@ rules: - machine.openshift.io resources: - machines + - machines/status verbs: - get - list @@ -41,6 +42,7 @@ rules: - machine.openshift.io resources: - machinedeployments + - machinedeployments/status verbs: - get - list @@ -53,6 +55,7 @@ rules: - machine.openshift.io resources: - machinesets + - machinesets/status verbs: - get - list @@ -89,7 +92,6 @@ rules: - machine.openshift.io resources: - machines - - machines/status verbs: - get - list diff --git a/vendor/github.com/openshift/cluster-api/docs/book/SUMMARY.md 
b/vendor/github.com/openshift/cluster-api/docs/book/SUMMARY.md index dfdf2b3a2..30ada6bdf 100644 --- a/vendor/github.com/openshift/cluster-api/docs/book/SUMMARY.md +++ b/vendor/github.com/openshift/cluster-api/docs/book/SUMMARY.md @@ -14,6 +14,7 @@ * [Machine Controller](common_code/machine_controller.md) * [MachineSet Controller](common_code/machineset_controller.md) * [MachineDeployment Controller](common_code/machinedeployment_controller.md) +* [Node Controller](common_code/node_controller.md) ## Creating a New Provider diff --git a/vendor/github.com/openshift/cluster-api/docs/book/common_code/architecture.md b/vendor/github.com/openshift/cluster-api/docs/book/common_code/architecture.md index cc88459c1..658ad54d2 100644 --- a/vendor/github.com/openshift/cluster-api/docs/book/common_code/architecture.md +++ b/vendor/github.com/openshift/cluster-api/docs/book/common_code/architecture.md @@ -4,8 +4,8 @@ It may be useful to read at least the following chapters of the Kubebuilder book in order to better understand this section. 
-- [What is a Resource](https://github.com/kubernetes-sigs/kubebuilder/blob/master/docs/book/basics/what_is_a_resource.md) -- [What is a Controller](https://github.com/kubernetes-sigs/kubebuilder/blob/master/docs/book/basics/what_is_a_controller.md) +- [What is a Resource](https://book.kubebuilder.io/basics/what_is_a_resource.html) +- [What is a Controller](https://book.kubebuilder.io/basics/what_is_a_controller.html) {% endpanel %} {% panel style="warning", title="Architecture Diagram" %} diff --git a/vendor/github.com/openshift/cluster-api/docs/book/common_code/cluster_controller.md b/vendor/github.com/openshift/cluster-api/docs/book/common_code/cluster_controller.md index e24bfd82e..32a248f4d 100644 --- a/vendor/github.com/openshift/cluster-api/docs/book/common_code/cluster_controller.md +++ b/vendor/github.com/openshift/cluster-api/docs/book/common_code/cluster_controller.md @@ -87,13 +87,6 @@ If a `Cluster` resource is deleted, the controller will call the actuator's ## Cluster Controller Semantics -{% panel style="info", title="Logic sequence" %} -We need a diagram tracing the logic from resource creation through updates -and finally deletion. This was done using the sequences GitBook plugin. -Unfortunately there are (possibly personal) problems with phantomjs which -are making this difficult. -{% endpanel %} - 0. If the `Cluster` hasn't been deleted and doesn't have a finalizer, add one. - If the `Cluster` is being deleted, and there is no finalizer, we're done. - Call the provider specific `Delete()` method. @@ -101,3 +94,15 @@ are making this difficult. - If the `Cluster` has not been deleted, call the `Reconcile()` method. 
[cluster_source]: https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/cluster/v1alpha1/cluster_types.go + +#### cluster object reconciliation logic + +![cluster object reconciliation logic](images/activity_cluster_reconciliation.svg) + +#### cluster object creation sequence + +![Cluster object creation](images/sequence_cluster_creation.svg) + +#### cluster object deletion sequence + +![Cluster object deletion](images/sequence_cluster_deletion.svg) diff --git a/vendor/github.com/openshift/cluster-api/docs/book/common_code/machine_controller.md b/vendor/github.com/openshift/cluster-api/docs/book/common_code/machine_controller.md index 198b3baea..b4770eeb8 100644 --- a/vendor/github.com/openshift/cluster-api/docs/book/common_code/machine_controller.md +++ b/vendor/github.com/openshift/cluster-api/docs/book/common_code/machine_controller.md @@ -109,11 +109,6 @@ The definition of `Exists()` is determined by the provider. ## Machine Controller Semantics -{% panel style="info", title="Logic sequence" %} -We need a diagram tracing the logic from resource creation through updates -and finally deletion. -{% endpanel %} - 0. Determine the `Cluster` associated with the `Machine` from its `cluster.k8s.io/cluster-name` label. - If the `Machine` hasn't been deleted and doesn't have a finalizer, add one. - If the `Machine` is being deleted, and there is no finalizer, we're done @@ -142,6 +137,20 @@ There are two consequences of this: delete the `Machine`. Therefore `Machine`s must be deleted before `Cluster`s. 
{% endpanel %} +#### machine reconciliation logic +![machine reconciliation logic](images/activity_machine_controller_reconciliation.svg) + +#### machine deletion block +![machine deletion block](images/activity_machine_deletion_block.svg) + +#### machine object creation sequence + +![machine object creation](images/sequence_machine_creation.svg) + +#### machine object deletion sequence + +![machine object deletion](images/sequence_machine_deletion.svg) + --- [^1] One reason a `Machine` may not be deleted is if it corresponds to the diff --git a/vendor/github.com/openshift/cluster-api/docs/book/common_code/machinedeployment_controller.md b/vendor/github.com/openshift/cluster-api/docs/book/common_code/machinedeployment_controller.md index e8d868ce6..510ec5102 100644 --- a/vendor/github.com/openshift/cluster-api/docs/book/common_code/machinedeployment_controller.md +++ b/vendor/github.com/openshift/cluster-api/docs/book/common_code/machinedeployment_controller.md @@ -15,8 +15,28 @@ {% endmethod %} {% method %} +## MachineDeploymentStrategy + +{% sample lang="go" %} +[import:'MachineDeploymentStrategy'](../../../pkg/apis/cluster/v1alpha1/machinedeployment_types.go) +{% endmethod %} + +{% method %} +## MachineRollingUpdateDeployment + +{% sample lang="go" %} +[import:'MachineRollingUpdateDeployment'](../../../pkg/apis/cluster/v1alpha1/machinedeployment_types.go) +{% endmethod %} + +{% method %} + ## MachineDeploymentStatus {% sample lang="go" %} [import:'MachineDeploymentStatus'](../../../pkg/apis/cluster/v1alpha1/machinedeployment_types.go) {% endmethod %} + +## MachineDeployment Controller Semantics + +![machinedeployment object reconciliation logic](images/activity_machinedeployment_reconciliation.svg) + diff --git a/vendor/github.com/openshift/cluster-api/docs/book/common_code/machineset_controller.md b/vendor/github.com/openshift/cluster-api/docs/book/common_code/machineset_controller.md index c4ca6014d..37cbeea99 100644 --- 
a/vendor/github.com/openshift/cluster-api/docs/book/common_code/machineset_controller.md +++ b/vendor/github.com/openshift/cluster-api/docs/book/common_code/machineset_controller.md @@ -32,3 +32,24 @@ which implement their intent by modifying provider-specific `Cluster` and {% sample lang="go" %} [import:'MachineSetStatus'](../../../pkg/apis/cluster/v1alpha1/machineset_types.go) {% endmethod %} + +## MachineSet Controller Semantics + +![machineset object reconciliation logic](images/activity_machineset_reconciliation.svg) + +#### filter machine BLOCK + +This code block examines all machines in the namespace of the machineset and filters out machines that do NOT +have all the following conditions (in this order): + +1. The machine has a controller and is controlled by the machineset. +2. The machine is not scheduled for deletion. +3. The machine's label selector matches that of the machineset. + +For machines that fail condition 1, an attempt is made to adopt the machine into the machineset. The result +of this code block is a filtered list of machines that will be processed in the next code block. + +#### sync replica BLOCK + +This code block looks at the filtered machine list and determines whether to scale up or down the number of +machines to match the replica count defined in the machineset. 
diff --git a/vendor/github.com/openshift/cluster-api/docs/book/provider_implementations/register_controllers.md b/vendor/github.com/openshift/cluster-api/docs/book/provider_implementations/register_controllers.md index b72032423..aba469542 100644 --- a/vendor/github.com/openshift/cluster-api/docs/book/provider_implementations/register_controllers.md +++ b/vendor/github.com/openshift/cluster-api/docs/book/provider_implementations/register_controllers.md @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" ) -//+kubebuilder:rbac:groups=solas.k8s.io,resources=solasclusterproviderspecs;solasclusterproviderstatuses,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=solas.cluster.k8s.io,resources=solasclusterproviderspecs;solasclusterproviderstatuses,verbs=get;list;watch;create;update;patch;delete func init() { // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. AddToManagerFuncs = append(AddToManagerFuncs, func(m manager.Manager) error { @@ -68,7 +68,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" ) -//+kubebuilder:rbac:groups=solas.k8s.io,resources=solasmachineproviderspecs;solasmachineproviderstatuses,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=solas.cluster.k8s.io,resources=solasmachineproviderspecs;solasmachineproviderstatuses,verbs=get;list;watch;create;update;patch;delete func init() { // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 
AddToManagerFuncs = append(AddToManagerFuncs, func(m manager.Manager) error { diff --git a/vendor/github.com/openshift/cluster-api/docs/book/provider_implementations/register_schemes.md b/vendor/github.com/openshift/cluster-api/docs/book/provider_implementations/register_schemes.md index 08d42988e..50b532645 100644 --- a/vendor/github.com/openshift/cluster-api/docs/book/provider_implementations/register_schemes.md +++ b/vendor/github.com/openshift/cluster-api/docs/book/provider_implementations/register_schemes.md @@ -28,6 +28,7 @@ import ( "fmt" "os" + "k8s.io/klog" "sigs.k8s.io/cluster-api-provider-solas/pkg/apis" "sigs.k8s.io/cluster-api-provider-solas/pkg/cloud/solas/actuators/cluster" "sigs.k8s.io/cluster-api-provider-solas/pkg/cloud/solas/actuators/machine" @@ -43,6 +44,8 @@ import ( ) func main() { + klog.InitFlags(nil) + cfg := config.GetConfigOrDie() if cfg == nil { panic(fmt.Errorf("GetConfigOrDie didn't die")) diff --git a/vendor/github.com/openshift/cluster-api/docs/developer/releasing.md b/vendor/github.com/openshift/cluster-api/docs/developer/releasing.md index 979d82f76..d6c2bee64 100644 --- a/vendor/github.com/openshift/cluster-api/docs/developer/releasing.md +++ b/vendor/github.com/openshift/cluster-api/docs/developer/releasing.md @@ -16,18 +16,25 @@ ## Process -1. Create a pull request that contains two changes: +For version 0.x.y: + +1. We will target a branch called `release-0.x`. If this is `0.x.0` then we'll + create a branch from master using `git push origin master:release-0.x`, otherwise + simply checkout the existing branch `git checkout release-0.x` +2. Make two changes: 1. Change [the cluster api controller manager image tag][managerimg] from `:latest` to whatever version is being released - 2. Change the `CONTROLLER_IMAGE` variable in the [Makefile][makefile] to the + 2. Change the `CONTROLLER_IMG` variable in the [Makefile][makefile] to the version being released -2. Get the pull request merged -3. 
From the commit in step 1 (that is now in the master branch), build and push - the container image with `make docker-push` -4. Create a tag from this same commit and push the tag to the github repository -5. Revert the commit made in step 1 -6. Open a pull request with the revert change -7. Get that pull request merged + (Note that we do not release the example-provider image, so we don't tag that) +3. Commit it using `git commit -m "Release 0.x.y"` +4. Submit a PR to the `release-0.x` branch, e.g. `git push $USER; hub pull-request -b release-0.x` +5. Get the pull request merged +6. Switch to the release branch and update to pick up the commit. (e.g. `git + checkout release-0.x && git pull`). From there build and push the container + images and fat manifest with `make all-push` (on the 0.1 release branch, we + do `make docker-push`) +7. Create a tag from this same commit `git tag 0.x.y` and push the tag to the github repository `git push origin 0.x.y` 8. Create a release in github based on the tag created above 9. Manually create the release notes by going through the merged PRs since the last release diff --git a/vendor/github.com/openshift/cluster-api/docs/proposals/machine-api-proposal.md b/vendor/github.com/openshift/cluster-api/docs/proposals/machine-api-proposal.md deleted file mode 100644 index 971973a87..000000000 --- a/vendor/github.com/openshift/cluster-api/docs/proposals/machine-api-proposal.md +++ /dev/null @@ -1,132 +0,0 @@ -Minimalistic Machines API -========================= - -This proposal is for a minimalistic start to a new Machines API, as part of the -overall Cluster API project. It is intended to live outside of core Kubernetes -and add optional machine management features to Kubernetes clusters. - -## Capabilities - -This API strives to be able to add these capabilities: - -1. A new Node can be created in a declarative way, including Kubernetes version. 
- It should also be able to specify provider-specific information such as OS image, - instance type, disk configuration, etc., though this will not be portable. - -1. A specific Node can be deleted, freeing external resources associated with - it. - -1. A specific Node can have its kubelet version upgraded or downgraded in a - declarative way\*. - -1. A specific Node can have its OS image upgraded or downgraded in a declarative - way\*. - -\* It is an implementation detail of the provider if these operations are -performed in-place or via Node replacement. - -## Proposal - -This proposal introduces a new API type: Machine. See the full definition in -[types.go](types.go). - -A "Machine" is the declarative spec for a Node, as represented in Kuberenetes -core. If a new Machine object is created, a provider-specific controller will -handle provisioning and installing a new host to register as a new Node matching -the Machine spec. If the Machine's spec is updated, a provider-specific -controller is responsible for updating the Node in-place or replacing the host -with a new one matching the updated spec. If a Machine object is deleted, the -corresponding Node should have its external resources released by the -provider-specific controller, and should be deleted as well. - -Fields like the kubelet version are modeled as fields on the Machine's spec. -Any other information that is provider-specific, though, is part of an opaque -ProviderSpec string that is not portable between different providers. - -The ProviderSpec is recommended to be a serialized API object in a format -owned by that provider, akin to the [Component Config](https://goo.gl/opSc2o) -pattern. This will allow the configuration to be strongly typed, versioned, and -have as much nested depth as appropriate. These provider-specific API -definitions are meant to live outside of the Machines API, which will allow them -to evolve independently of it. 
Attributes like instance type, which network to -use, and the OS image all belong in the ProviderSpec. - -## In-place vs. Replace - -One simplification that might be controversial in this proposal is the lack of -API control over "in-place" versus "replace" reconciliation strategies. For -instance, if a Machine's spec is updated with a different version of kubelet -than is actually running, it is up to the provider-specific controller whether -the request would best be fulfilled by performing an in-place upgrade on the -Node, or by deleting the Node and creating a new one in its place (or reporting -an error if this particular update is not supported). One can force a Node -replacement by deleting and recreating the Machine object rather than updating -it, but no similar mechanism exists to force an in-place change. - -Another approach considered was that modifying an existing Machine should only -ever attempt an in-place modification to the Node, and Node replacement should -only occur by deleting and creating a new Machine. In that case, a provider -would set an error field in the status if it wasn't able to fulfill the -requested in-place change (such as changing the OS image or instance type in a -cloud provider). - -The reason this approach wasn't used was because most cluster upgrade tools -built on top of the Machines API would follow the same pattern: - - for machine in machines: - attempt to upgrade machine in-place - if error: - create new machine - delete old machine - -Since updating a Node in-place is likely going to be faster than completely -replacing it, most tools would opt to use this pattern to attempt an in-place -modification first, before falling back to a full replacement. - -It seems like a much more powerful concept to allow every tool to instead say: - - for machine in machines: - update machine - -and allow the provider to decide if it is capable of performing an in-place -update, or if a full Node replacement is necessary. 
- -## Omitted Capabilities - -### A provider-agnostic mechanism to request new nodes - -In this proposal, only certain attributes of Machines are provider-agnostic and -can be operated on in a generic way. In other iterations of similar proposals, -much care had been taken to allow the creation of truly provider-agnostic -Machines that could be mapped to provider-specific attributes in order to better -support usecases around automated Machine scaling. This introduced a lot of -upfront complexity in the API proposals. - -This proposal starts much more minimalistic, but doesn't preclude the option of -extending the API to support these advanced concepts in the future (see -https://github.com/kubernetes-sigs/cluster-api/issues/22). - -### Dynamic API endpoint - -This proposal lacks the ability to declaratively update the kube-apiserver -endpoint for the kubelet to register with. This feature could be added later, -but doesn't seem to have demand now. Rather than modeling the kube-apiserver -endpoint in the Machine object, it is expected that the cluster installation -tool resolves the correct endpoint to use, starts a provider-specific Machines -controller configured with this endpoint, and that the controller injects the -endpoint into any hosts it provisions. - -## Conditions - -Brian Grant (@bgrant0607) and Eric Tune (@erictune) have indicated that the API pattern of having -"Conditions" lists in object statuses is soon to be deprecated. These have -generally been used as a timeline of state transitions for the object's -reconcilation, and difficult to consume for clients that just want a meaningful -representation of the object's current state. There are no existing examples of -the new pattern to follow instead, just the guidance that we should use -top-level fields in the status to reprensent meaningful information. We can -revisit the specifics when new patterns start to emerge in core. 
- -## Types - -Please see the full types [here](https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/cluster/v1alpha1/machine_types.go). diff --git a/vendor/github.com/openshift/cluster-api/hack/update-bazel.sh b/vendor/github.com/openshift/cluster-api/hack/update-bazel.sh index b32b85096..a00bd54e3 100755 --- a/vendor/github.com/openshift/cluster-api/hack/update-bazel.sh +++ b/vendor/github.com/openshift/cluster-api/hack/update-bazel.sh @@ -17,8 +17,9 @@ set -o errexit set -o nounset set -o pipefail -export KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +export KUBE_ROOT -cd $KUBE_ROOT -find $KUBE_ROOT/vendor -name 'BUILD' -delete +cd "${KUBE_ROOT}" +find "${KUBE_ROOT}/vendor" -name 'BUILD' -delete bazel run //:gazelle diff --git a/vendor/github.com/openshift/cluster-api/hack/verify_clientset.sh b/vendor/github.com/openshift/cluster-api/hack/verify_clientset.sh index 939c87348..8554bbac0 100755 --- a/vendor/github.com/openshift/cluster-api/hack/verify_clientset.sh +++ b/vendor/github.com/openshift/cluster-api/hack/verify_clientset.sh @@ -18,11 +18,12 @@ set -o errexit set -o nounset set -o pipefail -SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/.. +SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
DIFFROOT="${SCRIPT_ROOT}/pkg/client" TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg" _tmp="${SCRIPT_ROOT}/_tmp" GOPATH=$(go env GOPATH) +export GOPATH cleanup() { rm -rf "${_tmp}" diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go index af6c17e0d..f4d5a18a1 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go @@ -38,6 +38,9 @@ const ( // Machine is the Schema for the machines API // +k8s:openapi-gen=true // +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="ProviderID",type="string",JSONPath=".spec.providerID",description="Provider ID" +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Machine status such as Terminating/Pending/Running/Failed etc" +// +kubebuilder:printcolumn:name="NodeName",type="string",JSONPath=".status.nodeRef.name",description="Node name associated with this machine",priority=1 type Machine struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -57,9 +60,12 @@ type MachineSpec struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - // Taints is the full, authoritative list of taints to apply to the corresponding - // Node. This list will overwrite any modifications made to the Node on - // an ongoing basis. + // The list of the taints to be applied to the corresponding Node in additive + // manner. This list will not overwrite any other taints added to the Node on + // an ongoing basis by other entities. These taints should be actively reconciled + // e.g. 
if you ask the machine controller to apply a taint and then manually remove + // the taint the machine controller will put it back) but not have the machine controller + // remove any taints // +optional Taints []corev1.Taint `json:"taints,omitempty"` @@ -87,12 +93,12 @@ type MachineSpec struct { // ProviderID is the identification ID of the machine provided by the provider. // This field must match the provider ID as seen on the node object corresponding to this machine. // This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler - // with cluster-api as provider. Clean-up login in the autoscaler compares machines v/s nodes to find out + // with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out // machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a // generic out-of-tree provider for autoscaler, this field is required by autoscaler to be - // able to have a provider view of the list of machines. Another list of nodes is queries from the k8s apiserver - // and then comparison is done to find out unregistered machines and are marked for delete. - // This field will be set by the actuators and consumed by higher level entities like autoscaler who will + // able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + // and then a comparison is done to find out unregistered machines and are marked for delete. + // This field will be set by the actuators and consumed by higher level entities like autoscaler that will // be interfacing with cluster-api as generic provider. 
// +optional ProviderID *string `json:"providerID,omitempty"` diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go index 56aa44b26..0e7ec62af 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go @@ -59,6 +59,11 @@ type MachineSetSpec struct { // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty"` + // DeletePolicy defines the policy used to identify nodes to delete when downscaling. + // Defaults to "Random". Valid values are "Random", "Newest", "Oldest" + // +kubebuilder:validation:Enum=Random,Newest,Oldest + DeletePolicy string `json:"deletePolicy,omitempty"` + // Selector is a label query over machines that should match the replica count. // Label keys and values that must match in order to be controlled by this MachineSet. // It must match the machine template's labels. @@ -71,6 +76,30 @@ type MachineSetSpec struct { Template MachineTemplateSpec `json:"template,omitempty"` } +// MachineSetDeletePolicy defines how priority is assigned to nodes to delete when +// downscaling a MachineSet. Defaults to "Random". +type MachineSetDeletePolicy string + +const ( + // RandomMachineSetDeletePolicy prioritizes both Machines that have the annotation + // "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy + // (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value). + // Finally, it picks Machines at random to delete. + RandomMachineSetDeletePolicy MachineSetDeletePolicy = "Random" + + // NewestMachineSetDeletePolicy prioritizes both Machines that have the annotation + // "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy + // (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value). 
+ // It then prioritizes the newest Machines for deletion based on the Machine's CreationTimestamp. + NewestMachineSetDeletePolicy MachineSetDeletePolicy = "Newest" + + // OldestMachineSetDeletePolicy prioritizes both Machines that have the annotation + // "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy + // (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value). + // It then prioritizes the oldest Machines for deletion based on the Machine's CreationTimestamp. + OldestMachineSetDeletePolicy MachineSetDeletePolicy = "Oldest" +) + /// [MachineSetSpec] // doxygen marker /// [MachineTemplateSpec] // doxygen marker @@ -171,6 +200,12 @@ func (m *MachineSet) Default() { if len(m.Namespace) == 0 { m.Namespace = metav1.NamespaceDefault } + + if m.Spec.DeletePolicy == "" { + randomPolicy := string(RandomMachineSetDeletePolicy) + log.Printf("Defaulting to %s\n", randomPolicy) + m.Spec.DeletePolicy = randomPolicy + } } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/machineset/status.go b/vendor/github.com/openshift/cluster-api/pkg/controller/machineset/status.go index 51e3758fe..88ad65181 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/machineset/status.go +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/machineset/status.go @@ -112,7 +112,7 @@ func updateMachineSetStatus(c client.Client, ms *v1beta1.MachineSet, newStatus v break } // Update the MachineSet with the latest resource version for the next poll - if getErr = c.Get(context.Background(), client.ObjectKey{Name: ms.Name}, ms); getErr != nil { + if getErr = c.Get(context.Background(), client.ObjectKey{Namespace: ms.Namespace, Name: ms.Name}, ms); getErr != nil { // If the GET fails we can't trust status.Replicas anymore. This error // is bound to be more interesting than the update failure. 
return nil, getErr diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/noderefutil/util.go b/vendor/github.com/openshift/cluster-api/pkg/controller/noderefutil/util.go index a5bf0b842..d7dd79b8d 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/noderefutil/util.go +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/noderefutil/util.go @@ -60,7 +60,7 @@ func GetReadyCondition(status *corev1.NodeStatus) *corev1.NodeCondition { // IsNodeReady returns true if a node is ready; false otherwise. func IsNodeReady(node *corev1.Node) bool { - if node == nil || &node.Status == nil { + if node == nil { return false } for _, c := range node.Status.Conditions { diff --git a/vendor/github.com/openshift/cluster-api/pkg/provider/example/container/Dockerfile b/vendor/github.com/openshift/cluster-api/pkg/provider/example/container/Dockerfile index 3306494e1..3b3f64a9a 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/provider/example/container/Dockerfile +++ b/vendor/github.com/openshift/cluster-api/pkg/provider/example/container/Dockerfile @@ -15,6 +15,8 @@ # Build the manager binary FROM golang:1.11.5 as builder +ARG ARCH + # Copy in the go src WORKDIR $GOPATH/src/sigs.k8s.io/cluster-api COPY pkg/ pkg/ @@ -22,7 +24,7 @@ COPY vendor/ vendor/ COPY cmd/ cmd/ # Build -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -ldflags '-extldflags "-static"' -o ./cmd/example-provider/manager sigs.k8s.io/cluster-api/cmd/example-provider +RUN CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} go build -a -ldflags '-extldflags "-static"' -o ./cmd/example-provider/manager sigs.k8s.io/cluster-api/cmd/example-provider # Copy the controller-manager into a thin image FROM gcr.io/distroless/static:latest diff --git a/vendor/github.com/openshift/cluster-api/pkg/util/util.go b/vendor/github.com/openshift/cluster-api/pkg/util/util.go index a0fcf6df5..368f071a7 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/util/util.go +++ 
b/vendor/github.com/openshift/cluster-api/pkg/util/util.go @@ -85,16 +85,6 @@ func GetControlPlaneMachines(machines []*clusterv1.Machine) (res []*clusterv1.Ma return } -// MachineP converts a slice of machines into a slice of machine pointers. -func MachineP(machines []clusterv1.Machine) []*clusterv1.Machine { - // Convert to list of pointers - ret := make([]*clusterv1.Machine, 0, len(machines)) - for _, machine := range machines { - ret = append(ret, machine.DeepCopy()) - } - return ret -} - // Home returns the user home directory. func Home() string { home := os.Getenv("HOME") @@ -247,8 +237,7 @@ func ParseMachinesYaml(file string) ([]*clusterv1.Machine, error) { var ( bytes [][]byte machineList clusterv1.MachineList - machine clusterv1.Machine - machines = []clusterv1.Machine{} + machines = []*clusterv1.Machine{} ) // TODO: use the universal decoder instead of doing this. @@ -264,7 +253,8 @@ func ParseMachinesYaml(file string) ([]*clusterv1.Machine, error) { if err := json.Unmarshal(ml, &machineList); err != nil { return nil, err } - for _, machine := range machineList.Items { + for i := range machineList.Items { + machine := &machineList.Items[i] if machine.APIVersion == "" || machine.Kind == "" { return nil, errors.New(MachineListFormatDeprecationMessage) } @@ -282,13 +272,14 @@ func ParseMachinesYaml(file string) ([]*clusterv1.Machine, error) { } for _, m := range bytes { - if err := json.Unmarshal(m, &machine); err != nil { + machine := &clusterv1.Machine{} + if err := json.Unmarshal(m, machine); err != nil { return nil, err } machines = append(machines, machine) } - return MachineP(machines), nil + return machines, nil } // isMissingKind reimplements runtime.IsMissingKind as the YAMLOrJSONDecoder diff --git a/vendor/github.com/openshift/cluster-api/scripts/ci-build.sh b/vendor/github.com/openshift/cluster-api/scripts/ci-build.sh index 132ec8260..fbd9fca08 100755 --- a/vendor/github.com/openshift/cluster-api/scripts/ci-build.sh +++ 
b/vendor/github.com/openshift/cluster-api/scripts/ci-build.sh @@ -18,6 +18,6 @@ set -o errexit set -o nounset set -o pipefail -REPO_ROOT=$(dirname "${BASH_SOURCE}")/.. +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. -cd $REPO_ROOT && make manager clusterctl +cd "$REPO_ROOT" && make manager clusterctl diff --git a/vendor/github.com/openshift/cluster-api/scripts/ci-integration.sh b/vendor/github.com/openshift/cluster-api/scripts/ci-integration.sh index 5d664c336..e569a9c78 100755 --- a/vendor/github.com/openshift/cluster-api/scripts/ci-integration.sh +++ b/vendor/github.com/openshift/cluster-api/scripts/ci-integration.sh @@ -21,31 +21,32 @@ set -o pipefail MAKE="make" KUSTOMIZE="kustomize" KUBECTL="kubectl" +KUBECTL_VERSION="v1.13.2" CRD_YAML="crd.yaml" BOOTSTRAP_CLUSTER_NAME="clusterapi-bootstrap" CONTROLLER_REPO="controller-ci" # use arbitrary repo name since we don't need to publish it EXAMPLE_PROVIDER_REPO="example-provider-ci" INTEGRATION_TEST_DIR="./test/integration" +ARCH=${ARCH:=amd64} + install_kustomize() { go get sigs.k8s.io/kustomize } install_kubectl() { - wget https://storage.googleapis.com/kubernetes-release/release/v1.10.2/bin/linux/amd64/kubectl \ + wget https://storage.googleapis.com/kubernetes-release/release/"${KUBECTL_VERSION}"/bin/linux/amd64/kubectl \ --no-verbose -O /usr/local/bin/kubectl chmod +x /usr/local/bin/kubectl } build_containers() { - VERSION=$(git describe --exact-match 2> /dev/null || git describe --match=$(git rev-parse --short=8 HEAD) --always --dirty --abbrev=8) - CONTROLLER_IMG="${CONTROLLER_REPO}:${VERSION}" - EXAMPLE_PROVIDER_IMG="${EXAMPLE_PROVIDER_REPO}:${VERSION}" - export CONTROLLER_IMG="${CONTROLLER_IMG}" - export EXAMPLE_PROVIDER_IMG="${EXAMPLE_PROVIDER_IMG}" - - "${MAKE}" docker-build - "${MAKE}" docker-build-ci + VERSION="$(git describe --exact-match 2> /dev/null || git describe --match="$(git rev-parse --short=8 HEAD)" --always --dirty --abbrev=8)" + export CONTROLLER_IMG="${CONTROLLER_REPO}" + export 
EXAMPLE_PROVIDER_IMG="${EXAMPLE_PROVIDER_REPO}" + + "${MAKE}" docker-build TAG=${VERSION} ARCH=${ARCH} + "${MAKE}" docker-build-ci TAG=${VERSION} ARCH=${ARCH} } prepare_crd_yaml() { @@ -58,10 +59,11 @@ prepare_crd_yaml() { create_bootstrap() { go get sigs.k8s.io/kind kind create cluster --name "${BOOTSTRAP_CLUSTER_NAME}" - export KUBECONFIG="$(kind get kubeconfig-path --name="${BOOTSTRAP_CLUSTER_NAME}")" + KUBECONFIG="$(kind get kubeconfig-path --name="${BOOTSTRAP_CLUSTER_NAME}")" + export KUBECONFIG - kind load docker-image "${CONTROLLER_IMG}" --name "${BOOTSTRAP_CLUSTER_NAME}" - kind load docker-image "${EXAMPLE_PROVIDER_IMG}" --name "${BOOTSTRAP_CLUSTER_NAME}" + kind load docker-image "${CONTROLLER_IMG}-${ARCH}:${VERSION}" --name "${BOOTSTRAP_CLUSTER_NAME}" + kind load docker-image "${EXAMPLE_PROVIDER_IMG}-${ARCH}:${VERSION}" --name "${BOOTSTRAP_CLUSTER_NAME}" } delete_bootstrap() { diff --git a/vendor/github.com/openshift/cluster-api/scripts/ci-is-vendor-in-sync.sh b/vendor/github.com/openshift/cluster-api/scripts/ci-is-vendor-in-sync.sh index 71c11a718..d4d04d935 100755 --- a/vendor/github.com/openshift/cluster-api/scripts/ci-is-vendor-in-sync.sh +++ b/vendor/github.com/openshift/cluster-api/scripts/ci-is-vendor-in-sync.sh @@ -18,8 +18,8 @@ set -o errexit set -o nounset set -o pipefail -REPO_ROOT=$(dirname "${BASH_SOURCE}")/.. +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. -cd $REPO_ROOT +cd "$REPO_ROOT" find vendor -name 'BUILD.bazel' -delete dep check diff --git a/vendor/github.com/openshift/cluster-api/scripts/ci-make.sh b/vendor/github.com/openshift/cluster-api/scripts/ci-make.sh index 7c55a20df..542f741cb 100755 --- a/vendor/github.com/openshift/cluster-api/scripts/ci-make.sh +++ b/vendor/github.com/openshift/cluster-api/scripts/ci-make.sh @@ -18,6 +18,6 @@ set -o errexit set -o nounset set -o pipefail -REPO_ROOT=$(dirname "${BASH_SOURCE}")/.. +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
-cd $REPO_ROOT && make lint-full docker-build +cd "$REPO_ROOT" && make lint-full docker-build diff --git a/vendor/github.com/openshift/cluster-api/scripts/ci-test.sh b/vendor/github.com/openshift/cluster-api/scripts/ci-test.sh index 909cf6012..a697bc3b8 100755 --- a/vendor/github.com/openshift/cluster-api/scripts/ci-test.sh +++ b/vendor/github.com/openshift/cluster-api/scripts/ci-test.sh @@ -18,9 +18,9 @@ set -o errexit set -o nounset set -o pipefail -REPO_ROOT=$(dirname "${BASH_SOURCE}")/.. +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. -cd $REPO_ROOT && \ +cd "$REPO_ROOT" && \ source ./scripts/fetch_ext_bins.sh && \ fetch_tools && \ setup_envs && \ diff --git a/vendor/github.com/openshift/cluster-api/scripts/fetch_ext_bins.sh b/vendor/github.com/openshift/cluster-api/scripts/fetch_ext_bins.sh index 7b0f29173..fcb4aa7e7 100755 --- a/vendor/github.com/openshift/cluster-api/scripts/fetch_ext_bins.sh +++ b/vendor/github.com/openshift/cluster-api/scripts/fetch_ext_bins.sh @@ -59,11 +59,9 @@ function header_text { echo "$header$*$reset" } -rc=0 tmp_root=/tmp kb_root_dir=$tmp_root/kubebuilder -kb_orig=$(pwd) # Skip fetching and untaring the tools by setting the SKIP_FETCH_TOOLS variable # in your environment to any value: