From 9aab650206829392e9ad00591743a86c3b0c77fd Mon Sep 17 00:00:00 2001 From: I308301 Date: Wed, 25 Mar 2020 23:00:16 +0530 Subject: [PATCH] Added predicates to the controller manager with option reconcile only if annotation present. Apply suggestions from code review Co-Authored-By: Amshuman K R --- api/v1alpha1/etcd_types.go | 3 + api/v1alpha1/zz_generated.deepcopy.go | 5 + charts/etcd/templates/etcd-statefulset.yaml | 3 - .../crd/bases/druid.gardener.cloud_etcds.yaml | 5 + controllers/etcd_controller.go | 187 +- controllers/etcd_controller_test.go | 10 +- go.mod | 4 +- go.sum | 5 + main.go | 14 +- pkg/predicate/predicate.go | 188 + .../gardener/gardener/pkg/apis/core/doc.go | 19 + .../gardener/pkg/apis/core/field_constants.go | 27 + .../gardener/pkg/apis/core/register.go | 74 + .../pkg/apis/core/types_backupbucket.go | 82 + .../pkg/apis/core/types_backupentry.go | 68 + .../pkg/apis/core/types_cloudprofile.go | 152 + .../gardener/pkg/apis/core/types_common.go | 105 + .../apis/core/types_controllerinstallation.go | 72 + .../apis/core/types_controllerregistration.go | 73 + .../gardener/pkg/apis/core/types_plant.go | 104 + .../gardener/pkg/apis/core/types_project.go | 118 + .../gardener/pkg/apis/core/types_quota.go | 67 + .../pkg/apis/core/types_secretbinding.go | 44 + .../gardener/pkg/apis/core/types_seed.go | 185 + .../gardener/pkg/apis/core/types_shoot.go | 758 ++++ .../pkg/apis/core/types_shootstate.go | 71 + .../gardener/pkg/apis/core/types_utils.go | 67 + .../core/v1beta1/constants/types_constants.go | 266 ++ .../pkg/apis/core/v1beta1/constants/utils.go | 36 + .../pkg/apis/core/v1beta1/conversions.go | 163 + .../pkg/apis/core/v1beta1/defaults.go | 206 + .../gardener/pkg/apis/core/v1beta1/doc.go | 24 + .../core/v1beta1/helper/condition_builder.go | 140 + .../pkg/apis/core/v1beta1/helper/errors.go | 149 + .../pkg/apis/core/v1beta1/helper/helper.go | 776 ++++ .../pkg/apis/core/v1beta1/register.go | 75 + .../apis/core/v1beta1/types_backupbucket.go | 90 + .../apis/core/v1beta1/types_backupentry.go | 75 + .../apis/core/v1beta1/types_cloudprofile.go | 181 + .../pkg/apis/core/v1beta1/types_common.go | 179 + .../v1beta1/types_controllerinstallation.go | 76 + .../v1beta1/types_controllerregistration.go | 78 + .../pkg/apis/core/v1beta1/types_plant.go | 112 + .../pkg/apis/core/v1beta1/types_project.go | 139 + .../pkg/apis/core/v1beta1/types_quota.go | 56 + .../apis/core/v1beta1/types_secretbinding.go | 47 + .../pkg/apis/core/v1beta1/types_seed.go | 208 + .../pkg/apis/core/v1beta1/types_shoot.go | 906 ++++ .../pkg/apis/core/v1beta1/types_utils.go | 80 + .../core/v1beta1/zz_generated.conversion.go | 3974 +++++++++++++++++ .../core/v1beta1/zz_generated.deepcopy.go | 3180 +++++++++++++ .../core/v1beta1/zz_generated.defaults.go | 103 + .../pkg/apis/core/zz_generated.deepcopy.go | 3320 ++++++++++++++ .../gardener/pkg/apis/extensions/register.go | 19 + .../pkg/apis/extensions/v1alpha1/doc.go | 20 + .../pkg/apis/extensions/v1alpha1/register.go | 67 + .../pkg/apis/extensions/v1alpha1/types.go | 87 + .../extensions/v1alpha1/types_backupbucket.go | 81 + .../extensions/v1alpha1/types_backupentry.go | 84 + .../apis/extensions/v1alpha1/types_cluster.go | 58 + .../extensions/v1alpha1/types_controlplane.go | 97 + .../extensions/v1alpha1/types_defaults.go | 106 + .../extensions/v1alpha1/types_extension.go | 70 + .../v1alpha1/types_infrastructure.go | 84 + .../apis/extensions/v1alpha1/types_network.go | 80 + .../v1alpha1/types_operatingsystemconfig.go | 204 + .../apis/extensions/v1alpha1/types_worker.go | 177 + 
.../v1alpha1/zz_generated.deepcopy.go | 1305 ++++++ .../gardener/gardener/pkg/logger/logger.go | 84 + .../gardener/gardener/pkg/utils/encoding.go | 166 + .../gardener/pkg/utils/errors/errors.go | 262 ++ .../gardener/pkg/utils/errors/multierror.go | 42 + .../pkg/utils/kubernetes/health/health.go | 352 ++ .../pkg/utils/kubernetes/health/pod_health.go | 53 + .../gardener/pkg/utils/miscellaneous.go | 124 + .../gardener/gardener/pkg/utils/random.go | 43 + .../gardener/pkg/utils/template_engine.go | 100 + .../gardener/gardener/pkg/utils/timewindow.go | 237 + vendor/github.com/hashicorp/errwrap/LICENSE | 354 ++ vendor/github.com/hashicorp/errwrap/README.md | 89 + .../github.com/hashicorp/errwrap/errwrap.go | 169 + vendor/github.com/hashicorp/errwrap/go.mod | 1 + .../hashicorp/go-multierror/.travis.yml | 12 + .../hashicorp/go-multierror/LICENSE | 353 ++ .../hashicorp/go-multierror/Makefile | 31 + .../hashicorp/go-multierror/README.md | 97 + .../hashicorp/go-multierror/append.go | 41 + .../hashicorp/go-multierror/flatten.go | 26 + .../hashicorp/go-multierror/format.go | 27 + .../github.com/hashicorp/go-multierror/go.mod | 3 + .../github.com/hashicorp/go-multierror/go.sum | 4 + .../hashicorp/go-multierror/multierror.go | 51 + .../hashicorp/go-multierror/prefix.go | 37 + .../hashicorp/go-multierror/sort.go | 16 + vendor/github.com/onsi/ginkgo/CHANGELOG.md | 26 + .../github.com/onsi/ginkgo/config/config.go | 10 +- vendor/github.com/onsi/ginkgo/ginkgo_dsl.go | 4 +- .../internal/codelocation/code_location.go | 20 +- .../ginkgo/internal/leafnodes/benchmarker.go | 2 +- .../onsi/ginkgo/internal/remote/aggregator.go | 12 +- .../onsi/ginkgo/internal/remote/server.go | 2 +- .../onsi/ginkgo/internal/spec/spec.go | 10 +- .../onsi/ginkgo/internal/spec/specs.go | 4 +- .../ginkgo/internal/specrunner/spec_runner.go | 2 +- .../onsi/ginkgo/reporters/default_reporter.go | 3 + .../onsi/ginkgo/reporters/junit_reporter.go | 25 +- .../ginkgo/reporters/teamcity_reporter.go | 9 +- vendor/github.com/onsi/ginkgo/types/types.go | 2 +- vendor/github.com/onsi/gomega/.travis.yml | 1 + vendor/github.com/onsi/gomega/CHANGELOG.md | 32 + .../github.com/onsi/gomega/format/format.go | 37 +- .../onsi/gomega/gbytes/say_matcher.go | 4 +- vendor/github.com/onsi/gomega/gexec/build.go | 4 +- .../onsi/gomega/gexec/exit_matcher.go | 2 + .../onsi/gomega/gexec/prefixed_writer.go | 4 +- .../github.com/onsi/gomega/gexec/session.go | 3 + vendor/github.com/onsi/gomega/gomega_dsl.go | 10 +- .../asyncassertion/async_assertion.go | 2 + vendor/github.com/onsi/gomega/matchers.go | 16 + .../matchers/assignable_to_type_of_matcher.go | 2 + .../onsi/gomega/matchers/be_a_directory.go | 2 + .../onsi/gomega/matchers/be_a_regular_file.go | 2 + .../gomega/matchers/be_an_existing_file.go | 2 + .../onsi/gomega/matchers/be_closed_matcher.go | 2 + .../gomega/matchers/be_element_of_matcher.go | 57 + .../onsi/gomega/matchers/be_empty_matcher.go | 2 + .../matchers/be_equivalent_to_matcher.go | 2 + .../onsi/gomega/matchers/be_false_matcher.go | 2 + .../onsi/gomega/matchers/be_identical_to.go | 2 + .../onsi/gomega/matchers/be_nil_matcher.go | 2 + .../gomega/matchers/be_numerically_matcher.go | 2 + .../onsi/gomega/matchers/be_sent_matcher.go | 2 + .../gomega/matchers/be_temporally_matcher.go | 2 + .../onsi/gomega/matchers/be_true_matcher.go | 2 + .../onsi/gomega/matchers/consist_of.go | 2 + .../matchers/contain_element_matcher.go | 22 +- .../matchers/contain_substring_matcher.go | 2 + .../onsi/gomega/matchers/have_cap_matcher.go | 2 + 
.../onsi/gomega/matchers/have_key_matcher.go | 2 + .../matchers/have_key_with_value_matcher.go | 2 + .../gomega/matchers/have_occurred_matcher.go | 2 + .../onsi/gomega/matchers/receive_matcher.go | 2 + .../matchers/semi_structured_data_support.go | 2 + .../goraph/bipartitegraph/bipartitegraph.go | 3 +- .../onsi/gomega/matchers/type_support.go | 3 + .../k8s.io/apimachinery/pkg/util/rand/rand.go | 127 + vendor/modules.txt | 19 +- 147 files changed, 22833 insertions(+), 150 deletions(-) create mode 100644 pkg/predicate/predicate.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/field_constants.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_backupbucket.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_backupentry.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_common.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerinstallation.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_plant.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_quota.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_secretbinding.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_shootstate.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/types_utils.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/utils.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/conversions.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition_builder.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupbucket.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupentry.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerinstallation.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go create mode 100644 
vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_plant.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_project.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_quota.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_secretbinding.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_utils.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/register.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupbucket.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupentry.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_cluster.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_controlplane.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_defaults.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_extension.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_infrastructure.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_network.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_worker.go create mode 100644 vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener/pkg/logger/logger.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/encoding.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/errors/errors.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/errors/multierror.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/health.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/pod_health.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/miscellaneous.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/random.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/template_engine.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/timewindow.go create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE create mode 100644 vendor/github.com/hashicorp/errwrap/README.md create mode 100644 
vendor/github.com/hashicorp/errwrap/errwrap.go create mode 100644 vendor/github.com/hashicorp/errwrap/go.mod create mode 100644 vendor/github.com/hashicorp/go-multierror/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go create mode 100644 vendor/github.com/hashicorp/go-multierror/go.mod create mode 100644 vendor/github.com/hashicorp/go-multierror/go.sum create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/rand/rand.go diff --git a/api/v1alpha1/etcd_types.go b/api/v1alpha1/etcd_types.go index 522fcf563..5b1ac0e99 100644 --- a/api/v1alpha1/etcd_types.go +++ b/api/v1alpha1/etcd_types.go @@ -258,6 +258,9 @@ type LastOperation struct { // EtcdStatus defines the observed state of Etcd type EtcdStatus struct { + // ObservedGeneration is the most recent generation observed for this resource. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` // +optional Etcd CrossVersionObjectReference `json:"etcd,omitempty"` // +optional diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index a6127ab71..5f78076bc 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -289,6 +289,11 @@ func (in *EtcdSpec) DeepCopy() *EtcdSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdStatus) DeepCopyInto(out *EtcdStatus) { *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } out.Etcd = in.Etcd if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions diff --git a/charts/etcd/templates/etcd-statefulset.yaml b/charts/etcd/templates/etcd-statefulset.yaml index 302882a8b..e92d97937 100644 --- a/charts/etcd/templates/etcd-statefulset.yaml +++ b/charts/etcd/templates/etcd-statefulset.yaml @@ -29,9 +29,6 @@ spec: matchLabels: name: etcd instance: {{ .Values.name }} -{{- if .Values.labels }} -{{ toYaml .Values.labels | indent 6 }} -{{- end }} template: metadata: annotations: diff --git a/config/crd/bases/druid.gardener.cloud_etcds.yaml b/config/crd/bases/druid.gardener.cloud_etcds.yaml index a255386e8..00eaaec58 100644 --- a/config/crd/bases/druid.gardener.cloud_etcds.yaml +++ b/config/crd/bases/druid.gardener.cloud_etcds.yaml @@ -447,6 +447,11 @@ spec: type: object lastError: type: string + observedGeneration: + description: ObservedGeneration is the most recent generation observed for + this resource. 
+ format: int64 + type: integer ready: type: boolean readyReplicas: diff --git a/controllers/etcd_controller.go b/controllers/etcd_controller.go index bbfe46198..42d797692 100644 --- a/controllers/etcd_controller.go +++ b/controllers/etcd_controller.go @@ -35,13 +35,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1" "github.com/gardener/etcd-druid/pkg/chartrenderer" "github.com/gardener/etcd-druid/pkg/common" + druidpredicates "github.com/gardener/etcd-druid/pkg/predicate" "github.com/gardener/etcd-druid/pkg/utils" + "github.com/gardener/gardener/pkg/utils/kubernetes/health" kubernetes "github.com/gardener/etcd-druid/pkg/client/kubernetes" + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + "github.com/gardener/gardener/pkg/utils/imagevector" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -160,10 +165,9 @@ func (r *EtcdReconciler) InitializeControllerWithImageVector() (*EtcdReconciler, // +kubebuilder:rbac:groups=druid.gardener.cloud,resources=etcds,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=druid.gardener.cloud,resources=etcds/status,verbs=get;update;patch -// Reconcile reconciles the . +// Reconcile reconciles the etcd. func (r *EtcdReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { - // your logic here etcd := &druidv1alpha1.Etcd{} if err := r.Get(context.TODO(), req.NamespacedName, etcd); err != nil { if errors.IsNotFound(err) { @@ -174,42 +178,34 @@ func (r *EtcdReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { // Error reading the object - requeue the request. return ctrl.Result{}, err } - etcdCopy := etcd.DeepCopy() - // Update the found object and write the result back if there are any changes - if !reflect.DeepEqual(etcd.Spec, etcdCopy.Spec) { - etcdCopy.Spec = etcd.Spec - } + logger.Infof("Reconciling etcd: %s/%s", etcd.GetNamespace(), etcd.GetName()) - if !etcdCopy.DeletionTimestamp.IsZero() { + if !etcd.DeletionTimestamp.IsZero() { logger.Infof("Deletion timestamp set for etcd: %s", etcd.GetName()) - if err := r.removeFinalizersToDependantSecrets(etcdCopy); err != nil { - if err := r.updateEtcdErrorStatus(etcd, etcdCopy, err); err != nil { + if err := r.removeFinalizersToDependantSecrets(etcd); err != nil { + if err := r.updateEtcdErrorStatus(etcd, err); err != nil { return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, + Requeue: true, }, err } return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, + Requeue: true, }, err } if sets.NewString(etcd.Finalizers...).Has(FinalizerName) { logger.Infof("Removing finalizer (%s) from etcd %s", FinalizerName, etcd.GetName()) - finalizers := sets.NewString(etcdCopy.Finalizers...) + finalizers := sets.NewString(etcd.Finalizers...) 
finalizers.Delete(FinalizerName) - etcdCopy.Finalizers = finalizers.UnsortedList() - if err := r.Patch(context.TODO(), etcdCopy, client.MergeFrom(etcd)); err != nil { - if err := r.updateEtcdErrorStatus(etcd, etcdCopy, err); err != nil { + etcd.Finalizers = finalizers.UnsortedList() + if err := r.Update(context.TODO(), etcd); err != nil { + if err := r.updateEtcdErrorStatus(etcd, err); err != nil { return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, + Requeue: true, }, err } return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, + Requeue: true, }, err } } @@ -221,55 +217,46 @@ func (r *EtcdReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { if finalizers := sets.NewString(etcd.Finalizers...); !finalizers.Has(FinalizerName) { logger.Infof("Adding finalizer (%s) to etcd %s", FinalizerName, etcd.GetName()) finalizers.Insert(FinalizerName) - etcdCopy.Finalizers = finalizers.UnsortedList() - if err := r.Patch(context.TODO(), etcdCopy, client.MergeFrom(etcd)); err != nil { - if err := r.updateEtcdErrorStatus(etcd, etcdCopy, err); err != nil { + etcd.Finalizers = finalizers.UnsortedList() + if err := r.Update(context.TODO(), etcd); err != nil { + if err := r.updateEtcdErrorStatus(etcd, err); err != nil { return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, + Requeue: true, }, err } return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, + Requeue: true, }, err } } - if err := r.addFinalizersToDependantSecrets(etcdCopy); err != nil { - if err := r.updateEtcdErrorStatus(etcd, etcdCopy, err); err != nil { + if err := r.addFinalizersToDependantSecrets(etcd); err != nil { + if err := r.updateEtcdErrorStatus(etcd, err); err != nil { return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, + Requeue: true, }, err } } - svc, ss, err := r.reconcileEtcd(etcdCopy) + svc, ss, err := r.reconcileEtcd(etcd) if err != nil { - if err := r.updateEtcdErrorStatus(etcd, etcdCopy, err); err != nil { + if err := r.updateEtcdErrorStatus(etcd, err); err != nil { return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, + Requeue: true, }, err } return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, + Requeue: true, }, err } - if err := r.updateEtcdStatus(etcdCopy, etcd, svc, ss); err != nil { + if err := r.updateEtcdStatus(etcd, svc, ss); err != nil { return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, + Requeue: true, }, err } - logger.Infof("Successfully reconciled etcd: %s", etcd.GetName()) - return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Minute * 5, + Requeue: false, }, nil } @@ -572,6 +559,7 @@ func (r *EtcdReconciler) reconcileStatefulSet(cm *corev1.ConfigMap, svc *corev1. if err := r.Get(context.TODO(), types.NamespacedName{Name: filteredStatefulSets[0].Name, Namespace: filteredStatefulSets[0].Namespace}, ss); err != nil { return nil, err } + // Statefulset is claimed by for this etcd. Just sync the specs if ss, err = r.syncStatefulSetSpec(ss, cm, svc, etcd, values); err != nil { return nil, err @@ -643,10 +631,16 @@ func (r *EtcdReconciler) syncStatefulSetSpec(ss *appsv1.StatefulSet, cm *corev1. 
if reflect.DeepEqual(ss.Spec, decoded.Spec) { return ss, nil } + ssCopy := ss.DeepCopy() ssCopy.Spec.Replicas = decoded.Spec.Replicas ssCopy.Spec.UpdateStrategy = decoded.Spec.UpdateStrategy + recreateSTS := false + if !reflect.DeepEqual(ssCopy.Spec.Selector, decoded.Spec.Selector) { + recreateSTS = true + } + // Applying suggestions from containers := getContainerMapFromPodTemplateSpec(ssCopy.Spec.Template.Spec) for i, c := range decoded.Spec.Template.Spec.Containers { @@ -659,9 +653,14 @@ func (r *EtcdReconciler) syncStatefulSetSpec(ss *appsv1.StatefulSet, cm *corev1. ssCopy.Spec.Template = decoded.Spec.Template - err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { - return r.Patch(context.TODO(), ssCopy, client.MergeFrom(ss)) - }) + if recreateSTS { + logger.Infof("selector changed, recreating statefulset: %s", ssCopy.Name) + err = r.recreateStatefulset(decoded) + } else { + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + return r.Patch(context.TODO(), ssCopy, client.MergeFrom(ss)) + }) + } // Ignore the precondition violated error, this machine is already updated // with the desired label. @@ -670,12 +669,25 @@ func (r *EtcdReconciler) syncStatefulSetSpec(ss *appsv1.StatefulSet, cm *corev1. err = nil } if err != nil { - logger.Infof("patching statefulset failed for %s", ss.Name) return nil, err } return ssCopy, err } +func (r *EtcdReconciler) recreateStatefulset(ss *appsv1.StatefulSet) error { + skipDelete := false + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if !skipDelete { + if err := r.Delete(context.TODO(), ss); err != nil && !errors.IsNotFound(err) { + return err + } + } + skipDelete = true + return r.Create(context.TODO(), ss) + }) + return err +} + func (r *EtcdReconciler) getStatefulSetFromEtcd(etcd *druidv1alpha1.Etcd, cm *corev1.ConfigMap, svc *corev1.Service, values map[string]interface{}) (*appsv1.StatefulSet, error) { var err error decoded := &appsv1.StatefulSet{} @@ -852,8 +864,11 @@ func (r *EtcdReconciler) getMapFromEtcd(etcd *druidv1alpha1.Etcd) (map[string]in "storageContainer": etcd.Spec.Backup.Store.Container, "storePrefix": etcd.Spec.Backup.Store.Prefix, "storageProvider": utils.StorageProviderFromInfraProvider(etcd.Spec.Backup.Store.Provider), - "storeSecret": etcd.Spec.Backup.Store.SecretRef.Name, } + if etcd.Spec.Backup.Store.SecretRef != nil { + storeValues["storeSecret"] = etcd.Spec.Backup.Store.SecretRef.Name + } + values["store"] = storeValues } @@ -937,19 +952,19 @@ func (r *EtcdReconciler) removeFinalizersToDependantSecrets(etcd *druidv1alpha1. 
return nil } -func (r *EtcdReconciler) updateEtcdErrorStatus(etcdCopy, etcd *druidv1alpha1.Etcd, lastError error) error { - +func (r *EtcdReconciler) updateEtcdErrorStatus(etcd *druidv1alpha1.Etcd, lastError error) error { lastErrStr := fmt.Sprintf("%v", lastError) - etcdCopy.Status.LastError = &lastErrStr - if err := r.Status().Update(context.TODO(), etcdCopy); err != nil && !errors.IsNotFound(err) { + etcd.Status.LastError = &lastErrStr + etcd.Status.ObservedGeneration = &etcd.Generation + if err := r.Status().Update(context.TODO(), etcd); err != nil && !errors.IsNotFound(err) { return err } - return nil + return r.removeOperationAnnotation(etcd) } -func (r *EtcdReconciler) updateEtcdStatus(etcdCopy, etcd *druidv1alpha1.Etcd, svc *corev1.Service, ss *appsv1.StatefulSet) error { +func (r *EtcdReconciler) updateEtcdStatus(etcd *druidv1alpha1.Etcd, svc *corev1.Service, ss *appsv1.StatefulSet) error { svcName := svc.Name - etcdCopy.Status.Etcd = druidv1alpha1.CrossVersionObjectReference{ + etcd.Status.Etcd = druidv1alpha1.CrossVersionObjectReference{ APIVersion: ss.APIVersion, Kind: ss.Kind, Name: ss.Name, @@ -958,18 +973,28 @@ func (r *EtcdReconciler) updateEtcdStatus(etcdCopy, etcd *druidv1alpha1.Etcd, sv for _, condition := range ss.Status.Conditions { conditions = append(conditions, convertConditionsToEtcd(&condition)) } - etcdCopy.Status.Conditions = conditions + etcd.Status.Conditions = conditions // To be changed once we have multiple replicas. - etcdCopy.Status.CurrentReplicas = ss.Status.CurrentReplicas - etcdCopy.Status.ReadyReplicas = ss.Status.ReadyReplicas - etcdCopy.Status.UpdatedReplicas = ss.Status.UpdatedReplicas - etcdCopy.Status.Ready = (ss.Status.ReadyReplicas == ss.Status.Replicas) - etcdCopy.Status.ServiceName = &svcName - - if err := r.Status().Update(context.TODO(), etcdCopy); err != nil && !errors.IsNotFound(err) { + etcd.Status.CurrentReplicas = ss.Status.CurrentReplicas + etcd.Status.ReadyReplicas = ss.Status.ReadyReplicas + etcd.Status.UpdatedReplicas = ss.Status.UpdatedReplicas + etcd.Status.Ready = (health.CheckStatefulSet(ss) == nil) + etcd.Status.ServiceName = &svcName + etcd.Status.LastError = nil + etcd.Status.ObservedGeneration = &etcd.Generation + + if err := r.Status().Update(context.TODO(), etcd); err != nil && !errors.IsNotFound(err) { return err } + return r.removeOperationAnnotation(etcd) +} + +func (r *EtcdReconciler) removeOperationAnnotation(etcd *druidv1alpha1.Etcd) error { + if _, ok := etcd.Annotations[v1beta1constants.GardenerOperation]; ok { + delete(etcd.Annotations, v1beta1constants.GardenerOperation) + return r.Update(context.TODO(), etcd) + } return nil } @@ -983,15 +1008,6 @@ func convertConditionsToEtcd(condition *appsv1.StatefulSetCondition) druidv1alph } } -// SetupWithManager sets up manager with a new controller and r as the reconcile.Reconciler -func (r *EtcdReconciler) SetupWithManager(mgr ctrl.Manager, workers int) error { - return ctrl.NewControllerManagedBy(mgr).WithOptions(controller.Options{ - MaxConcurrentReconciles: workers, - }).For(&druidv1alpha1.Etcd{}). - Owns(&appsv1.StatefulSet{}). - Complete(r) -} - func (r *EtcdReconciler) claimStatefulSets(etcd *druidv1alpha1.Etcd, selector labels.Selector, ss *appsv1.StatefulSetList) ([]*appsv1.StatefulSet, error) { // If any adoptions are attempted, we should first recheck for deletion with // an uncached quorum read sometime after listing Machines (see #42639). 
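// A sketch (not taken from this patch) of the gating contract established by the status handling
// above together with the predicates added in pkg/predicate/predicate.go further below: once a
// reconcile has run, status.observedGeneration equals metadata.generation, lastError is cleared on
// success, and the gardener.cloud/operation annotation is removed, so no further event passes the
// event filters until the spec changes again, an error is recorded, or the annotation is re-added.
// The helper name "needsReconcile" is illustrative only; it approximates the OR-ed predicates for a
// single Etcd object and ignores the resourceVersion comparison they also perform.
package example

import (
	druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1"
	v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
)

func needsReconcile(etcd *druidv1alpha1.Etcd) bool {
	// An explicit operation annotation always admits the object (HasOperationAnnotation).
	if etcd.Annotations[v1beta1constants.GardenerOperation] == v1beta1constants.GardenerOperationReconcile {
		return true
	}
	// A recorded error re-admits the object (LastOperationNotSuccessful).
	if etcd.Status.LastError != nil {
		return true
	}
	// Otherwise reconcile only while the current generation has not been observed yet
	// (GenerationOrResourceVersionChangedPredicate).
	return etcd.Status.ObservedGeneration == nil || *etcd.Status.ObservedGeneration != etcd.Generation
}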
@@ -1045,3 +1061,24 @@ func (r *EtcdReconciler) claimConfigMaps(etcd *druidv1alpha1.Etcd, selector labe cm := NewEtcdDruidRefManager(r, etcd, selector, etcdGVK, canAdoptFunc) return cm.ClaimConfigMaps(ss) } + +// SetupWithManager sets up manager with a new controller and r as the reconcile.Reconciler +func (r *EtcdReconciler) SetupWithManager(mgr ctrl.Manager, workers int, ignoreOperationAnnotation bool) error { + predicates := []predicate.Predicate{ + druidpredicates.GenerationOrResourceVersionChangedPredicate{}, + druidpredicates.LastOperationNotSuccessful(), + } + builder := ctrl.NewControllerManagedBy(mgr).WithOptions(controller.Options{ + MaxConcurrentReconciles: workers, + }) + if !ignoreOperationAnnotation { + predicates = append(predicates, druidpredicates.HasOperationAnnotation()) + } + builder = builder.WithEventFilter(druidpredicates.Or(predicates...)) + return builder. + For(&druidv1alpha1.Etcd{}). + Owns(&appsv1.StatefulSet{}). + Owns(&v1.Service{}). + Owns(&v1.ConfigMap{}). + Complete(r) +} diff --git a/controllers/etcd_controller_test.go b/controllers/etcd_controller_test.go index fee7ac02e..920893b64 100644 --- a/controllers/etcd_controller_test.go +++ b/controllers/etcd_controller_test.go @@ -205,14 +205,14 @@ func getEtcd(name, namespace string) *druidv1alpha1.Etcd { "role": "test", }, Labels: map[string]string{ - "app": "etcd-statefulset", - "role": "test", - "name": name, + "app": "etcd-statefulset", + "role": "test", + "instance": name, }, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - "app": "etcd-statefulset", - "name": name, + "app": "etcd-statefulset", + "instance": name, }, }, Replicas: 1, diff --git a/go.mod b/go.mod index 61054b4fe..c77861905 100644 --- a/go.mod +++ b/go.mod @@ -11,8 +11,8 @@ require ( github.com/imdario/mergo v0.3.8 // indirect github.com/json-iterator/go v1.1.9 // indirect github.com/mitchellh/reflectwalk v1.0.1 // indirect - github.com/onsi/ginkgo v1.8.0 - github.com/onsi/gomega v1.5.0 + github.com/onsi/ginkgo v1.10.1 + github.com/onsi/gomega v1.7.0 github.com/prometheus/client_golang v1.3.0 // indirect github.com/sirupsen/logrus v1.4.2 go.uber.org/atomic v1.5.1 // indirect diff --git a/go.sum b/go.sum index c2790ac04..ec665281d 100644 --- a/go.sum +++ b/go.sum @@ -300,11 +300,15 @@ github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/pborman/uuid v1.2.0 
h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -634,6 +638,7 @@ k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783 h1:V6ndwCPoao1 k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655 h1:CS1tBQz3HOXiseWZu6ZicKX361CZLT97UFnnPx0aqBw= k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= +k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad h1:IMoNR9pilTBaCS5WpwWnAdmoVYVeXowOD3bLrwxIAtQ= k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= k8s.io/autoscaler v0.0.0-20190805135949-100e91ba756e h1:5AX59ZgftHpbmNupSWosdtW4q/rCnF4s/0J0dEfJkAQ= k8s.io/autoscaler v0.0.0-20190805135949-100e91ba756e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA= diff --git a/main.go b/main.go index 65c4c365b..a9035b99d 100644 --- a/main.go +++ b/main.go @@ -43,13 +43,19 @@ func init() { } func main() { - var metricsAddr string - var enableLeaderElection bool - var workers int + var ( + metricsAddr string + enableLeaderElection bool + workers int + ignoreOperationAnnotation bool + ) + flag.IntVar(&workers, "workers", 3, "Number of worker threads.") flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") + flag.BoolVar(&ignoreOperationAnnotation, "ignore-operation-annotation", true, "Ignore the operation annotation or not.") + flag.Parse() ctrl.SetLogger(zap.Logger(true)) @@ -69,7 +75,7 @@ func main() { setupLog.Error(err, "unable to initialize controller with image vector") os.Exit(1) } - err = ec.SetupWithManager(mgr, workers) + err = ec.SetupWithManager(mgr, workers, ignoreOperationAnnotation) if err != nil { setupLog.Error(err, "unable to create controller", "controller", "Etcd") os.Exit(1) diff --git a/pkg/predicate/predicate.go b/pkg/predicate/predicate.go new file mode 100644 index 000000000..cd5bd9d31 --- /dev/null +++ b/pkg/predicate/predicate.go @@ -0,0 +1,188 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package predicate + +import ( + druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/runtime" + + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" +) + +// Log is the logger for predicates. 
+var (
+    logger = logrus.New()
+)
+
+type or struct {
+    predicates []predicate.Predicate
+}
+
+func (o *or) orRange(f func(predicate.Predicate) bool) bool {
+    for _, p := range o.predicates {
+        if f(p) {
+            return true
+        }
+    }
+    return false
+}
+
+// Create implements Predicate.
+func (o *or) Create(event event.CreateEvent) bool {
+    return o.orRange(func(p predicate.Predicate) bool { return p.Create(event) })
+}
+
+// Delete implements Predicate.
+func (o *or) Delete(event event.DeleteEvent) bool {
+    return o.orRange(func(p predicate.Predicate) bool { return p.Delete(event) })
+}
+
+// Update implements Predicate.
+func (o *or) Update(event event.UpdateEvent) bool {
+    return o.orRange(func(p predicate.Predicate) bool { return p.Update(event) })
+}
+
+// Generic implements Predicate.
+func (o *or) Generic(event event.GenericEvent) bool {
+    return o.orRange(func(p predicate.Predicate) bool { return p.Generic(event) })
+}
+
+// InjectFunc implements Injector.
+func (o *or) InjectFunc(f inject.Func) error {
+    for _, p := range o.predicates {
+        if err := f(p); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+// Or builds a logical OR gate of passed predicates.
+func Or(predicates ...predicate.Predicate) predicate.Predicate {
+    return &or{predicates}
+}
+
+// HasOperationAnnotation is a predicate for the operation annotation.
+func HasOperationAnnotation() predicate.Predicate {
+    f := func(obj runtime.Object) bool {
+        etcd, ok := obj.(*druidv1alpha1.Etcd)
+        if !ok {
+            return false
+        }
+        return etcd.Annotations[v1beta1constants.GardenerOperation] == v1beta1constants.GardenerOperationReconcile
+    }
+
+    return predicate.Funcs{
+        CreateFunc: func(event event.CreateEvent) bool {
+            return f(event.Object)
+        },
+        UpdateFunc: func(event event.UpdateEvent) bool {
+            return f(event.ObjectNew)
+        },
+        GenericFunc: func(event event.GenericEvent) bool {
+            return f(event.Object)
+        },
+        DeleteFunc: func(event event.DeleteEvent) bool {
+            return true
+        },
+    }
+}
+
+// LastOperationNotSuccessful is a predicate for unsuccessful last operations.
+func LastOperationNotSuccessful() predicate.Predicate {
+    operationNotSucceeded := func(obj runtime.Object) bool {
+        etcd, ok := obj.(*druidv1alpha1.Etcd)
+        if !ok {
+            return false
+        }
+        if etcd.Status.LastError != nil {
+            return true
+        }
+        return false
+    }
+
+    return predicate.Funcs{
+        CreateFunc: func(event event.CreateEvent) bool {
+            return operationNotSucceeded(event.Object)
+        },
+        UpdateFunc: func(event event.UpdateEvent) bool {
+            return operationNotSucceeded(event.ObjectNew)
+        },
+        GenericFunc: func(event event.GenericEvent) bool {
+            return operationNotSucceeded(event.Object)
+        },
+        DeleteFunc: func(event event.DeleteEvent) bool {
+            return operationNotSucceeded(event.Object)
+        },
+    }
+}
+
+// GenerationOrResourceVersionChangedPredicate implements an update predicate function on Generation or ResourceVersion change.
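// In effect, the Update filter defined below admits an event only when the generation or the
// resourceVersion differs between the old and the new object and, for Etcd resources, only while
// status.observedGeneration still trails metadata.generation (or is not yet set). Create events
// are admitted for Etcd resources under the same observedGeneration check and dropped for every
// other kind, whereas updates to non-Etcd objects (the owned StatefulSets, Services and
// ConfigMaps) pass whenever their resourceVersion changed, i.e. on every real update.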
+type GenerationOrResourceVersionChangedPredicate struct { + predicate.Funcs +} + +// Update implements default UpdateEvent filter for validating generation change +func (GenerationOrResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool { + if e.MetaOld == nil { + logger.Error("update event has no old metadata", "event", e) + return false + } + if e.ObjectOld == nil { + logger.Error("update event has no old runtime object to update", "event", e) + return false + } + if e.ObjectNew == nil { + logger.Error("update event has no new runtime object for update", "event", e) + return false + } + if e.MetaNew == nil { + logger.Error("update event has no new metadata", "event", e) + return false + } + if (e.MetaNew.GetGeneration() == e.MetaOld.GetGeneration()) && + (e.MetaNew.GetResourceVersion() == e.MetaOld.GetResourceVersion()) { + return false + } + + etcd, ok := e.ObjectNew.(*druidv1alpha1.Etcd) + if !ok { + // We are creating the other resources so we needn't reconcile etcd here. + return true + } + if etcd.Status.ObservedGeneration != nil && *etcd.Status.ObservedGeneration == etcd.Generation { + return false + } + + return true +} + +// Create implements default CreateEvent filter for validating generation change +func (GenerationOrResourceVersionChangedPredicate) Create(e event.CreateEvent) bool { + etcd, ok := e.Object.(*druidv1alpha1.Etcd) + if !ok { + // We are creating the other resources so we needn't reconcile etcd here. + return false + } + if etcd.Status.ObservedGeneration != nil && *etcd.Status.ObservedGeneration == etcd.Generation { + return false + } + return true +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/core/doc.go new file mode 100644 index 000000000..effda8b53 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/doc.go @@ -0,0 +1,19 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package + +// Package core is the internal version of the API. +// +groupName=core.gardener.cloud +package core diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/field_constants.go b/vendor/github.com/gardener/gardener/pkg/apis/core/field_constants.go new file mode 100644 index 000000000..be5a19b5f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/field_constants.go @@ -0,0 +1,27 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +// Field path constants that are specific to the internal API +// representation. +const ( + // ShootSeedName is the field selector path for finding + // the Seed cluster of a core.gardener.cloud/{v1alpha1,v1beta1} Shoot. + ShootSeedName = "spec.seedName" + + // ShootCloudProfileName is the field selector path for finding + // the CloudProfile name of a core.gardener.cloud/{v1alpha1,v1beta1} Shoot. + ShootCloudProfileName = "spec.cloudProfileName" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/register.go b/vendor/github.com/gardener/gardener/pkg/apis/core/register.go new file mode 100644 index 000000000..63db01907 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/register.go @@ -0,0 +1,74 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of the core API group. +const GroupName = "core.gardener.cloud" + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is a new Scheme Builder which registers our API. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a reference to the Scheme Builder's AddToScheme function. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &BackupBucket{}, + &BackupBucketList{}, + &BackupEntry{}, + &BackupEntryList{}, + &CloudProfile{}, + &CloudProfileList{}, + &ControllerRegistration{}, + &ControllerRegistrationList{}, + &ControllerInstallation{}, + &ControllerInstallationList{}, + &Plant{}, + &PlantList{}, + &Project{}, + &ProjectList{}, + &Quota{}, + &QuotaList{}, + &SecretBinding{}, + &SecretBindingList{}, + &Seed{}, + &SeedList{}, + &ShootState{}, + &ShootStateList{}, + &Shoot{}, + &ShootList{}, + ) + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupbucket.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupbucket.go new file mode 100644 index 000000000..f4da73642 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupbucket.go @@ -0,0 +1,82 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupBucket holds details about backup bucket +type BackupBucket struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // Specification of the Backup Bucket. + Spec BackupBucketSpec + // Most recently observed status of the Backup Bucket. + Status BackupBucketStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupBucketList is a list of BackupBucket objects. +type BackupBucketList struct { + metav1.TypeMeta + // Standard list object metadata. + metav1.ListMeta + // Items is the list of BackupBucket. + Items []BackupBucket +} + +// BackupBucketSpec is the specification of a Backup Bucket. +type BackupBucketSpec struct { + // Provider holds the details of cloud provider of the object store. + Provider BackupBucketProvider + // ProviderConfig is the configuration passed to BackupBucket resource. + ProviderConfig *ProviderConfig + // SecretRef is a reference to a secret that contains the credentials to access object store. + SecretRef corev1.SecretReference + // SeedName holds the name of the seed allocated to BackupBucket for running controller. + SeedName *string +} + +// BackupBucketStatus holds the most recently observed status of the Backup Bucket. +type BackupBucketStatus struct { + // ProviderStatus is the configuration passed to BackupBucket resource. + ProviderStatus *ProviderConfig + // LastOperation holds information about the last operation on the BackupBucket. + LastOperation *LastOperation + // LastError holds information about the last occurred error during an operation. 
+ LastError *LastError + // ObservedGeneration is the most recent generation observed for this BackupBucket. It corresponds to the + // BackupBucket's generation, which is updated on mutation by the API Server. + ObservedGeneration int64 + // GeneratedSecretRef is reference to the secret generated by backup bucket, which + // will have object store specific credentials. + GeneratedSecretRef *corev1.SecretReference +} + +// BackupBucketProvider holds the details of cloud provider of the object store. +type BackupBucketProvider struct { + // Type is the type of provider. + Type string + // Region is the region of the bucket. + Region string +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupentry.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupentry.go new file mode 100644 index 000000000..287334741 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_backupentry.go @@ -0,0 +1,68 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // BackupEntryForceDeletion is a constant for an annotation on a BackupEntry indicating that it should be force deleted. + BackupEntryForceDeletion = "backupentry.core.gardener.cloud/force-deletion" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupEntry holds details about shoot backup. +type BackupEntry struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // Spec contains the specification of the Backup Entry. + Spec BackupEntrySpec + // Status contains the most recently observed status of the Backup Entry. + Status BackupEntryStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupEntryList is a list of BackupEntry objects. +type BackupEntryList struct { + metav1.TypeMeta + // Standard list object metadata. + metav1.ListMeta + // Items is the list of BackupEntry. + Items []BackupEntry +} + +// BackupEntrySpec is the specification of a Backup Entry. +type BackupEntrySpec struct { + // BucketName is the name of backup bucket for this Backup Entry. + BucketName string + // SeedName holds the name of the seed allocated to BackupBucket for running controller. + SeedName *string +} + +// BackupEntryStatus holds the most recently observed status of the Backup Entry. +type BackupEntryStatus struct { + // LastOperation holds information about the last operation on the BackupEntry. + LastOperation *LastOperation + // LastError holds information about the last occurred error during an operation. + LastError *LastError + // ObservedGeneration is the most recent generation observed for this BackupEntry. It corresponds to the + // BackupEntry's generation, which is updated on mutation by the API Server. 
+ ObservedGeneration int64 +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go new file mode 100644 index 000000000..b2e4177c2 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go @@ -0,0 +1,152 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CloudProfile represents certain properties about a provider environment. +type CloudProfile struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // Spec defines the provider environment properties. + Spec CloudProfileSpec +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CloudProfileList is a collection of CloudProfiles. +type CloudProfileList struct { + metav1.TypeMeta + // Standard list object metadata. + metav1.ListMeta + // Items is the list of CloudProfiles. + Items []CloudProfile +} + +// CloudProfileSpec is the specification of a CloudProfile. +// It must contain exactly one of its defined keys. +type CloudProfileSpec struct { + // CABundle is a certificate bundle which will be installed onto every host machine of shoot cluster targeting this profile. + CABundle *string + // Kubernetes contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification. + Kubernetes KubernetesSettings + // MachineImages contains constraints regarding allowed values for machine images in the Shoot specification. + MachineImages []MachineImage + // MachineTypes contains constraints regarding allowed values for machine types in the 'workers' block in the Shoot specification. + MachineTypes []MachineType + // ProviderConfig contains provider-specific configuration for the profile. + ProviderConfig *ProviderConfig + // Regions contains constraints regarding allowed values for regions and zones. + Regions []Region + // SeedSelector contains an optional list of labels on `Seed` resources that marks those seeds whose shoots may use this provider profile. + // An empty list means that all seeds of the same provider type are supported. + // This is useful for environments that are of the same type (like openstack) but may have different "instances"/landscapes. + SeedSelector *metav1.LabelSelector + // Type is the name of the provider. + Type string + // VolumeTypes contains constraints regarding allowed values for volume types in the 'workers' block in the Shoot specification. 
+ VolumeTypes []VolumeType +} + +// KubernetesSettings contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification. +type KubernetesSettings struct { + // Versions is the list of allowed Kubernetes versions with optional expiration dates for Shoot clusters. + Versions []ExpirableVersion +} + +// MachineImage defines the name and multiple versions of the machine image in any environment. +type MachineImage struct { + // Name is the name of the image. + Name string + // Versions contains versions and expiration dates of the machine image + Versions []ExpirableVersion +} + +// ExpirableVersion contains a version and an expiration date. +type ExpirableVersion struct { + // Version is the version identifier. + Version string + // ExpirationDate defines the time at which this version expires. + ExpirationDate *metav1.Time +} + +// MachineType contains certain properties of a machine type. +type MachineType struct { + // CPU is the number of CPUs for this machine type. + CPU resource.Quantity + // GPU is the number of GPUs for this machine type. + GPU resource.Quantity + // Memory is the amount of memory for this machine type. + Memory resource.Quantity + // Name is the name of the machine type. + Name string + // Storage is the amount of storage associated with the root volume of this machine type. + Storage *MachineTypeStorage + // Usable defines if the machine type can be used for shoot clusters. + Usable *bool +} + +// MachineTypeStorage is the amount of storage associated with the root volume of this machine type. +type MachineTypeStorage struct { + // Class is the class of the storage type. + Class string + // Size is the storage size. + Size resource.Quantity + // Type is the type of the storage. + Type string +} + +// Region contains certain properties of a region. +type Region struct { + // Name is a region name. + Name string + // Zones is a list of availability zones in this region. + Zones []AvailabilityZone +} + +// AvailabilityZone is an availability zone. +type AvailabilityZone struct { + // Name is an an availability zone name. + Name string + // UnavailableMachineTypes is a list of machine type names that are not availability in this zone. + UnavailableMachineTypes []string + // UnavailableVolumeTypes is a list of volume type names that are not availability in this zone. + UnavailableVolumeTypes []string +} + +// VolumeType contains certain properties of a volume type. +type VolumeType struct { + // Class is the class of the volume type. + Class string + // Name is the name of the volume type. + Name string + // Usable defines if the volume type can be used for shoot clusters. + Usable *bool +} + +const ( + // VolumeClassStandard is a constant for the standard volume class. + VolumeClassStandard string = "standard" + // VolumeClassPremium is a constant for the premium volume class. + VolumeClassPremium string = "premium" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_common.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_common.go new file mode 100644 index 000000000..39212c3ac --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_common.go @@ -0,0 +1,105 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ErrorCode is a string alias. +type ErrorCode string + +const ( + // ErrorInfraUnauthorized indicates that the last error occurred due to invalid cloud provider credentials. + ErrorInfraUnauthorized ErrorCode = "ERR_INFRA_UNAUTHORIZED" + // ErrorInfraInsufficientPrivileges indicates that the last error occurred due to insufficient cloud provider privileges. + ErrorInfraInsufficientPrivileges ErrorCode = "ERR_INFRA_INSUFFICIENT_PRIVILEGES" + // ErrorInfraQuotaExceeded indicates that the last error occurred due to cloud provider quota limits. + ErrorInfraQuotaExceeded ErrorCode = "ERR_INFRA_QUOTA_EXCEEDED" + // ErrorInfraDependencies indicates that the last error occurred due to dependent objects on the cloud provider level. + ErrorInfraDependencies ErrorCode = "ERR_INFRA_DEPENDENCIES" +) + +// LastError indicates the last occurred error for an operation on a resource. +type LastError struct { + // A human readable message indicating details about the last error. + Description string + // ID of the task which caused this last error + TaskID *string + // Well-defined error codes of the last error(s). + // +optional + Codes []ErrorCode + // Last time the error was reported + LastUpdateTime *metav1.Time +} + +// LastOperationType is a string alias. +type LastOperationType string + +const ( + // LastOperationTypeReconcile indicates a 'reconcile' operation. + LastOperationTypeReconcile LastOperationType = "Reconcile" + // LastOperationTypeDelete indicates a 'delete' operation. + LastOperationTypeDelete LastOperationType = "Delete" +) + +// LastOperationState is a string alias. +type LastOperationState string + +const ( + // LastOperationStateProcessing indicates that an operation is ongoing. + LastOperationStateProcessing LastOperationState = "Processing" + // LastOperationStateSucceeded indicates that an operation has completed successfully. + LastOperationStateSucceeded LastOperationState = "Succeeded" + // LastOperationStateError indicates that an operation is completed with errors and will be retried. + LastOperationStateError LastOperationState = "Error" + // LastOperationStateFailed indicates that an operation is completed with errors and won't be retried. + LastOperationStateFailed LastOperationState = "Failed" + // LastOperationStatePending indicates that an operation cannot be done now, but will be tried in future. + LastOperationStatePending LastOperationState = "Pending" + // LastOperationStateAborted indicates that an operation has been aborted. + LastOperationStateAborted LastOperationState = "Aborted" +) + +// LastOperation indicates the type and the state of the last operation, along with a description +// message and a progress indicator. +type LastOperation struct { + // A human readable message indicating details about the last operation. + Description string + // Last time the operation state transitioned from one to another. + LastUpdateTime metav1.Time + // The progress in percentage (0-100) of the last operation. 
+ Progress int + // Status of the last operation, one of Aborted, Processing, Succeeded, Error, Failed. + State LastOperationState + // Type of the last operation, one of Create, Reconcile, Delete. + Type LastOperationType +} + +// Gardener holds the information about the Gardener. +type Gardener struct { + // ID is the Docker container id of the Gardener which last acted on a Shoot cluster. + ID string + // Name is the hostname (pod name) of the Gardener which last acted on a Shoot cluster. + Name string + // Version is the version of the Gardener which last acted on a Shoot cluster. + Version string +} + +const ( + // GardenerName is the value in a Garden resource's `.metadata.finalizers[]` array on which the Gardener will react + // when performing a delete request on a resource. + GardenerName = "gardener" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerinstallation.go new file mode 100644 index 000000000..bfb233580 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerinstallation.go @@ -0,0 +1,72 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerInstallation represents an installation request for an external controller. +type ControllerInstallation struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // Spec contains the specification of this installation. + Spec ControllerInstallationSpec + // Status contains the status of this installation. + Status ControllerInstallationStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerInstallationList is a collection of ControllerInstallations. +type ControllerInstallationList struct { + metav1.TypeMeta + // Standard list object metadata. + metav1.ListMeta + // Items is the list of ControllerInstallations. + Items []ControllerInstallation +} + +// ControllerInstallationSpec is the specification of a ControllerInstallation. +type ControllerInstallationSpec struct { + // RegistrationRef is used to reference a ControllerRegistration resources. + RegistrationRef corev1.ObjectReference + // SeedRef is used to reference a Seed resources. + SeedRef corev1.ObjectReference +} + +// ControllerInstallationStatus is the status of a ControllerInstallation. +type ControllerInstallationStatus struct { + // Conditions represents the latest available observations of a ControllerInstallations's current state. + Conditions []Condition + // ProviderStatus contains type-specific status. 
+ // +optional + ProviderStatus *ProviderConfig +} + +const ( + // ControllerInstallationHealthy is a condition type for indicating whether the controller is healthy. + ControllerInstallationHealthy ConditionType = "Healthy" + // ControllerInstallationInstalled is a condition type for indicating whether the controller has been installed. + ControllerInstallationInstalled ConditionType = "Installed" + // ControllerInstallationValid is a condition type for indicating whether the installation request is valid. + ControllerInstallationValid ConditionType = "Valid" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go new file mode 100644 index 000000000..403d96f18 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go @@ -0,0 +1,73 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRegistration represents a registration of an external controller. +type ControllerRegistration struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // Spec contains the specification of this registration. + Spec ControllerRegistrationSpec +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRegistrationList is a collection of ControllerRegistrations. +type ControllerRegistrationList struct { + metav1.TypeMeta + // Standard list object metadata. + metav1.ListMeta + // Items is the list of ControllerRegistrations. + Items []ControllerRegistration +} + +// ControllerRegistrationSpec is the specification of a ControllerRegistration. +type ControllerRegistrationSpec struct { + // Resources is a list of combinations of kinds (DNSProvider, Infrastructure, Generic, ...) and their actual types + // (aws-route53, gcp, auditlog, ...). + Resources []ControllerResource + // Deployment contains information for how this controller is deployed. + Deployment *ControllerDeployment +} + +// ControllerResource is a combination of a kind (DNSProvider, Infrastructure, Generic, ...) and the actual type for this +// kind (aws-route53, gcp, auditlog, ...). +type ControllerResource struct { + // Kind is the resource kind. + Kind string + // Type is the resource type. + Type string + // GloballyEnabled determines if this resource is required by all Shoot clusters. + GloballyEnabled *bool + // ReconcileTimeout defines how long Gardener should wait for the resource reconciliation. + ReconcileTimeout *metav1.Duration +} + +// ControllerDeployment contains information for how this controller is deployed. 
+type ControllerDeployment struct { + // Type is the deployment type. + Type string + // ProviderConfig contains type-specific configuration. + ProviderConfig *ProviderConfig +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_plant.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_plant.go new file mode 100644 index 000000000..9919bb7a6 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_plant.go @@ -0,0 +1,104 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Plant represents an external kubernetes cluster. +type Plant struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // Spec contains the specification of this Plant. + Spec PlantSpec + // Status contains the status of this Plant. + Status PlantStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PlantList is a collection of Plants. +type PlantList struct { + metav1.TypeMeta + // Standard list object metadata. + metav1.ListMeta + // Items is the list of Plants. + Items []Plant +} + +const ( + // PlantEveryNodeReady is a constant for a condition type indicating the node health. + PlantEveryNodeReady ConditionType = "EveryNodeReady" + // PlantAPIServerAvailable is a constant for a condition type indicating that the Plant cluster API server is available. + PlantAPIServerAvailable ConditionType = "APIServerAvailable" +) + +// PlantSpec is the specification of a Plant. +type PlantSpec struct { + // SecretRef is a reference to a Secret object containing the Kubeconfig of the external kubernetes + // clusters to be added to Gardener. + SecretRef corev1.LocalObjectReference + // Endpoints is the configuration plant endpoints + Endpoints []Endpoint +} + +// Endpoint is an endpoint for monitoring, logging and other services around the plant. +type Endpoint struct { + // Name is the name of the endpoint + Name string + // URL is the url of the endpoint + URL string + // Purpose is the purpose of the endpoint + Purpose string +} + +// PlantStatus is the status of a Plant. +type PlantStatus struct { + // Conditions represents the latest available observations of a Plant's current state. + Conditions []Condition + // ObservedGeneration is the most recent generation observed for this Plant. It corresponds to the + // Plant's generation, which is updated on mutation by the API Server. 
+ ObservedGeneration *int64 + // ClusterInfo is additional computed information about the newly added cluster (Plant) + ClusterInfo *ClusterInfo +} + +// ClusterInfo contains information about the Plant cluster +type ClusterInfo struct { + // Cloud describes the cloud information + Cloud CloudInfo + // Kubernetes describes kubernetes meta information (e.g., version) + Kubernetes KubernetesInfo +} + +// CloudInfo contains information about the cloud +type CloudInfo struct { + // Type is the cloud type + Type string + // Region is the cloud region + Region string +} + +// KubernetesInfo contains the version and configuration variables for the Plant cluster. +type KubernetesInfo struct { + // Version is the semantic Kubernetes version to use for the Plant cluster. + Version string +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go new file mode 100644 index 000000000..c5be18dcc --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go @@ -0,0 +1,118 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Project holds certain properties about a Gardener project. +type Project struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // Spec defines the project properties. + Spec ProjectSpec + // Most recently observed status of the Project. + Status ProjectStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ProjectList is a collection of Projects. +type ProjectList struct { + metav1.TypeMeta + // Standard list object metadata. + metav1.ListMeta + // Items is the list of Projects. + Items []Project +} + +// ProjectSpec is the specification of a Project. +type ProjectSpec struct { + // CreatedBy is a subject representing a user name, an email address, or any other identifier of a user + // who created the project. + CreatedBy *rbacv1.Subject + // Description is a human-readable description of what the project is used for. + Description *string + // Owner is a subject representing a user name, an email address, or any other identifier of a user owning + // the project. + Owner *rbacv1.Subject + // Purpose is a human-readable explanation of the project's purpose. + Purpose *string + // Members is a list of subjects representing a user name, an email address, or any other identifier of a user, + // group, or service account that has a certain role. + Members []ProjectMember + // Namespace is the name of the namespace that has been created for the Project object. 
+ // A nil value means that Gardener will determine the name of the namespace. + Namespace *string +} + +// ProjectStatus holds the most recently observed status of the project. +type ProjectStatus struct { + // ObservedGeneration is the most recent generation observed for this project. + ObservedGeneration int64 + // Phase is the current phase of the project. + Phase ProjectPhase +} + +// ProjectMember is a member of a project. +type ProjectMember struct { + // Subject is representing a user name, an email address, or any other identifier of a user, group, or service + // account that has a certain role. + rbacv1.Subject + // Roles is a list of roles of this member. + Roles []string +} + +const ( + // ProjectMemberAdmin is a const for a role that provides full admin access. + ProjectMemberAdmin = "admin" + // ProjectMemberOwner is a const for a role that provides full owner access. + ProjectMemberOwner = "owner" + // ProjectMemberViewer is a const for a role that provides limited permissions to only view some resources. + ProjectMemberViewer = "viewer" + + // ProjectMemberExtensionPrefix is a prefix for custom roles that are not known by Gardener. + ProjectMemberExtensionPrefix = "extension:" +) + +// ProjectPhase is a label for the condition of a project at the current time. +type ProjectPhase string + +const ( + // ProjectPending indicates that the project reconciliation is pending. + ProjectPending ProjectPhase = "Pending" + // ProjectReady indicates that the project reconciliation was successful. + ProjectReady ProjectPhase = "Ready" + // ProjectFailed indicates that the project reconciliation failed. + ProjectFailed ProjectPhase = "Failed" + // ProjectTerminating indicates that the project is in termination process. + ProjectTerminating ProjectPhase = "Terminating" + + // ProjectEventNamespaceReconcileFailed indicates that the namespace reconciliation has failed. + ProjectEventNamespaceReconcileFailed = "NamespaceReconcileFailed" + // ProjectEventNamespaceReconcileSuccessful indicates that the namespace reconciliation has succeeded. + ProjectEventNamespaceReconcileSuccessful = "NamespaceReconcileSuccessful" + // ProjectEventNamespaceDeletionFailed indicates that the namespace deletion failed. + ProjectEventNamespaceDeletionFailed = "NamespaceDeletionFailed" + // ProjectEventNamespaceMarkedForDeletion indicates that the namespace has been successfully marked for deletion. + ProjectEventNamespaceMarkedForDeletion = "NamespaceMarkedForDeletion" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_quota.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_quota.go new file mode 100644 index 000000000..0067b1341 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_quota.go @@ -0,0 +1,67 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type Quota struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // Spec defines the Quota constraints. + Spec QuotaSpec +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// QuotaList is a collection of Quotas. +type QuotaList struct { + metav1.TypeMeta + // Standard list object metadata. + metav1.ListMeta + // Items is the list of Quotas. + Items []Quota +} + +// QuotaSpec is the specification of a Quota. +type QuotaSpec struct { + // ClusterLifetimeDays is the lifetime of a Shoot cluster in days before it will be terminated automatically. + ClusterLifetimeDays *int + // Metrics is a list of resources which will be put under constraints. + Metrics corev1.ResourceList + // Scope is the scope of the Quota object, either 'project' or 'secret'. + Scope corev1.ObjectReference +} + +const ( + // QuotaMetricCPU is the constraint for the amount of CPUs + QuotaMetricCPU corev1.ResourceName = corev1.ResourceCPU + // QuotaMetricGPU is the constraint for the amount of GPUs (e.g. from Nvidia) + QuotaMetricGPU corev1.ResourceName = "gpu" + // QuotaMetricMemory is the constraint for the amount of memory + QuotaMetricMemory corev1.ResourceName = corev1.ResourceMemory + // QuotaMetricStorageStandard is the constraint for the size of a standard disk + QuotaMetricStorageStandard corev1.ResourceName = corev1.ResourceStorage + ".standard" + // QuotaMetricStoragePremium is the constraint for the size of a premium disk (e.g. SSD) + QuotaMetricStoragePremium corev1.ResourceName = corev1.ResourceStorage + ".premium" + // QuotaMetricLoadbalancer is the constraint for the amount of loadbalancers + QuotaMetricLoadbalancer corev1.ResourceName = "loadbalancer" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_secretbinding.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_secretbinding.go new file mode 100644 index 000000000..9732d385a --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_secretbinding.go @@ -0,0 +1,44 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type SecretBinding struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // SecretRef is a reference to a secret object in the same or another namespace. + SecretRef corev1.SecretReference + // Quotas is a list of references to Quota objects in the same or another namespace. 
+ Quotas []corev1.ObjectReference +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SecretBindingList is a collection of SecretBindings. +type SecretBindingList struct { + metav1.TypeMeta + // Standard list object metadata. + metav1.ListMeta + // Items is the list of SecretBindings. + Items []SecretBinding +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go new file mode 100644 index 000000000..55130eb20 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go @@ -0,0 +1,185 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Seed represents an installation request for an external controller. +type Seed struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // Spec contains the specification of this installation. + Spec SeedSpec + // Status contains the status of this installation. + Status SeedStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SeedList is a collection of Seeds. +type SeedList struct { + metav1.TypeMeta + // Standard list object metadata. + metav1.ListMeta + // Items is the list of Seeds. + Items []Seed +} + +// SeedSpec is the specification of a Seed. +type SeedSpec struct { + // Backup holds the object store configuration for the backups of shoot (currently only etcd). + // If it is not specified, then there won't be any backups taken for shoots associated with this seed. + // If backup field is present in seed, then backups of the etcd from shoot control plane will be stored + // under the configured object store. + Backup *SeedBackup + // DNS contains DNS-relevant information about this seed cluster. + DNS SeedDNS + // Networks defines the pod, service and worker network of the Seed cluster. + Networks SeedNetworks + // Provider defines the provider type and region for this Seed cluster. + Provider SeedProvider + // SecretRef is a reference to a Secret object containing the Kubeconfig and the cloud provider credentials for + // the account the Seed cluster has been deployed to. + SecretRef *corev1.SecretReference + // Taints describes taints on the seed. + Taints []SeedTaint + // Volume contains settings for persistentvolumes created in the seed cluster. + Volume *SeedVolume +} + +// SeedStatus is the status of a Seed. +type SeedStatus struct { + // Gardener holds information about the Gardener which last acted on the Shoot. 
+ Gardener *Gardener + // KubernetesVersion is the Kubernetes version of the seed cluster. + KubernetesVersion *string + // Conditions represents the latest available observations of a Seed's current state. + Conditions []Condition + // ObservedGeneration is the most recent generation observed for this Seed. It corresponds to the + // Seed's generation, which is updated on mutation by the API Server. + ObservedGeneration int64 +} + +// SeedBackup contains the object store configuration for backups for shoot (currently only etcd). +type SeedBackup struct { + // Provider is a provider name. + Provider string + // ProviderConfig is the configuration passed to BackupBucket resource. + ProviderConfig *ProviderConfig + // Region is a region name. + Region *string + // SecretRef is a reference to a Secret object containing the cloud provider credentials for + // the object store where backups should be stored. It should have enough privileges to manipulate + // the objects as well as buckets. + SecretRef corev1.SecretReference +} + +// SeedDNS contains DNS-relevant information about this seed cluster. +type SeedDNS struct { + // IngressDomain is the domain of the Seed cluster pointing to the ingress controller endpoint. It will be used + // to construct ingress URLs for system applications running in Shoot clusters. + IngressDomain string +} + +// SeedNetworks contains CIDRs for the pod, service and node networks of a Kubernetes cluster. +type SeedNetworks struct { + // Nodes is the CIDR of the node network. + Nodes *string + // Pods is the CIDR of the pod network. + Pods string + // Services is the CIDR of the service network. + Services string + // ShootDefaults contains the default networks CIDRs for shoots. + ShootDefaults *ShootNetworks + // BlockCIDRs is a list of network addresses that should be blocked for shoot control plane components running + // in the seed cluster. + BlockCIDRs []string +} + +// ShootNetworks contains the default networks CIDRs for shoots. +type ShootNetworks struct { + // Pods is the CIDR of the pod network. + Pods *string + // Services is the CIDR of the service network. + Services *string +} + +// SeedProvider defines the provider type and region for this Seed cluster. +type SeedProvider struct { + // Type is the name of the provider. + Type string + // Region is a name of a region. + Region string +} + +// SeedTaint describes a taint on a seed. +type SeedTaint struct { + // Key is the taint key to be applied to a seed. + Key string + // Value is the taint value corresponding to the taint key. + Value *string +} + +const ( + // SeedTaintDisableCapacityReservation is a constant for a taint key on a seed that marks it for disabling + // excess capacity reservation. This can be useful for seed clusters which only host shooted seeds to reduce + // costs. + SeedTaintDisableCapacityReservation = "seed.gardener.cloud/disable-capacity-reservation" + // SeedTaintDisableDNS is a constant for a taint key on a seed that marks it for disabling DNS. All shoots + // using this seed won't get any DNS providers, DNS records, and no DNS extension controller is required to + // be installed here. This is useful for environment where DNS is not required. + SeedTaintDisableDNS = "seed.gardener.cloud/disable-dns" + // SeedTaintProtected is a constant for a taint key on a seed that marks it as protected. Protected seeds + // may only be used by shoots in the `garden` namespace. 
+ SeedTaintProtected = "seed.gardener.cloud/protected" + // SeedTaintInvisible is a constant for a taint key on a seed that marks it as invisible. Invisible seeds + // are not considered by the gardener-scheduler. + SeedTaintInvisible = "seed.gardener.cloud/invisible" +) + +// SeedVolume contains settings for persistentvolumes created in the seed cluster. +type SeedVolume struct { + // MinimumSize defines the minimum size that should be used for PVCs in the seed. + MinimumSize *resource.Quantity + // Providers is a list of storage class provisioner types for the seed. + Providers []SeedVolumeProvider +} + +// SeedVolumeProvider is a storage class provisioner type. +type SeedVolumeProvider struct { + // Purpose is the purpose of this provider. + Purpose string + // Name is the name of the storage class provisioner type. + Name string +} + +const ( + // SeedBootstrapped is a constant for a condition type indicating that the seed cluster has been + // bootstrapped. + SeedBootstrapped ConditionType = "Bootstrapped" + // SeedExtensionsReady is a constant for a condition type indicating that the extensions are ready. + SeedExtensionsReady ConditionType = "ExtensionsReady" + // SeedGardenletReady is a constant for a condition type indicating that the Gardenlet is ready. + SeedGardenletReady ConditionType = "GardenletReady" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go new file mode 100644 index 000000000..2810fc186 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go @@ -0,0 +1,758 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type Shoot struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // Specification of the Shoot cluster. + Spec ShootSpec + // Most recently observed status of the Shoot cluster. + Status ShootStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ShootList is a list of Shoot objects. +type ShootList struct { + metav1.TypeMeta + // Standard list object metadata. + metav1.ListMeta + // Items is the list of Shoots. + Items []Shoot +} + +// ShootSpec is the specification of a Shoot. +type ShootSpec struct { + // Addons contains information about enabled/disabled addons and their configuration. + Addons *Addons + // CloudProfileName is a name of a CloudProfile object. + CloudProfileName string + // DNS contains information about the DNS settings of the Shoot. 
+ DNS *DNS + // Extensions contain type and provider information for Shoot extensions. + Extensions []Extension + // Hibernation contains information whether the Shoot is suspended or not. + Hibernation *Hibernation + // Kubernetes contains the version and configuration settings of the control plane components. + Kubernetes Kubernetes + // Networking contains information about cluster networking such as CNI Plugin type, CIDRs, ...etc. + Networking Networking + // Maintenance contains information about the time window for maintenance operations and which + // operations should be performed. + Maintenance *Maintenance + // Monitoring contains information about custom monitoring configurations for the shoot. + Monitoring *Monitoring + // Provider contains all provider-specific and provider-relevant information. + Provider Provider + // Purpose is the purpose class for this cluster. + Purpose *ShootPurpose + // Region is a name of a region. + Region string + // SecretBindingName is the name of the a SecretBinding that has a reference to the provider secret. + // The credentials inside the provider secret will be used to create the shoot in the respective account. + SecretBindingName string + // SeedName is the name of the seed cluster that runs the control plane of the Shoot. + SeedName *string +} + +// ShootStatus holds the most recently observed status of the Shoot cluster. +type ShootStatus struct { + // Conditions represents the latest available observations of a Shoots's current state. + Conditions []Condition + // Constraints represents conditions of a Shoot's current state that constraint some operations on it. + Constraints []Condition + // Gardener holds information about the Gardener which last acted on the Shoot. + Gardener Gardener + // IsHibernated indicates whether the Shoot is currently hibernated. + IsHibernated bool + // LastOperation holds information about the last operation on the Shoot. + LastOperation *LastOperation + // LastErrors holds information about the last occurred error(s) during an operation. + LastErrors []LastError + // ObservedGeneration is the most recent generation observed for this Shoot. It corresponds to the + // Shoot's generation, which is updated on mutation by the API Server. + ObservedGeneration int64 + // RetryCycleStartTime is the start time of the last retry cycle (used to determine how often an operation + // must be retried until we give up). + RetryCycleStartTime *metav1.Time + // SeedName is the name of the seed cluster that runs the control plane of the Shoot. This value is only written + // after a successful create/reconcile operation. It will be used when control planes are moved between Seeds. + SeedName *string + // TechnicalID is the name that is used for creating the Seed namespace, the infrastructure resources, and + // basically everything that is related to this particular Shoot. + TechnicalID string + // UID is a unique identifier for the Shoot cluster to avoid portability between Kubernetes clusters. + // It is used to compute unique hashes. + UID types.UID +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Addons relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Addons is a collection of configuration for specific addons which are managed by the Gardener. +type Addons struct { + // KubernetesDashboard holds configuration settings for the kubernetes dashboard addon. 
+ KubernetesDashboard *KubernetesDashboard + // NginxIngress holds configuration settings for the nginx-ingress addon. + NginxIngress *NginxIngress +} + +// Addon allows enabling or disabling a specific addon and is used to derive from. +type Addon struct { + // Enabled indicates whether the addon is enabled or not. + Enabled bool +} + +// KubernetesDashboard describes configuration values for the kubernetes-dashboard addon. +type KubernetesDashboard struct { + Addon + // AuthenticationMode defines the authentication mode for the kubernetes-dashboard. + AuthenticationMode *string +} + +const ( + // KubernetesDashboardAuthModeBasic uses basic authentication mode for auth. + KubernetesDashboardAuthModeBasic = "basic" + // KubernetesDashboardAuthModeToken uses token-based mode for auth. + KubernetesDashboardAuthModeToken = "token" +) + +// NginxIngress describes configuration values for the nginx-ingress addon. +type NginxIngress struct { + Addon + // LoadBalancerSourceRanges is list of whitelist IP sources for NginxIngress + LoadBalancerSourceRanges []string + // Config contains custom configuration for the nginx-ingress-controller configuration. + // See https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md#configuration-options + Config map[string]string + // ExternalTrafficPolicy controls the `.spec.externalTrafficPolicy` value of the load balancer `Service` + // exposing the nginx-ingress. Defaults to `Cluster`. + ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// DNS relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// DNS holds information about the provider, the hosted zone id and the domain. +type DNS struct { + // Domain is the external available domain of the Shoot cluster. This domain will be written into the + // kubeconfig that is handed out to end-users. + Domain *string + // Providers is a list of DNS providers that shall be enabled for this shoot cluster. Only relevant if + // not a default domain is used. + Providers []DNSProvider +} + +// DNSProvider contains information about a DNS provider. +type DNSProvider struct { + // Domains contains information about which domains shall be included/excluded for this provider. + Domains *DNSIncludeExclude + // Primary indicates that this DNSProvider is used for shoot related domains. + Primary *bool + // SecretName is a name of a secret containing credentials for the stated domain and the + // provider. When not specified, the Gardener will use the cloud provider credentials referenced + // by the Shoot and try to find respective credentials there. Specifying this field may override + // this behavior, i.e. forcing the Gardener to only look into the given secret. + SecretName *string + // Type is the DNS provider type for the Shoot. Only relevant if not the default domain is used for + // this shoot. + Type *string + // Zones contains information about which hosted zones shall be included/excluded for this provider. + Zones *DNSIncludeExclude +} + +type DNSIncludeExclude struct { + // Include is a list of resources that shall be included. + Include []string + // Exclude is a list of resources that shall be excluded. 
+ Exclude []string +} + +// DefaultDomain is the default value in the Shoot's '.spec.dns.domain' when '.spec.dns.provider' is 'unmanaged' +const DefaultDomain = "cluster.local" + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Extension relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Extension contains type and provider information for Shoot extensions. +type Extension struct { + // Type is the type of the extension resource. + Type string + // ProviderConfig is the configuration passed to extension resource. + ProviderConfig *ProviderConfig +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Hibernation relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Hibernation contains information whether the Shoot is suspended or not. +type Hibernation struct { + // Enabled specifies whether the Shoot needs to be hibernated or not. If it is true, the Shoot's desired state is to be hibernated. + // If it is false or nil, the Shoot's desired state is to be awaken. + Enabled *bool + // Schedules determine the hibernation schedules. + Schedules []HibernationSchedule +} + +// HibernationSchedule determines the hibernation schedule of a Shoot. +// A Shoot will be regularly hibernated at each start time and will be woken up at each end time. +// Start or End can be omitted, though at least one of each has to be specified. +type HibernationSchedule struct { + // Start is a Cron spec at which time a Shoot will be hibernated. + Start *string + // End is a Cron spec at which time a Shoot will be woken up. + End *string + // Location is the time location in which both start and and shall be evaluated. + Location *string +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Kubernetes relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Kubernetes contains the version and configuration variables for the Shoot control plane. +type Kubernetes struct { + // AllowPrivilegedContainers indicates whether privileged containers are allowed in the Shoot (default: true). + AllowPrivilegedContainers *bool + // ClusterAutoscaler contains the configration flags for the Kubernetes cluster autoscaler. + ClusterAutoscaler *ClusterAutoscaler + // KubeAPIServer contains configuration settings for the kube-apiserver. + KubeAPIServer *KubeAPIServerConfig + // KubeControllerManager contains configuration settings for the kube-controller-manager. + KubeControllerManager *KubeControllerManagerConfig + // KubeScheduler contains configuration settings for the kube-scheduler. + KubeScheduler *KubeSchedulerConfig + // KubeProxy contains configuration settings for the kube-proxy. + KubeProxy *KubeProxyConfig + // Kubelet contains configuration settings for the kubelet. + Kubelet *KubeletConfig + // Version is the semantic Kubernetes version to use for the Shoot cluster. + Version string +} + +// ClusterAutoscaler contains the configration flags for the Kubernetes cluster autoscaler. +type ClusterAutoscaler struct { + // ScaleDownDelayAfterAdd defines how long after scale up that scale down evaluation resumes (default: 10 mins). 
+ ScaleDownDelayAfterAdd *metav1.Duration + // ScaleDownDelayAfterDelete how long after node deletion that scale down evaluation resumes, defaults to scanInterval (defaults to ScanInterval). + ScaleDownDelayAfterDelete *metav1.Duration + // ScaleDownDelayAfterFailure how long after scale down failure that scale down evaluation resumes (default: 3 mins). + ScaleDownDelayAfterFailure *metav1.Duration + // ScaleDownUnneededTime defines how long a node should be unneeded before it is eligible for scale down (default: 10 mins). + ScaleDownUnneededTime *metav1.Duration + // ScaleDownUtilizationThreshold defines the threshold in % under which a node is being removed + ScaleDownUtilizationThreshold *float64 + // ScanInterval how often cluster is reevaluated for scale up or down (default: 10 secs). + ScanInterval *metav1.Duration +} + +// KubernetesConfig contains common configuration fields for the control plane components. +type KubernetesConfig struct { + // FeatureGates contains information about enabled feature gates. + FeatureGates map[string]bool +} + +// KubeAPIServerConfig contains configuration settings for the kube-apiserver. +type KubeAPIServerConfig struct { + KubernetesConfig + // AdmissionPlugins contains the list of user-defined admission plugins (additional to those managed by Gardener), and, if desired, the corresponding + // configuration. + AdmissionPlugins []AdmissionPlugin + // APIAudiences are the identifiers of the API. The service account token authenticator will + // validate that tokens used against the API are bound to at least one of these audiences. + // If `serviceAccountConfig.issuer` is configured and this is not, this defaults to a single + // element list containing the issuer URL. + APIAudiences []string + // AuditConfig contains configuration settings for the audit of the kube-apiserver. + AuditConfig *AuditConfig + // EnableBasicAuthentication defines whether basic authentication should be enabled for this cluster or not. + EnableBasicAuthentication *bool + // OIDCConfig contains configuration settings for the OIDC provider. + OIDCConfig *OIDCConfig + // RuntimeConfig contains information about enabled or disabled APIs. + RuntimeConfig map[string]bool + // ServiceAccountConfig contains configuration settings for the service account handling + // of the kube-apiserver. + ServiceAccountConfig *ServiceAccountConfig +} + +// ServiceAccountConfig is the kube-apiserver configuration for service accounts. +type ServiceAccountConfig struct { + // Issuer is the identifier of the service account token issuer. The issuer will assert this + // identifier in "iss" claim of issued tokens. This value is a string or URI. + Issuer *string + // SigningKeySecret is a reference to a secret that contains the current private key of the + // service account token issuer. The issuer will sign issued ID tokens with this private key. + // (Requires the 'TokenRequest' feature gate.) + SigningKeySecret *corev1.LocalObjectReference +} + +// AuditConfig contains settings for audit of the api server +type AuditConfig struct { + // AuditPolicy contains configuration settings for audit policy of the kube-apiserver. + AuditPolicy *AuditPolicy +} + +// AuditPolicy contains audit policy for kube-apiserver +type AuditPolicy struct { + // ConfigMapRef is a reference to a ConfigMap object in the same namespace, + // which contains the audit policy for the kube-apiserver. + ConfigMapRef *corev1.ObjectReference +} + +// OIDCConfig contains configuration settings for the OIDC provider. 
+// Note: Descriptions were taken from the Kubernetes documentation. +type OIDCConfig struct { + // If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used. + CABundle *string + // ClientAuthentication can optionally contain client configuration used for kubeconfig generation. + ClientAuthentication *OpenIDConnectClientAuthentication + // The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set. + ClientID *string + // If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details. + GroupsClaim *string + // If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies. + GroupsPrefix *string + // The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT). + IssuerURL *string + // ATTENTION: Only meaningful for Kubernetes >= 1.11 + // key=value pairs that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value. + RequiredClaims map[string]string + // List of allowed JOSE asymmetric signing algorithms. JWTs with a 'alg' header value not in this list will be rejected. Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1 + SigningAlgs []string + // The OpenID claim to use as the user name. Note that claims other than the default ('sub') is not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details. (default "sub") + UsernameClaim *string + // If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'. + UsernamePrefix *string +} + +// OpenIDConnectClientAuthentication contains configuration for OIDC clients. +type OpenIDConnectClientAuthentication struct { + // Extra configuration added to kubeconfig's auth-provider. + // Must not be any of idp-issuer-url, client-id, client-secret, idp-certificate-authority, idp-certificate-authority-data, id-token or refresh-token + ExtraConfig map[string]string + // The client Secret for the OpenID Connect client. + Secret *string +} + +// AdmissionPlugin contains information about a specific admission plugin and its corresponding configuration. +type AdmissionPlugin struct { + // Name is the name of the plugin. + Name string + // Config is the configuration of the plugin. + Config *ProviderConfig +} + +// KubeControllerManagerConfig contains configuration settings for the kube-controller-manager. +type KubeControllerManagerConfig struct { + KubernetesConfig + // HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager. + HorizontalPodAutoscalerConfig *HorizontalPodAutoscalerConfig + // NodeCIDRMaskSize defines the mask size for node cidr in cluster (default is 24) + NodeCIDRMaskSize *int32 +} + +// HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager. +// Note: Descriptions were taken from the Kubernetes documentation. 
+type HorizontalPodAutoscalerConfig struct { + // The period after which a ready pod transition is considered to be the first. + CPUInitializationPeriod *metav1.Duration + // The period since last downscale, before another downscale can be performed in horizontal pod autoscaler. + DownscaleDelay *metav1.Duration + // The configurable window at which the controller will choose the highest recommendation for autoscaling. + DownscaleStabilization *metav1.Duration + // The configurable period at which the horizontal pod autoscaler considers a Pod “not yet ready” given that it’s unready and it has transitioned to unready during that time. + InitialReadinessDelay *metav1.Duration + // The period for syncing the number of pods in horizontal pod autoscaler. + SyncPeriod *metav1.Duration + // The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling. + Tolerance *float64 + // The period since last upscale, before another upscale can be performed in horizontal pod autoscaler. + UpscaleDelay *metav1.Duration +} + +const ( + // DefaultHPADownscaleDelay is a constant for the default HPA downscale delay for a Shoot cluster. + DefaultHPADownscaleDelay = 15 * time.Minute + // DefaultHPASyncPeriod is a constant for the default HPA sync period for a Shoot cluster. + DefaultHPASyncPeriod = 30 * time.Second + // DefaultHPATolerance is a constant for the default HPA tolerance for a Shoot cluster. + DefaultHPATolerance = 0.1 + // DefaultHPAUpscaleDelay is for the default HPA upscale delay for a Shoot cluster. + DefaultHPAUpscaleDelay = 1 * time.Minute + // DefaultDownscaleStabilization is the default HPA downscale stabilization window for a Shoot cluster + DefaultDownscaleStabilization = 5 * time.Minute + // DefaultInitialReadinessDelay is for the default HPA ReadinessDelay value in the Shoot cluster + DefaultInitialReadinessDelay = 30 * time.Second + // DefaultCPUInitializationPeriod is the for the default value of the CPUInitializationPeriod in the Shoot cluster + DefaultCPUInitializationPeriod = 5 * time.Minute +) + +// KubeSchedulerConfig contains configuration settings for the kube-scheduler. +type KubeSchedulerConfig struct { + KubernetesConfig +} + +// KubeProxyConfig contains configuration settings for the kube-proxy. +type KubeProxyConfig struct { + KubernetesConfig + // Mode specifies which proxy mode to use. + // defaults to IPTables. + Mode *ProxyMode +} + +// ProxyMode available in Linux platform: 'userspace' (older, going to be EOL), 'iptables' +// (newer, faster), 'ipvs' (newest, better in performance and scalability). +// As of now only 'iptables' and 'ipvs' is supported by Gardener. +// In Linux platform, if the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are +// insufficient, this always falls back to the userspace proxy. IPVS mode will be enabled when proxy mode is set to 'ipvs', +// and the fall back path is firstly iptables and then userspace. +type ProxyMode string + +const ( + // ProxyModeIPTables uses iptables as proxy implementation. + ProxyModeIPTables ProxyMode = "IPTables" + // ProxyModeIPVS uses ipvs as proxy implementation. + ProxyModeIPVS ProxyMode = "IPVS" +) + +// KubeletConfig contains configuration settings for the kubelet. +type KubeletConfig struct { + KubernetesConfig + // CPUCFSQuota allows you to disable/enable CPU throttling for Pods. + CPUCFSQuota *bool + // CPUManagerPolicy allows to set alternative CPU management policies (default: none). 
+ CPUManagerPolicy *string + // EvictionHard describes a set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a Pod eviction. + // Default: + // memory.available: "100Mi/1Gi/5%" + // nodefs.available: "5%" + // nodefs.inodesFree: "5%" + // imagefs.available: "5%" + // imagefs.inodesFree: "5%" + EvictionHard *KubeletConfigEviction + // EvictionMaxPodGracePeriod describes the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. + // Default: 90 + EvictionMaxPodGracePeriod *int32 + // EvictionMinimumReclaim configures the amount of resources below the configured eviction threshold that the kubelet attempts to reclaim whenever the kubelet observes resource pressure. + // Default: 0 for each resource + EvictionMinimumReclaim *KubeletConfigEvictionMinimumReclaim + // EvictionPressureTransitionPeriod is the duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. + // Default: 4m0s + EvictionPressureTransitionPeriod *metav1.Duration + // EvictionSoft describes a set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a Pod eviction. + // Default: + // memory.available: "200Mi/1.5Gi/10%" + // nodefs.available: "10%" + // nodefs.inodesFree: "10%" + // imagefs.available: "10%" + // imagefs.inodesFree: "10%" + EvictionSoft *KubeletConfigEviction + // EvictionSoftGracePeriod describes a set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a Pod eviction. + // Default: + // memory.available: 1m30s + // nodefs.available: 1m30s + // nodefs.inodesFree: 1m30s + // imagefs.available: 1m30s + // imagefs.inodesFree: 1m30s + EvictionSoftGracePeriod *KubeletConfigEvictionSoftGracePeriod + // MaxPods is the maximum number of Pods that are allowed by the Kubelet. + // Default: 110 + MaxPods *int32 + // PodPIDsLimit is the maximum number of process IDs per pod allowed by the kubelet. + PodPIDsLimit *int64 +} + +// KubeletConfigEviction contains kubelet eviction thresholds supporting either a resource.Quantity or a percentage based value. +type KubeletConfigEviction struct { + // MemoryAvailable is the threshold for the free memory on the host server. + MemoryAvailable *string + // ImageFSAvailable is the threshold for the free disk space in the imagefs filesystem (docker images and container writable layers). + ImageFSAvailable *string + // ImageFSInodesFree is the threshold for the available inodes in the imagefs filesystem. + ImageFSInodesFree *string + // NodeFSAvailable is the threshold for the free disk space in the nodefs filesystem (docker volumes, logs, etc). + NodeFSAvailable *string + // NodeFSInodesFree is the threshold for the available inodes in the nodefs filesystem. + NodeFSInodesFree *string +} + +// KubeletConfigEvictionMinimumReclaim contains configuration for the kubelet eviction minimum reclaim. +type KubeletConfigEvictionMinimumReclaim struct { + // MemoryAvailable is the threshold for the memory reclaim on the host server. + MemoryAvailable *resource.Quantity + // ImageFSAvailable is the threshold for the disk space reclaim in the imagefs filesystem (docker images and container writable layers). + ImageFSAvailable *resource.Quantity + // ImageFSInodesFree is the threshold for the inodes reclaim in the imagefs filesystem. 
+ ImageFSInodesFree *resource.Quantity + // NodeFSAvailable is the threshold for the disk space reclaim in the nodefs filesystem (docker volumes, logs, etc). + NodeFSAvailable *resource.Quantity + // NodeFSInodesFree is the threshold for the inodes reclaim in the nodefs filesystem. + NodeFSInodesFree *resource.Quantity +} + +// KubeletConfigEvictionSoftGracePeriod contains grace periods for kubelet eviction thresholds. +type KubeletConfigEvictionSoftGracePeriod struct { + // MemoryAvailable is the grace period for the MemoryAvailable eviction threshold. + MemoryAvailable *metav1.Duration + // ImageFSAvailable is the grace period for the ImageFSAvailable eviction threshold. + ImageFSAvailable *metav1.Duration + // ImageFSInodesFree is the grace period for the ImageFSInodesFree eviction threshold. + ImageFSInodesFree *metav1.Duration + // NodeFSAvailable is the grace period for the NodeFSAvailable eviction threshold. + NodeFSAvailable *metav1.Duration + // NodeFSInodesFree is the grace period for the NodeFSInodesFree eviction threshold. + NodeFSInodesFree *metav1.Duration +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Networking relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Networking defines networking parameters for the shoot cluster. +type Networking struct { + // Type identifies the type of the networking plugin. + Type string + // ProviderConfig is the configuration passed to network resource. + ProviderConfig *ProviderConfig + // Pods is the CIDR of the pod network. + Pods *string + // Nodes is the CIDR of the entire node network. + Nodes *string + // Services is the CIDR of the service network. + Services *string +} + +const ( + // DefaultPodNetworkCIDR is a constant for the default pod network CIDR of a Shoot cluster. + DefaultPodNetworkCIDR = "100.96.0.0/11" + // DefaultServiceNetworkCIDR is a constant for the default service network CIDR of a Shoot cluster. + DefaultServiceNetworkCIDR = "100.64.0.0/13" +) + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Maintenance relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Maintenance contains information about the time window for maintenance operations and which +// operations should be performed. +type Maintenance struct { + // AutoUpdate contains information about which constraints should be automatically updated. + AutoUpdate *MaintenanceAutoUpdate + // TimeWindow contains information about the time window for maintenance operations. + TimeWindow *MaintenanceTimeWindow +} + +// MaintenanceAutoUpdate contains information about which constraints should be automatically updated. +type MaintenanceAutoUpdate struct { + // KubernetesVersion indicates whether the patch Kubernetes version may be automatically updated (default: true). + KubernetesVersion bool + // MachineImageVersion indicates whether the machine image version may be automatically updated (default: true). + MachineImageVersion bool +} + +// MaintenanceTimeWindow contains information about the time window for maintenance operations. +type MaintenanceTimeWindow struct { + // Begin is the beginning of the time window in the format HHMMSS+ZONE, e.g. "220000+0100". + // If not present, a random value will be computed. + Begin string + // End is the end of the time window in the format HHMMSS+ZONE, e.g. "220000+0100". 
+ // If not present, the value will be computed based on the "Begin" value. + End string +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Monitoring relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Monitoring contains information about the monitoring configuration for the shoot. +type Monitoring struct { + // Alerting contains information about the alerting configuration for the shoot cluster. + Alerting *Alerting +} + +// Alerting contains information about how alerting will be done (i.e. who will receive alerts and how). +type Alerting struct { + // MonitoringEmailReceivers is a list of recipients for alerts + EmailReceivers []string +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Provider relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Provider contains provider-specific information that are handed-over to the provider-specific +// extension controller. +type Provider struct { + // Type is the type of the provider. + Type string + // ControlPlaneConfig contains the provider-specific control plane config blob. Please look up the concrete + // definition in the documentation of your provider extension. + ControlPlaneConfig *ProviderConfig + // InfrastructureConfig contains the provider-specific infrastructure config blob. Please look up the concrete + // definition in the documentation of your provider extension. + InfrastructureConfig *ProviderConfig + // Workers is a list of worker groups. + Workers []Worker +} + +// Worker is the base definition of a worker group. +type Worker struct { + // Annotations is a map of key/value pairs for annotations for all the `Node` objects in this worker pool. + Annotations map[string]string + // CABundle is a certificate bundle which will be installed onto every machine of this worker pool. + CABundle *string + // Kubernetes contains configuration for Kubernetes components related to this worker pool. + Kubernetes *WorkerKubernetes + // Labels is a map of key/value pairs for labels for all the `Node` objects in this worker pool. + Labels map[string]string + // Name is the name of the worker group. + Name string + // Machine contains information about the machine type and image. + Machine Machine + // Maximum is the maximum number of VMs to create. + Maximum int32 + // Minimum is the minimum number of VMs to create. + Minimum int32 + // MaxSurge is maximum number of VMs that are created during an update. + MaxSurge *intstr.IntOrString + // MaxUnavailable is the maximum number of VMs that can be unavailable during an update. + MaxUnavailable *intstr.IntOrString + // ProviderConfig is the provider-specific configuration for this worker pool. + ProviderConfig *ProviderConfig + // Taints is a list of taints for all the `Node` objects in this worker pool. + Taints []corev1.Taint + // Volume contains information about the volume type and size. + Volume *Volume + // DataVolumes contains a list of additional worker volumes. + DataVolumes []Volume + // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state. + KubeletDataVolumeName *string + // Zones is a list of availability zones that are used to evenly distribute this worker pool. Optional + // as not every provider may support availability zones. 
+ Zones []string +} + +// WorkerKubernetes contains configuration for Kubernetes components related to this worker pool. +type WorkerKubernetes struct { + // Kubelet contains configuration settings for all kubelets of this worker pool. + Kubelet *KubeletConfig +} + +// Machine contains information about the machine type and image. +type Machine struct { + // Type is the machine type of the worker group. + Type string + // Image holds information about the machine image to use for all nodes of this pool. It will default to the + // latest version of the first image stated in the referenced CloudProfile if no value has been provided. + Image *ShootMachineImage +} + +// ShootMachineImage defines the name and the version of the shoot's machine image in any environment. Has to be +// defined in the respective CloudProfile. +type ShootMachineImage struct { + // Name is the name of the image. + Name string + // ProviderConfig is the shoot's individual configuration passed to an extension resource. + ProviderConfig *ProviderConfig + // Version is the version of the shoot's image. + Version string +} + +// Volume contains information about the volume type and size. +type Volume struct { + // Name of the volume to make it referencable. + Name *string + // Type is the type of the volume. + Type *string + // Size is the size of the volume. + Size string + // Encrypted determines if the volume should be encrypted. + Encrypted *bool +} + +var ( + // DefaultWorkerMaxSurge is the default value for Worker MaxSurge. + DefaultWorkerMaxSurge = intstr.FromInt(1) + // DefaultWorkerMaxUnavailable is the default value for Worker MaxUnavailable. + DefaultWorkerMaxUnavailable = intstr.FromInt(0) +) + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Other/miscellaneous constants and types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +const ( + // ShootEventMaintenanceDone indicates that a maintenance operation has been performed. + ShootEventMaintenanceDone = "MaintenanceDone" + // ShootEventMaintenanceError indicates that a maintenance operation has failed. + ShootEventMaintenanceError = "MaintenanceError" + + // ShootEventSchedulingSuccessful indicates that a scheduling decision was taken successfully. + ShootEventSchedulingSuccessful = "SchedulingSuccessful" + // ShootEventSchedulingFailed indicates that a scheduling decision failed. + ShootEventSchedulingFailed = "SchedulingFailed" +) + +const ( + // ShootAPIServerAvailable is a constant for a condition type indicating that the Shoot cluster's API server is available. + ShootAPIServerAvailable ConditionType = "APIServerAvailable" + // ShootControlPlaneHealthy is a constant for a condition type indicating the control plane health. + ShootControlPlaneHealthy ConditionType = "ControlPlaneHealthy" + // ShootEveryNodeReady is a constant for a condition type indicating the node health. + ShootEveryNodeReady ConditionType = "EveryNodeReady" + // ShootSystemComponentsHealthy is a constant for a condition type indicating the system components health. + ShootSystemComponentsHealthy ConditionType = "SystemComponentsHealthy" + // ShootHibernationPossible is a constant for a condition type indicating whether the Shoot can be hibernated. + ShootHibernationPossible ConditionType = "HibernationPossible" +) + +// DNSUnmanaged is a constant for the 'unmanaged' DNS provider. +const DNSUnmanaged string = "unmanaged" + +// ShootPurpose is a type alias for string. 
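// Illustrative sketch, not part of the vendored file: a minimal worker pool built from
// the Worker, Machine and Volume types above, using the default surge/unavailability
// values. The machine type, volume type and zone names are made-up example values.
func exampleWorker() Worker {
	volumeType := "gp2"
	return Worker{
		Name:           "worker-pool-1",
		Machine:        Machine{Type: "m5.large"},
		Minimum:        2,
		Maximum:        4,
		MaxSurge:       &DefaultWorkerMaxSurge,
		MaxUnavailable: &DefaultWorkerMaxUnavailable,
		Volume:         &Volume{Type: &volumeType, Size: "50Gi"},
		Zones:          []string{"eu-west-1a", "eu-west-1b"},
	}
}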
+type ShootPurpose string + +const ( + // ShootPurposeEvaluation is a constant for the evaluation purpose. + ShootPurposeEvaluation ShootPurpose = "evaluation" + // ShootPurposeTesting is a constant for the testing purpose. + ShootPurposeTesting ShootPurpose = "testing" + // ShootPurposeDevelopment is a constant for the development purpose. + ShootPurposeDevelopment ShootPurpose = "development" + // ShootPurposeProduction is a constant for the production purpose. + ShootPurposeProduction ShootPurpose = "production" + // ShootPurposeInfrastructure is a constant for the infrastructure purpose. + ShootPurposeInfrastructure ShootPurpose = "infrastructure" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_shootstate.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shootstate.go new file mode 100644 index 000000000..e13bb73d1 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shootstate.go @@ -0,0 +1,71 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ShootState contains the state of a Shoot cluster required to migrate the Shoot's control plane to a new Seed. +type ShootState struct { + metav1.TypeMeta + // Standard object metadata. + metav1.ObjectMeta + // Specification of the ShootState. + Spec ShootStateSpec +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ShootStateList is a list of ShootState objects. +type ShootStateList struct { + metav1.TypeMeta + // Standard list object metadata. + metav1.ListMeta + // Items is the list of ShootStates. + Items []ShootState +} + +// ShootStateSpec is the specification of the ShootState. +type ShootStateSpec struct { + // Gardener holds the data required to generate resources deployed by the gardenlet + Gardener []GardenerResourceData + // Extensions holds the state of custom resources reconciled by extension controllers in the seed + Extensions []ExtensionResourceState +} + +// GardenerResourceData holds the data which is used to generate resources, deployed in the Shoot's control plane. +type GardenerResourceData struct { + // Name of the object required to generate resources + Name string + // Data contains the payload required to generate resources + Data map[string]string +} + +// ExtensionResourceState contains the kind of the extension custom resource and its last observed state in the Shoot's +// namespace on the Seed cluster. 
+type ExtensionResourceState struct { + // Kind (type) of the extension custom resource + Kind string + // Name of the extension custom resource + Name *string + // Purpose of the extension custom resource + Purpose *string + // State of the extension resource + State ProviderConfig +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_utils.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_utils.go new file mode 100644 index 000000000..370ed9744 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_utils.go @@ -0,0 +1,67 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// ProviderConfig is a workaround for missing OpenAPI functions on runtime.RawExtension struct. +// https://github.com/kubernetes/kubernetes/issues/55890 +// https://github.com/kubernetes-sigs/cluster-api/issues/137 +type ProviderConfig struct { + runtime.RawExtension +} + +// Condition holds the information about the state of a resource. +type Condition struct { + // Type of the Shoot condition. + Type ConditionType + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time + // Last time the condition was updated. + LastUpdateTime metav1.Time + // The reason for the condition's last transition. + Reason string + // A human readable message indicating details about the transition. + Message string +} + +// ConditionStatus is the status of a condition. +type ConditionStatus string + +// ConditionType is a string alias. +type ConditionType string + +const ( + // ConditionAvailable is a condition type for indicating availability. + ConditionAvailable ConditionType = "Available" + + // ConditionTrue means a resource is in the condition. + ConditionTrue ConditionStatus = "True" + // ConditionFalse means a resource is not in the condition. + ConditionFalse ConditionStatus = "False" + // ConditionUnknown means Gardener can't decide if a resource is in the condition or not. + ConditionUnknown ConditionStatus = "Unknown" + // ConditionProgressing means the condition was seen true, failed but stayed within a predefined failure threshold. + // In the future, we could add other intermediate conditions, e.g. ConditionDegraded. + ConditionProgressing ConditionStatus = "Progressing" + + // ConditionCheckError is a constant for a reason in condition. 
+ ConditionCheckError = "ConditionCheckError" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go new file mode 100644 index 000000000..55d9726e3 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go @@ -0,0 +1,266 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package constants + +const ( + // SecretNameCACluster is a constant for the name of a Kubernetes secret object that contains the CA + // certificate of a shoot cluster. + SecretNameCACluster = "ca" + // SecretNameCAETCD is a constant for the name of a Kubernetes secret object that contains the CA + // certificate of the etcd of a shoot cluster. + SecretNameCAETCD = "ca-etcd" + // SecretNameCAFrontProxy is a constant for the name of a Kubernetes secret object that contains the CA + // certificate of the kube-aggregator of a shoot cluster. + SecretNameCAFrontProxy = "ca-front-proxy" + // SecretNameCAKubelet is a constant for the name of a Kubernetes secret object that contains the CA + // certificate of the kubelet of a shoot cluster. + SecretNameCAKubelet = "ca-kubelet" + // SecretNameCAMetricsServer is a constant for the name of a Kubernetes secret object that contains the CA + // certificate of the metrics-server of a shoot cluster. + SecretNameCAMetricsServer = "ca-metrics-server" + // SecretNameCloudProvider is a constant for the name of a Kubernetes secret object that contains the provider + // specific credentials that shall be used to create/delete the shoot. + SecretNameCloudProvider = "cloudprovider" + // SecretNameSSHKeyPair is a constant for the name of a Kubernetes secret object that contains the SSH key pair + // (public and private key) that can be used to SSH into the shoot nodes. + SecretNameSSHKeyPair = "ssh-keypair" + + // SecretNameGardener is a constant for the name of a Kubernetes secret object that contains the client + // certificate and a kubeconfig for a shoot cluster. It is used by Gardener and can be used by extension + // controllers in order to communicate with the shoot's API server. The client certificate has administrator + // privileges. + SecretNameGardener = "gardener" + // SecretNameGardenerInternal is a constant for the name of a Kubernetes secret object that contains the client + // certificate and a kubeconfig for a shoot cluster. It is used by Gardener and can be used by extension + // controllers in order to communicate with the shoot's API server. The client certificate has administrator + // privileges. The difference to the "gardener" secret is that it contains the in-cluster endpoint as the address + // for the shoot API server instead of the DNS name or load balancer address.
+ SecretNameGardenerInternal = "gardener-internal" + + // DeploymentNameClusterAutoscaler is a constant for the name of a Kubernetes deployment object that contains + // the cluster-autoscaler pod. + DeploymentNameClusterAutoscaler = "cluster-autoscaler" + // DeploymentNameKubeAPIServer is a constant for the name of a Kubernetes deployment object that contains + // the kube-apiserver pod. + DeploymentNameKubeAPIServer = "kube-apiserver" + // DeploymentNameKubeControllerManager is a constant for the name of a Kubernetes deployment object that contains + // the kube-controller-manager pod. + DeploymentNameKubeControllerManager = "kube-controller-manager" + + // DeploymentNameKubeScheduler is a constant for the name of a Kubernetes deployment object that contains + // the kube-scheduler pod. + DeploymentNameKubeScheduler = "kube-scheduler" + // DeploymentNameGardenerResourceManager is a constant for the name of a Kubernetes deployment object that contains + // the gardener-resource-manager pod. + DeploymentNameGardenerResourceManager = "gardener-resource-manager" + // DeploymentNameGrafanaOperators is a constant for the name of a Kubernetes deployment object that contains + // the grafana-operators pod. + DeploymentNameGrafanaOperators = "grafana-operators" + // DeploymentNameGrafanaUsers is a constant for the name of a Kubernetes deployment object that contains + // the grafana-users pod. + DeploymentNameGrafanaUsers = "grafana-users" + // DeploymentNameKubeStateMetricsShoot is a constant for the name of a Kubernetes deployment object that contains + // the kube-state-metrics pod. + DeploymentNameKubeStateMetricsShoot = "kube-state-metrics" + // DeploymentNameKubeStateMetricsSeed is a constant for the name of a Kubernetes deployment object that contains + // the kube-state-metrics-seed pod. + DeploymentNameKubeStateMetricsSeed = "kube-state-metrics-seed" + // DeploymentNameKibana is a constant for the name of a Kubernetes deployment object that contains + // the kibana-logging pod. + DeploymentNameKibana = "kibana-logging" + + // StatefulSetNameAlertManager is a constant for the name of a Kubernetes stateful set object that contains + // the alertmanager pod. + StatefulSetNameAlertManager = "alertmanager" + // ETCDMain is a constant for the name of etcd-main Etcd object. + ETCDMain = "etcd-main" + // ETCDEvents is a constant for the name of etcd-events Etcd object. + ETCDEvents = "etcd-events" + // StatefulSetNameElasticSearch is a constant for the name of a Kubernetes stateful set object that contains + // the elasticsearch-logging pod. + StatefulSetNameElasticSearch = "elasticsearch-logging" + // StatefulSetNamePrometheus is a constant for the name of a Kubernetes stateful set object that contains + // the prometheus pod. + StatefulSetNamePrometheus = "prometheus" + + // GardenerPurpose is a constant for the key in a label describing the purpose of the respective object. + GardenerPurpose = "gardener.cloud/purpose" + + // GardenerOperation is a constant for an annotation on a resource that describes a desired operation. + GardenerOperation = "gardener.cloud/operation" + // GardenerOperationReconcile is a constant for the value of the operation annotation describing a reconcile + // operation. + GardenerOperationReconcile = "reconcile" + // GardenerOperationMigrate is a constant for the value of the operation annotation describing a migration + // operation. 
+ GardenerOperationMigrate = "migrate" + // GardenerOperationRestore is a constant for the value of the operation annotation describing a restoration + // operation. + GardenerOperationRestore = "restore" + + // DeprecatedGardenRole is the key for an annotation on a Kubernetes object indicating what it is used for. + // + // Deprecated: Use `GardenRole` instead. + DeprecatedGardenRole = "garden.sapcloud.io/role" + // GardenRole is a constant for a label that describes a role. + GardenRole = "gardener.cloud/role" + // GardenRoleExtension is a constant for a label that describes the 'extensions' role. + GardenRoleExtension = "extension" + // GardenRoleSeed is the value of the GardenRole key indicating type 'seed'. + GardenRoleSeed = "seed" + // GardenRoleShoot is the value of the GardenRole key indicating type 'shoot'. + GardenRoleShoot = "shoot" + // GardenRoleLogging is the value of the GardenRole key indicating type 'logging'. + GardenRoleLogging = "logging" + // GardenRoleProject is the value of GardenRole key indicating type 'project'. + GardenRoleProject = "project" + // GardenRoleControlPlane is the value of the GardenRole key indicating type 'controlplane'. + GardenRoleControlPlane = "controlplane" + // GardenRoleSystemComponent is the value of the GardenRole key indicating type 'system-component'. + GardenRoleSystemComponent = "system-component" + // GardenRoleMonitoring is the value of the GardenRole key indicating type 'monitoring'. + GardenRoleMonitoring = "monitoring" + // GardenRoleOptionalAddon is the value of the GardenRole key indicating type 'optional-addon'. + GardenRoleOptionalAddon = "optional-addon" + + // DeprecatedShootUID is an annotation key for the shoot namespace in the seed cluster, + // which value will be the value of `shoot.status.uid` + // +deprecated: Use `Cluster` resource instead. + DeprecatedShootUID = "shoot.garden.sapcloud.io/uid" + + // SeedResourceManagerClass is the resource-class managed by the Gardener-Resource-Manager + // instance in the garden namespace on the seeds. + SeedResourceManagerClass = "seed" + // LabelBackupProvider is used to identify the backup provider. + LabelBackupProvider = "backup.gardener.cloud/provider" + // LabelSeedProvider is used to identify the seed provider. + LabelSeedProvider = "seed.gardener.cloud/provider" + // LabelShootProvider is used to identify the shoot provider. + LabelShootProvider = "shoot.gardener.cloud/provider" + // LabelNetworkingProvider is used to identify the networking provider for the cni plugin. + LabelNetworkingProvider = "networking.shoot.gardener.cloud/provider" + // LabelExtensionConfiguration is used to identify the provider's configuration which will be added to Gardener configuration + LabelExtensionConfiguration = "extensions.gardener.cloud/configuration" + // LabelLogging is a constant for a label for logging stack configurations + LabelLogging = "logging" + // LabelMonitoring is a constant for a label for monitoring stack configurations + LabelMonitoring = "monitoring" + + // LabelNetworkPolicyToBlockedCIDRs allows Egress from pods labeled with 'networking.gardener.cloud/to-blocked-cidrs=allowed'. + LabelNetworkPolicyToBlockedCIDRs = "networking.gardener.cloud/to-blocked-cidrs" + // LabelNetworkPolicyToDNS allows Egress from pods labeled with 'networking.gardener.cloud/to-dns=allowed' to DNS running in 'kube-system'. + // In practice, most of the Pods which require network Egress need this label. 
+ LabelNetworkPolicyToDNS = "networking.gardener.cloud/to-dns" + // LabelNetworkPolicyToPrivateNetworks allows Egress from pods labeled with 'networking.gardener.cloud/to-private-networks=allowed' to the + // private networks (RFC1918), Carrier-grade NAT (RFC6598) except for cloudProvider's specific metadata service IP, seed networks, + // shoot networks. + LabelNetworkPolicyToPrivateNetworks = "networking.gardener.cloud/to-private-networks" + // LabelNetworkPolicyToPublicNetworks allows Egress from pods labeled with 'networking.gardener.cloud/to-public-networks=allowed' to all public + // network IPs, except for private networks (RFC1918), carrier-grade NAT (RFC6598), cloudProvider's specific metadata service IP. + // In practice, this blocks Egress traffic to all networks in the Seed cluster and only traffic to public IPv4 addresses. + LabelNetworkPolicyToPublicNetworks = "networking.gardener.cloud/to-public-networks" + // LabelNetworkPolicyToSeedAPIServer allows Egress from pods labeled with 'networking.gardener.cloud/to-seed-apiserver=allowed' to Seed's Kubernetes + // API Server. + LabelNetworkPolicyToSeedAPIServer = "networking.gardener.cloud/to-seed-apiserver" + // LabelNetworkPolicyToShootAPIServer allows Egress from pods labeled with 'networking.gardener.cloud/to-shoot-apiserver=allowed' to talk to Shoot's + // Kubernetes API Server. + LabelNetworkPolicyToShootAPIServer = "networking.gardener.cloud/to-shoot-apiserver" + // LabelNetworkPolicyToAll disables all Ingress and Egress traffic into/from this namespace when set to "disallowed". + LabelNetworkPolicyToAll = "networking.gardener.cloud/to-all" + // LabelNetworkPolicyToElasticSearch allows Ingress to the ElasticSearch API pods labeled with 'networking.gardener.cloud/to-elasticsearch=allowed', + // and fluentd in 'garden' namespace. + LabelNetworkPolicyToElasticSearch = "networking.gardener.cloud/to-elasticsearch" + // LabelNetworkPolicyFromPrometheus allows Ingress from Prometheus to pods labeled with 'networking.gardener.cloud/from-prometheus=allowed' and ports + // named 'metrics' in the PodSpecification. + LabelNetworkPolicyFromPrometheus = "networking.gardener.cloud/from-prometheus" + // LabelNetworkPolicyAllowed is a constant for allowing a network policy. + LabelNetworkPolicyAllowed = "allowed" + // LabelNetworkPolicyDisallowed is a constant for disallowing a network policy. + LabelNetworkPolicyDisallowed = "disallowed" + + // LabelApp is a constant for a label key. + LabelApp = "app" + // LabelRole is a constant for a label key. + LabelRole = "role" + // LabelKubernetes is a constant for a label for Kubernetes workload. + LabelKubernetes = "kubernetes" + // LabelAPIServer is a constant for a label for the kube-apiserver. + LabelAPIServer = "apiserver" + // LabelControllerManager is a constant for a label for the kube-controller-manager. + LabelControllerManager = "controller-manager" + // LabelScheduler is a constant for a label for the kube-scheduler. + LabelScheduler = "scheduler" + // LabelExtensionProjectRole is a constant for a label value for extension project roles + LabelExtensionProjectRole = "extension-project-role" + + // LabelAPIServerExposure is a constant for label key which gardener can add to various objects related + // to kube-apiserver exposure. 
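// Illustrative sketch, not part of the vendored file: pod labels that opt a control
// plane component into the network policies described above (DNS and Seed API server
// egress). The function name is made up for illustration.
func exampleNetworkPolicyLabels() map[string]string {
	return map[string]string{
		LabelNetworkPolicyToDNS:           LabelNetworkPolicyAllowed,
		LabelNetworkPolicyToSeedAPIServer: LabelNetworkPolicyAllowed,
	}
}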
+ LabelAPIServerExposure = "core.gardener.cloud/apiserver-exposure" + // LabelAPIServerExposureGardenerManaged is a constant for label value which gardener sets on the label key + // "core.gardener.cloud/apiserver-exposure" to indicate that it's responsible for apiserver exposure (via SNI). + LabelAPIServerExposureGardenerManaged = "gardener-managed" + + // GardenNamespace is the namespace in which the configuration and secrets for + // the Gardener controller manager will be stored (e.g., secrets for the Seed clusters). + // It is also used by the gardener-apiserver. + GardenNamespace = "garden" + + // AnnotationShootUseAsSeed is a constant for an annotation on a Shoot resource indicating that the Shoot shall be registered as Seed in the + // Garden cluster once successfully created. + AnnotationShootUseAsSeed = "shoot.gardener.cloud/use-as-seed" + // AnnotationShootUseAsSeedDeprecated is a constant for an annotation on a Shoot resource indicating that the Shoot shall be registered as Seed in the + // Garden cluster once successfully created. + // + // Deprecated: Use `AnnotationShootUseAsSeed` instead. + AnnotationShootUseAsSeedDeprecated = "shoot.garden.sapcloud.io/use-as-seed" + // AnnotationShootIgnoreAlerts is the key for an annotation of a Shoot cluster whose value indicates + // if alerts for this cluster should be ignored + AnnotationShootIgnoreAlerts = "shoot.gardener.cloud/ignore-alerts" + // AnnotationShootIgnoreAlertsDeprecated is the key for an annotation of a Shoot cluster whose value indicates + // if alerts for this cluster should be ignored + // + // Deprecated: Use `AnnotationShootIgnoreAlerts` instead. + AnnotationShootIgnoreAlertsDeprecated = "shoot.garden.sapcloud.io/ignore-alerts" + // AnnotationShootSkipCleanup is a key for an annotation on a Shoot resource that declares that the clean up steps should be skipped when the + // cluster is deleted. Concretely, this will skip everything except the deletion of (load balancer) services and persistent volume resources. + AnnotationShootSkipCleanup = "shoot.gardener.cloud/skip-cleanup" + + // OperatingSystemConfigUnitNameKubeletService is a constant for a unit in the operating system config that contains the kubelet service. + OperatingSystemConfigUnitNameKubeletService = "kubelet.service" + // OperatingSystemConfigFilePathKernelSettings is a constant for a path to a file in the operating system config that contains some general kernel settings. + OperatingSystemConfigFilePathKernelSettings = "/etc/sysctl.d/99-k8s-general.conf" + // OperatingSystemConfigFilePathKubeletConfig is a constant for a path to a file in the operating system config that contains the kubelet configuration. 
+ OperatingSystemConfigFilePathKubeletConfig = "/var/lib/kubelet/config/kubelet" + + // FluentBitConfigMapKubernetesFilter is a constant for the Fluent Bit ConfigMap's section regarding Kubernetes filters + FluentBitConfigMapKubernetesFilter = "filter-kubernetes.conf" + // FluentBitConfigMapParser is a constant for the Fluent Bit ConfigMap's section regarding Parsers for common container types + FluentBitConfigMapParser = "parsers.conf" + // PrometheusConfigMapAlertingRules is a constant for the Prometheus alerting rules tag in provider-specific monitoring configuration + PrometheusConfigMapAlertingRules = "alerting_rules" + // PrometheusConfigMapScrapeConfig is a constant for the Prometheus scrape config tag in provider-specific monitoring configuration + PrometheusConfigMapScrapeConfig = "scrape_config" + // GrafanaConfigMapUserDashboard is a constant for the Grafana user dashboard tag in provider-specific monitoring configuration + GrafanaConfigMapUserDashboard = "dashboard_users" + // GrafanaConfigMapOperatorDashboard is a constant for the Grafana operator dashboard tag in provider-specific monitoring configuration + GrafanaConfigMapOperatorDashboard = "dashboard_operators" + + // LabelControllerRegistrationName is the key of a label on extension namespaces that indicates the controller registration name. + LabelControllerRegistrationName = "controllerregistration.core.gardener.cloud/name" + + // EventResourceReferenced indicates that the resource deletion is in waiting mode because the resource is still + // being referenced by at least one other resource (e.g. a SecretBinding is still referenced by a Shoot) + EventResourceReferenced = "ResourceReferenced" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/utils.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/utils.go new file mode 100644 index 000000000..e6862fec2 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/utils.go @@ -0,0 +1,36 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package constants + +// GetShootUseAsSeedAnnotation fetches the value of the AnnotationShootUseAsSeed annotation. +// If it is not present, it falls back to AnnotationShootUseAsSeedDeprecated. +func GetShootUseAsSeedAnnotation(annotations map[string]string) (string, bool) { + return getDeprecatedAnnotation(annotations, AnnotationShootUseAsSeed, AnnotationShootUseAsSeedDeprecated) +} + +// GetShootIgnoreAlertsAnnotation fetches the value of the AnnotationShootIgnoreAlerts annotation. +// If it is not present, it falls back to AnnotationShootIgnoreAlertsDeprecated.
+func GetShootIgnoreAlertsAnnotation(annotations map[string]string) (string, bool) { + return getDeprecatedAnnotation(annotations, AnnotationShootIgnoreAlerts, AnnotationShootIgnoreAlertsDeprecated) +} + +func getDeprecatedAnnotation(annotations map[string]string, annotationKey, deprecatedAnnotationKey string) (string, bool) { + val, ok := annotations[annotationKey] + if !ok { + val, ok = annotations[deprecatedAnnotationKey] + } + + return val, ok +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/conversions.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/conversions.go new file mode 100644 index 000000000..8c5a3058c --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/conversions.go @@ -0,0 +1,163 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + "fmt" + + "github.com/gardener/gardener/pkg/apis/core" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + if err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Shoot"), + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", core.ShootSeedName, core.ShootCloudProfileName: + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ); err != nil { + return err + } + + // Add non-generated conversion functions + return scheme.AddConversionFuncs() +} + +func Convert_v1beta1_ProjectSpec_To_core_ProjectSpec(in *ProjectSpec, out *core.ProjectSpec, s conversion.Scope) error { + if err := autoConvert_v1beta1_ProjectSpec_To_core_ProjectSpec(in, out, s); err != nil { + return err + } + + if owner := out.Owner; owner != nil { + outer: + for i, member := range out.Members { + if member.Name == owner.Name && member.APIGroup == owner.APIGroup && member.Kind == owner.Kind { + // add owner role to the current project's owner if not present + for _, role := range member.Roles { + if role == core.ProjectMemberOwner { + continue outer + } + } + + out.Members[i].Roles = append(out.Members[i].Roles, core.ProjectMemberOwner) + } else { + // delete owner role from all other members + out.Members[i].Roles = removeRoleFromRoles(member.Roles, ProjectMemberOwner) + } + } + } + + return nil +} + +func Convert_core_ProjectSpec_To_v1beta1_ProjectSpec(in *core.ProjectSpec, out *ProjectSpec, s conversion.Scope) error { + if err := autoConvert_core_ProjectSpec_To_v1beta1_ProjectSpec(in, out, s); err != nil { + return err + } + + if owner := out.Owner; owner != nil { + outer: + for i, member := range out.Members { + if member.Name == owner.Name && member.APIGroup == owner.APIGroup && member.Kind == owner.Kind { + // add owner role to the current project's owner if not 
present + if member.Role == core.ProjectMemberOwner { + // remove it from owners list if present + out.Members[i].Roles = removeRoleFromRoles(member.Roles, ProjectMemberOwner) + continue outer + } + for _, role := range member.Roles { + if role == ProjectMemberOwner { + continue outer + } + } + + if out.Members[i].Role == "" { + out.Members[i].Role = core.ProjectMemberOwner + } else { + out.Members[i].Roles = append(out.Members[i].Roles, core.ProjectMemberOwner) + } + } else { + // delete owner role from all other members + out.Members[i].Roles = removeRoleFromRoles(member.Roles, ProjectMemberOwner) + + if member.Role == ProjectMemberOwner { + if len(out.Members[i].Roles) == 0 { + out.Members[i].Role = "" + } else { + out.Members[i].Role = out.Members[i].Roles[0] + if len(out.Members[i].Roles) > 1 { + out.Members[i].Roles = out.Members[i].Roles[1:] + } else { + out.Members[i].Roles = nil + } + } + } + } + } + } + + return nil +} + + +func Convert_v1beta1_ProjectMember_To_core_ProjectMember(in *ProjectMember, out *core.ProjectMember, s conversion.Scope) error { + if err := autoConvert_v1beta1_ProjectMember_To_core_ProjectMember(in, out, s); err != nil { + return err + } + + if len(in.Role) == 0 { + return nil + } + + // delete in.Role from out.Roles to make sure it gets added to the head + if len(out.Roles) > 0 { + out.Roles = removeRoleFromRoles(out.Roles, in.Role) + } + + // add in.Role to the head of out.Roles + out.Roles = append([]string{in.Role}, out.Roles...) + + return nil +} + +func Convert_core_ProjectMember_To_v1beta1_ProjectMember(in *core.ProjectMember, out *ProjectMember, s conversion.Scope) error { + if err := autoConvert_core_ProjectMember_To_v1beta1_ProjectMember(in, out, s); err != nil { + return err + } + + if len(in.Roles) > 0 { + out.Role = in.Roles[0] + out.Roles = in.Roles[1:] + } + + return nil +} + + +func removeRoleFromRoles(roles []string, role string) []string{ + var newRoles []string + for _, r := range roles { + if r != role { + newRoles = append(newRoles, r) + } + } + return newRoles +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go new file mode 100644 index 000000000..a3625a6ad --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go @@ -0,0 +1,206 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + "math" + + "github.com/gardener/gardener/pkg/utils" + versionutils "github.com/gardener/gardener/pkg/utils/version" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +// SetDefaults_SecretBinding sets default values for SecretBinding objects. 
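// Illustrative sketch, not part of the vendored file: the effect of the ProjectMember
// conversions above. The singular v1beta1 Role is folded into the head of the core Roles
// list, and on the way back it becomes the singular Role again. Shown as data only;
// error handling and the conversion.Scope argument are omitted.
//
//   v1beta1.ProjectMember{Role: "admin", Roles: []string{"viewer"}}
//     --Convert_v1beta1_ProjectMember_To_core_ProjectMember-->
//   core.ProjectMember{Roles: []string{"admin", "viewer"}}
//     --Convert_core_ProjectMember_To_v1beta1_ProjectMember-->
//   v1beta1.ProjectMember{Role: "admin", Roles: []string{"viewer"}}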
+func SetDefaults_SecretBinding(obj *SecretBinding) { + if len(obj.SecretRef.Namespace) == 0 { + obj.SecretRef.Namespace = obj.Namespace + } + + for i, quota := range obj.Quotas { + if len(quota.Namespace) == 0 { + obj.Quotas[i].Namespace = obj.Namespace + } + } +} + +// SetDefaults_Project sets default values for Project objects. +func SetDefaults_Project(obj *Project) { + defaultSubject(obj.Spec.Owner) + + for i, member := range obj.Spec.Members { + defaultSubject(&obj.Spec.Members[i].Subject) + + if len(member.Role) == 0 && len(member.Roles) == 0 { + obj.Spec.Members[i].Role = ProjectMemberViewer + } + } +} + +func defaultSubject(obj *rbacv1.Subject) { + if obj != nil && len(obj.APIGroup) == 0 { + switch obj.Kind { + case rbacv1.ServiceAccountKind: + obj.APIGroup = "" + case rbacv1.UserKind: + obj.APIGroup = rbacv1.GroupName + case rbacv1.GroupKind: + obj.APIGroup = rbacv1.GroupName + } + } +} + +// SetDefaults_MachineType sets default values for MachineType objects. +func SetDefaults_MachineType(obj *MachineType) { + if obj.Usable == nil { + trueVar := true + obj.Usable = &trueVar + } +} + +// SetDefaults_VolumeType sets default values for VolumeType objects. +func SetDefaults_VolumeType(obj *VolumeType) { + if obj.Usable == nil { + trueVar := true + obj.Usable = &trueVar + } +} + +// SetDefaults_Shoot sets default values for Shoot objects. +func SetDefaults_Shoot(obj *Shoot) { + k8sVersionLessThan116, _ := versionutils.CompareVersions(obj.Spec.Kubernetes.Version, "<", "1.16") + // Error is ignored here because we cannot do anything meaningful with it. + // k8sVersionLessThan116 will default to `false`. + + trueVar := true + falseVar := false + + if obj.Spec.Kubernetes.AllowPrivilegedContainers == nil { + obj.Spec.Kubernetes.AllowPrivilegedContainers = &trueVar + } + + if obj.Spec.Kubernetes.KubeAPIServer == nil { + obj.Spec.Kubernetes.KubeAPIServer = &KubeAPIServerConfig{} + } + if obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication == nil { + if k8sVersionLessThan116 { + obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication = &trueVar + } else { + obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication = &falseVar + } + } + + if obj.Spec.Kubernetes.KubeControllerManager == nil { + obj.Spec.Kubernetes.KubeControllerManager = &KubeControllerManagerConfig{} + } + if obj.Spec.Kubernetes.KubeControllerManager.NodeCIDRMaskSize == nil { + obj.Spec.Kubernetes.KubeControllerManager.NodeCIDRMaskSize = calculateDefaultNodeCIDRMaskSize(obj.Spec.Kubernetes.Kubelet, obj.Spec.Provider.Workers) + } + + if obj.Spec.Kubernetes.KubeProxy == nil { + obj.Spec.Kubernetes.KubeProxy = &KubeProxyConfig{} + } + if obj.Spec.Kubernetes.KubeProxy.Mode == nil { + defaultProxyMode := ProxyModeIPTables + obj.Spec.Kubernetes.KubeProxy.Mode = &defaultProxyMode + } + + if obj.Spec.Addons == nil { + obj.Spec.Addons = &Addons{} + } + if obj.Spec.Addons.KubernetesDashboard == nil { + obj.Spec.Addons.KubernetesDashboard = &KubernetesDashboard{} + } + if obj.Spec.Addons.KubernetesDashboard.AuthenticationMode == nil { + var defaultAuthMode string + if *obj.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication { + defaultAuthMode = KubernetesDashboardAuthModeBasic + } else { + defaultAuthMode = KubernetesDashboardAuthModeToken + } + obj.Spec.Addons.KubernetesDashboard.AuthenticationMode = &defaultAuthMode + } + + if obj.Spec.Purpose == nil { + p := ShootPurposeEvaluation + obj.Spec.Purpose = &p + } +} + +// SetDefaults_Maintenance sets default values for Maintenance objects. 
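// Illustrative sketch, not part of the vendored file: the values SetDefaults_Shoot
// (above) fills in for a bare Shoot on Kubernetes 1.17 (not exhaustive). The version
// string and function name are made-up examples.
func exampleShootDefaulting() {
	shoot := &Shoot{}
	shoot.Spec.Kubernetes.Version = "1.17.4"
	SetDefaults_Shoot(shoot)
	// *shoot.Spec.Kubernetes.AllowPrivilegedContainers == true
	// *shoot.Spec.Kubernetes.KubeAPIServer.EnableBasicAuthentication == false (only < 1.16 defaults to true)
	// *shoot.Spec.Kubernetes.KubeControllerManager.NodeCIDRMaskSize == 24 (ceil(log2(2*110)) = 8 host bits, 32-8 = 24)
	// *shoot.Spec.Kubernetes.KubeProxy.Mode == ProxyModeIPTables
	// *shoot.Spec.Addons.KubernetesDashboard.AuthenticationMode == KubernetesDashboardAuthModeToken
	// *shoot.Spec.Purpose == ShootPurposeEvaluation
}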
+func SetDefaults_Maintenance(obj *Maintenance) { + if obj == nil { + obj = &Maintenance{} + } + + if obj.AutoUpdate == nil { + obj.AutoUpdate = &MaintenanceAutoUpdate{ + KubernetesVersion: true, + MachineImageVersion: true, + } + } + + if obj.TimeWindow == nil { + mt := utils.RandomMaintenanceTimeWindow() + obj.TimeWindow = &MaintenanceTimeWindow{ + Begin: mt.Begin().Formatted(), + End: mt.End().Formatted(), + } + } +} + +// SetDefaults_Worker sets default values for Worker objects. +func SetDefaults_Worker(obj *Worker) { + if obj.MaxSurge == nil { + obj.MaxSurge = &DefaultWorkerMaxSurge + } + if obj.MaxUnavailable == nil { + obj.MaxUnavailable = &DefaultWorkerMaxUnavailable + } +} + +// SetDefaults_NginxIngress sets default values for NginxIngress objects. +func SetDefaults_NginxIngress(obj *NginxIngress) { + if obj.ExternalTrafficPolicy == nil { + v := corev1.ServiceExternalTrafficPolicyTypeCluster + obj.ExternalTrafficPolicy = &v + } +} + +// Helper functions + +func calculateDefaultNodeCIDRMaskSize(kubelet *KubeletConfig, workers []Worker) *int32 { + var maxPods int32 = 110 // default maxPods setting on kubelet + + if kubelet != nil && kubelet.MaxPods != nil { + maxPods = *kubelet.MaxPods + } + + for _, worker := range workers { + if worker.Kubernetes != nil && worker.Kubernetes.Kubelet != nil && worker.Kubernetes.Kubelet.MaxPods != nil && *worker.Kubernetes.Kubelet.MaxPods > maxPods { + maxPods = *worker.Kubernetes.Kubelet.MaxPods + } + } + + // by having approximately twice as many available IP addresses as possible Pods, Kubernetes is able to mitigate IP address reuse as Pods are added to and removed from a node. + nodeCidrRange := int32(32 - int(math.Ceil(math.Log2(float64(maxPods*2))))) + return &nodeCidrRange +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/doc.go new file mode 100644 index 000000000..e7ed9f6f1 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v1beta1 is the v1beta1 version of the API. +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/gardener/gardener/pkg/apis/core +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta +//go:generate gen-crd-api-reference-docs -api-dir . -config ../../../../hack/api-reference/core-config.json -template-dir ../../../../hack/api-reference/template -out-file ../../../../hack/api-reference/core.md + +// Package v1beta1 is a version of the API. 
+// +groupName=core.gardener.cloud +package v1beta1 diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition_builder.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition_builder.go new file mode 100644 index 000000000..d8c47fd8b --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition_builder.go @@ -0,0 +1,140 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package helper + +import ( + "fmt" + + api "github.com/gardener/gardener/pkg/apis/core/v1beta1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ConditionBuilder builds a Condition. +type ConditionBuilder interface { + WithOldCondition(old api.Condition) ConditionBuilder + WithStatus(status api.ConditionStatus) ConditionBuilder + WithReason(reason string) ConditionBuilder + WithMessage(message string) ConditionBuilder + WithNowFunc(now func() metav1.Time) ConditionBuilder + Build() (new api.Condition, updated bool) +} + +// defaultConditionBuilder builds a Condition. +type defaultConditionBuilder struct { + old api.Condition + status api.ConditionStatus + conditionType api.ConditionType + reason string + message string + nowFunc func() metav1.Time +} + +// NewConditionBuilder returns a ConditionBuilder for a specific condition. +func NewConditionBuilder(conditionType api.ConditionType) (ConditionBuilder, error) { + if conditionType == "" { + return nil, fmt.Errorf("conditionType cannot be empty") + } + + return &defaultConditionBuilder{ + conditionType: conditionType, + nowFunc: metav1.Now, + }, nil + } + +// WithOldCondition sets the old condition. It can be used to provide default values. +// The old condition's type is overridden to the one specified in the builder. +func (b *defaultConditionBuilder) WithOldCondition(old api.Condition) ConditionBuilder { + old.Type = b.conditionType + b.old = old + + return b +} + +// WithStatus sets the status of the condition. +func (b *defaultConditionBuilder) WithStatus(status api.ConditionStatus) ConditionBuilder { + b.status = status + return b +} + +// WithReason sets the reason of the condition. +func (b *defaultConditionBuilder) WithReason(reason string) ConditionBuilder { + b.reason = reason + return b +} + +// WithMessage sets the message of the condition. +func (b *defaultConditionBuilder) WithMessage(message string) ConditionBuilder { + b.message = message + return b +} + +// WithNowFunc sets the function used for getting the current time. +// Should only be used for tests. +func (b *defaultConditionBuilder) WithNowFunc(now func() metav1.Time) ConditionBuilder { + b.nowFunc = now + return b +} + +// Build creates the condition and reports whether it differs from the provided OldCondition.
+// If OldCondition is provided: +// - Any changes to status set the `LastTransitionTime` +// - Any updates to the message or the reason cause set `LastUpdateTime` to the current time. +func (b *defaultConditionBuilder) Build() (new api.Condition, updated bool) { + var ( + now = b.nowFunc() + emptyTime = metav1.Time{} + ) + + new = *b.old.DeepCopy() + + if new.LastTransitionTime == emptyTime { + new.LastTransitionTime = now + } + + if new.LastUpdateTime == emptyTime { + new.LastUpdateTime = now + } + + new.Type = b.conditionType + + if b.status != "" { + new.Status = b.status + } else if b.status == "" && b.old.Status == "" { + new.Status = api.ConditionUnknown + } + + if b.reason != "" { + new.Reason = b.reason + } else if b.reason == "" && b.old.Reason == "" { + new.Reason = "ConditionInitialized" + } + + if b.message != "" { + new.Message = b.message + } else if b.message == "" && b.old.Message == "" { + new.Message = "The condition has been initialized but its semantic check has not been performed yet." + } + + if new.Status != b.old.Status { + new.LastTransitionTime = now + } + + if new.Reason != b.old.Reason || new.Message != b.old.Message { + new.LastUpdateTime = now + } + + return new, !apiequality.Semantic.DeepEqual(new, b.old) +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go new file mode 100644 index 000000000..46e667d08 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go @@ -0,0 +1,149 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package helper + +import ( + "errors" + "regexp" + "strings" + "time" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + utilerrors "github.com/gardener/gardener/pkg/utils/errors" + + errors2 "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type errorWithCode struct { + code gardencorev1beta1.ErrorCode + message string +} + +// NewErrorWithCode creates a new error that additionally exposes the given code via the Coder interface. 
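// Illustrative sketch, not part of the vendored file: typical use of the ConditionBuilder
// above, assuming the "api" alias from condition_builder.go. The condition type, reason
// and message are made-up values; error handling is simplified.
func exampleConditionUpdate(old api.Condition) (api.Condition, bool) {
	builder, err := NewConditionBuilder(api.ConditionType("ExampleHealthy"))
	if err != nil {
		return old, false
	}
	return builder.
		WithOldCondition(old).
		WithStatus(api.ConditionTrue).
		WithReason("ChecksSucceeded").
		WithMessage("All example checks succeeded.").
		Build()
}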
+func NewErrorWithCode(code gardencorev1beta1.ErrorCode, message string) error { + return &errorWithCode{code, message} +} + +func (e *errorWithCode) Code() gardencorev1beta1.ErrorCode { + return e.code +} + +func (e *errorWithCode) Error() string { + return e.message +} + +var ( + unauthorizedRegexp = regexp.MustCompile(`(?i)(Unauthorized|InvalidClientTokenId|SignatureDoesNotMatch|Authentication failed|AuthFailure|AuthorizationFailed|invalid character|invalid_grant|invalid_client|Authorization Profile was not found|cannot fetch token|no active subscriptions|InvalidAccessKeyId|InvalidSecretAccessKey)`) + quotaExceededRegexp = regexp.MustCompile(`(?i)(LimitExceeded|Quota)`) + insufficientPrivilegesRegexp = regexp.MustCompile(`(?i)(AccessDenied|Forbidden|deny|denied)`) + dependenciesRegexp = regexp.MustCompile(`(?i)(PendingVerification|Access Not Configured|accessNotConfigured|DependencyViolation|OptInRequired|DeleteConflict|Conflict|inactive billing state|ReadOnlyDisabledSubscription|is already being used|not available in the current hardware cluster)`) +) + +// DetermineError determines the Garden error code for the given error message. +func DetermineError(message string) error { + code := determineErrorCode(message) + if code == "" { + return errors.New(message) + } + return &errorWithCode{code, message} +} + +func determineErrorCode(message string) gardencorev1beta1.ErrorCode { + switch { + case unauthorizedRegexp.MatchString(message): + return gardencorev1beta1.ErrorInfraUnauthorized + case quotaExceededRegexp.MatchString(message): + return gardencorev1beta1.ErrorInfraQuotaExceeded + case insufficientPrivilegesRegexp.MatchString(message): + return gardencorev1beta1.ErrorInfraInsufficientPrivileges + case dependenciesRegexp.MatchString(message): + return gardencorev1beta1.ErrorInfraDependencies + default: + return "" + } +} + +// Coder is an error that may produce an ErrorCode visible to the outside. +type Coder interface { + error + Code() gardencorev1beta1.ErrorCode +} + +// ExtractErrorCodes extracts all error codes from the given error by using utilerrors.Errors +func ExtractErrorCodes(err error) []gardencorev1beta1.ErrorCode { + var codes []gardencorev1beta1.ErrorCode + for _, err := range utilerrors.Errors(err) { + if coder, ok := err.(Coder); ok { + codes = append(codes, coder.Code()) + } + } + return codes +} + +// FormatLastErrDescription formats the error message string for the last occurred error. +func FormatLastErrDescription(err error) string { + errString := err.Error() + if len(errString) > 0 { + errString = strings.ToUpper(string(errString[0])) + errString[1:] + } + return errString +} + +// WrappedLastErrors is a structure which contains the general description of the lastErrors which occurred and an array of all lastErrors +type WrappedLastErrors struct { + Description string + LastErrors []gardencorev1beta1.LastError +} + +// NewWrappedLastErrors returns an error +func NewWrappedLastErrors(description string, err error) *WrappedLastErrors { + var lastErrors []gardencorev1beta1.LastError + + for _, partError := range utilerrors.Errors(err) { + lastErrors = append(lastErrors, *LastErrorWithTaskID( + partError.Error(), + utilerrors.GetID(partError), + ExtractErrorCodes(errors2.Cause(partError))...)) + } + + return &WrappedLastErrors{ + Description: description, + LastErrors: lastErrors, + } +} + +// LastError creates a new LastError with the given description, optional codes and sets timestamp when the error is lastly observed. 
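+//
+// A minimal usage sketch (illustrative description; err is assumed to be the error
+// returned by a failed operation):
+//
+//	lastErr := LastError("infrastructure creation failed", ExtractErrorCodes(err)...)
+//	// lastErr.Description, lastErr.Codes and lastErr.LastUpdateTime are now populated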
+func LastError(description string, codes ...gardencorev1beta1.ErrorCode) *gardencorev1beta1.LastError { + return &gardencorev1beta1.LastError{ + Description: description, + Codes: codes, + LastUpdateTime: &metav1.Time{ + Time: time.Now(), + }, + } +} + +// LastErrorWithTaskID creates a new LastError with the given description, the ID of the task when the error occurred, optional codes and sets timestamp when the error is lastly observed. +func LastErrorWithTaskID(description string, taskID string, codes ...gardencorev1beta1.ErrorCode) *gardencorev1beta1.LastError { + return &gardencorev1beta1.LastError{ + Description: description, + Codes: codes, + TaskID: &taskID, + LastUpdateTime: &metav1.Time{ + Time: time.Now(), + }, + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go new file mode 100644 index 000000000..a0bdf860b --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go @@ -0,0 +1,776 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package helper + +import ( + "fmt" + "sort" + "strconv" + "strings" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + "github.com/gardener/gardener/pkg/logger" + versionutils "github.com/gardener/gardener/pkg/utils/version" + + "github.com/Masterminds/semver" + errors "github.com/pkg/errors" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// Now determines the current metav1.Time. +var Now = metav1.Now + +// InitCondition initializes a new Condition with an Unknown status. +func InitCondition(conditionType gardencorev1beta1.ConditionType) gardencorev1beta1.Condition { + return gardencorev1beta1.Condition{ + Type: conditionType, + Status: gardencorev1beta1.ConditionUnknown, + Reason: "ConditionInitialized", + Message: "The condition has been initialized but its semantic check has not been performed yet.", + LastTransitionTime: Now(), + } +} + +// NewConditions initializes the provided conditions based on an existing list. If a condition type does not exist +// in the list yet, it will be set to default values. +func NewConditions(conditions []gardencorev1beta1.Condition, conditionTypes ...gardencorev1beta1.ConditionType) []*gardencorev1beta1.Condition { + newConditions := []*gardencorev1beta1.Condition{} + + // We retrieve the current conditions in order to update them appropriately. 
+ for _, conditionType := range conditionTypes { + if c := GetCondition(conditions, conditionType); c != nil { + newConditions = append(newConditions, c) + continue + } + initializedCondition := InitCondition(conditionType) + newConditions = append(newConditions, &initializedCondition) + } + + return newConditions +} + +// GetCondition returns the condition with the given out of the list of . +// In case the required type could not be found, it returns nil. +func GetCondition(conditions []gardencorev1beta1.Condition, conditionType gardencorev1beta1.ConditionType) *gardencorev1beta1.Condition { + for _, condition := range conditions { + if condition.Type == conditionType { + c := condition + return &c + } + } + return nil +} + +// GetOrInitCondition tries to retrieve the condition with the given condition type from the given conditions. +// If the condition could not be found, it returns an initialized condition of the given type. +func GetOrInitCondition(conditions []gardencorev1beta1.Condition, conditionType gardencorev1beta1.ConditionType) gardencorev1beta1.Condition { + if condition := GetCondition(conditions, conditionType); condition != nil { + return *condition + } + return InitCondition(conditionType) +} + +// UpdatedCondition updates the properties of one specific condition. +func UpdatedCondition(condition gardencorev1beta1.Condition, status gardencorev1beta1.ConditionStatus, reason, message string) gardencorev1beta1.Condition { + newCondition := gardencorev1beta1.Condition{ + Type: condition.Type, + Status: status, + Reason: reason, + Message: message, + LastTransitionTime: condition.LastTransitionTime, + LastUpdateTime: Now(), + } + + if condition.Status != status { + newCondition.LastTransitionTime = Now() + } + return newCondition +} + +func UpdatedConditionUnknownError(condition gardencorev1beta1.Condition, err error) gardencorev1beta1.Condition { + return UpdatedConditionUnknownErrorMessage(condition, err.Error()) +} + +func UpdatedConditionUnknownErrorMessage(condition gardencorev1beta1.Condition, message string) gardencorev1beta1.Condition { + return UpdatedCondition(condition, gardencorev1beta1.ConditionUnknown, gardencorev1beta1.ConditionCheckError, message) +} + +// MergeConditions merges the given with the . Existing conditions are superseded by +// the (depending on the condition type). +func MergeConditions(oldConditions []gardencorev1beta1.Condition, newConditions ...gardencorev1beta1.Condition) []gardencorev1beta1.Condition { + var ( + out = make([]gardencorev1beta1.Condition, 0, len(oldConditions)) + typeToIndex = make(map[gardencorev1beta1.ConditionType]int, len(oldConditions)) + ) + + for i, condition := range oldConditions { + out = append(out, condition) + typeToIndex[condition.Type] = i + } + + for _, condition := range newConditions { + if index, ok := typeToIndex[condition.Type]; ok { + out[index] = condition + continue + } + out = append(out, condition) + } + + return out +} + +// ConditionsNeedUpdate returns true if the must be updated based on . +func ConditionsNeedUpdate(existingConditions, newConditions []gardencorev1beta1.Condition) bool { + return existingConditions == nil || !apiequality.Semantic.DeepEqual(newConditions, existingConditions) +} + +// IsResourceSupported returns true if a given combination of kind/type is part of a controller resources list. 
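+//
+// A minimal usage sketch (illustrative resource list, not part of this file):
+//
+//	resources := []gardencorev1beta1.ControllerResource{{Kind: "Infrastructure", Type: "aws"}}
+//	supported := IsResourceSupported(resources, "Infrastructure", "AWS") // true, the type comparison is case-insensitive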
+func IsResourceSupported(resources []gardencorev1beta1.ControllerResource, resourceKind, resourceType string) bool {
+	for _, resource := range resources {
+		if resource.Kind == resourceKind && strings.EqualFold(resource.Type, resourceType) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// IsControllerInstallationSuccessful returns true if a ControllerInstallation has been marked as "successfully"
+// installed.
+func IsControllerInstallationSuccessful(controllerInstallation gardencorev1beta1.ControllerInstallation) bool {
+	for _, condition := range controllerInstallation.Status.Conditions {
+		if condition.Type == gardencorev1beta1.ControllerInstallationInstalled && condition.Status == gardencorev1beta1.ConditionTrue {
+			return true
+		}
+	}
+
+	return false
+}
+
+// ComputeOperationType checks the object metadata and the last operation and determines whether the current
+// operation is a Create, Delete, Migrate or Reconcile operation.
+func ComputeOperationType(meta metav1.ObjectMeta, lastOperation *gardencorev1beta1.LastOperation) gardencorev1beta1.LastOperationType {
+	switch {
+	case meta.Annotations[v1beta1constants.GardenerOperation] == v1beta1constants.GardenerOperationMigrate:
+		return gardencorev1beta1.LastOperationTypeMigrate
+	case meta.DeletionTimestamp != nil:
+		return gardencorev1beta1.LastOperationTypeDelete
+	case lastOperation == nil:
+		return gardencorev1beta1.LastOperationTypeCreate
+	case (lastOperation.Type == gardencorev1beta1.LastOperationTypeCreate && lastOperation.State != gardencorev1beta1.LastOperationStateSucceeded):
+		return gardencorev1beta1.LastOperationTypeCreate
+	case (lastOperation.Type == gardencorev1beta1.LastOperationTypeMigrate && lastOperation.State != gardencorev1beta1.LastOperationStateSucceeded):
+		return gardencorev1beta1.LastOperationTypeMigrate
+	}
+	return gardencorev1beta1.LastOperationTypeReconcile
+}
+
+// TaintsHave returns true if the given key is part of the taints list.
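+//
+// A minimal usage sketch (illustrative taint key):
+//
+//	taints := []gardencorev1beta1.SeedTaint{{Key: "seed.gardener.cloud/protected"}}
+//	protected := TaintsHave(taints, "seed.gardener.cloud/protected") // true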
+func TaintsHave(taints []gardencorev1beta1.SeedTaint, key string) bool { + for _, taint := range taints { + if taint.Key == key { + return true + } + } + return false +} + +type ShootedSeed struct { + DisableDNS *bool + DisableCapacityReservation *bool + Protected *bool + Visible *bool + MinimumVolumeSize *string + APIServer *ShootedSeedAPIServer + BlockCIDRs []string + ShootDefaults *gardencorev1beta1.ShootNetworks + Backup *gardencorev1beta1.SeedBackup + NoGardenlet bool + UseServiceAccountBootstrapping bool + WithSecretRef bool +} + +type ShootedSeedAPIServer struct { + Replicas *int32 + Autoscaler *ShootedSeedAPIServerAutoscaler +} + +type ShootedSeedAPIServerAutoscaler struct { + MinReplicas *int32 + MaxReplicas int32 +} + +func parseInt32(s string) (int32, error) { + i64, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return 0, err + } + return int32(i64), nil +} + +func parseShootedSeed(annotation string) (*ShootedSeed, error) { + var ( + flags = make(map[string]struct{}) + settings = make(map[string]string) + + trueVar = true + falseVar = false + + shootedSeed ShootedSeed + ) + + for _, fragment := range strings.Split(annotation, ",") { + parts := strings.SplitN(fragment, "=", 2) + if len(parts) == 1 { + flags[fragment] = struct{}{} + continue + } + + settings[parts[0]] = parts[1] + } + + if _, ok := flags["true"]; !ok { + return nil, nil + } + + apiServer, err := parseShootedSeedAPIServer(settings) + if err != nil { + return nil, err + } + shootedSeed.APIServer = apiServer + + blockCIDRs, err := parseShootedSeedBlockCIDRs(settings) + if err != nil { + return nil, err + } + shootedSeed.BlockCIDRs = blockCIDRs + + shootDefaults, err := parseShootedSeedShootDefaults(settings) + if err != nil { + return nil, err + } + shootedSeed.ShootDefaults = shootDefaults + + backup, err := parseShootedSeedBackup(settings) + if err != nil { + return nil, err + } + shootedSeed.Backup = backup + + if size, ok := settings["minimumVolumeSize"]; ok { + shootedSeed.MinimumVolumeSize = &size + } + + if _, ok := flags["disable-dns"]; ok { + shootedSeed.DisableDNS = &trueVar + } + if _, ok := flags["disable-capacity-reservation"]; ok { + shootedSeed.DisableCapacityReservation = &trueVar + } + if _, ok := flags["no-gardenlet"]; ok { + shootedSeed.NoGardenlet = true + } + if _, ok := flags["use-serviceaccount-bootstrapping"]; ok { + shootedSeed.UseServiceAccountBootstrapping = true + } + if _, ok := flags["with-secret-ref"]; ok { + shootedSeed.WithSecretRef = true + } + + if _, ok := flags["protected"]; ok { + shootedSeed.Protected = &trueVar + } + if _, ok := flags["unprotected"]; ok { + shootedSeed.Protected = &falseVar + } + if _, ok := flags["visible"]; ok { + shootedSeed.Visible = &trueVar + } + if _, ok := flags["invisible"]; ok { + shootedSeed.Visible = &falseVar + } + + return &shootedSeed, nil +} + +func parseShootedSeedBlockCIDRs(settings map[string]string) ([]string, error) { + cidrs, ok := settings["blockCIDRs"] + if !ok { + return nil, nil + } + + return strings.Split(cidrs, ";"), nil +} + +func parseShootedSeedShootDefaults(settings map[string]string) (*gardencorev1beta1.ShootNetworks, error) { + var ( + podCIDR, ok1 = settings["shootDefaults.pods"] + serviceCIDR, ok2 = settings["shootDefaults.services"] + ) + + if !ok1 && !ok2 { + return nil, nil + } + + shootNetworks := &gardencorev1beta1.ShootNetworks{} + + if ok1 { + shootNetworks.Pods = &podCIDR + } + + if ok2 { + shootNetworks.Services = &serviceCIDR + } + + return shootNetworks, nil +} + +func parseShootedSeedBackup(settings 
map[string]string) (*gardencorev1beta1.SeedBackup, error) { + var ( + provider, ok1 = settings["backup.provider"] + region, ok2 = settings["backup.region"] + secretRefName, ok3 = settings["backup.secretRef.name"] + secretRefNamespace, ok4 = settings["backup.secretRef.namespace"] + ) + + if ok1 && provider == "none" { + return nil, nil + } + + backup := &gardencorev1beta1.SeedBackup{} + + if ok1 { + backup.Provider = provider + } + if ok2 { + backup.Region = ®ion + } + if ok3 { + backup.SecretRef.Name = secretRefName + } + if ok4 { + backup.SecretRef.Namespace = secretRefNamespace + } + + return backup, nil +} + +func parseShootedSeedAPIServer(settings map[string]string) (*ShootedSeedAPIServer, error) { + apiServerAutoscaler, err := parseShootedSeedAPIServerAutoscaler(settings) + if err != nil { + return nil, err + } + + replicasString, ok := settings["apiServer.replicas"] + if !ok && apiServerAutoscaler == nil { + return nil, nil + } + + var apiServer ShootedSeedAPIServer + + apiServer.Autoscaler = apiServerAutoscaler + + if ok { + replicas, err := parseInt32(replicasString) + if err != nil { + return nil, err + } + + apiServer.Replicas = &replicas + } + + return &apiServer, nil +} + +func parseShootedSeedAPIServerAutoscaler(settings map[string]string) (*ShootedSeedAPIServerAutoscaler, error) { + minReplicasString, ok1 := settings["apiServer.autoscaler.minReplicas"] + maxReplicasString, ok2 := settings["apiServer.autoscaler.maxReplicas"] + if !ok1 && !ok2 { + return nil, nil + } + if !ok2 { + return nil, fmt.Errorf("apiSrvMaxReplicas has to be specified for shooted seed API server autoscaler") + } + + var apiServerAutoscaler ShootedSeedAPIServerAutoscaler + + if ok1 { + minReplicas, err := parseInt32(minReplicasString) + if err != nil { + return nil, err + } + apiServerAutoscaler.MinReplicas = &minReplicas + } + + maxReplicas, err := parseInt32(maxReplicasString) + if err != nil { + return nil, err + } + apiServerAutoscaler.MaxReplicas = maxReplicas + + return &apiServerAutoscaler, nil +} + +func validateShootedSeed(shootedSeed *ShootedSeed, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if shootedSeed.APIServer != nil { + allErrs = validateShootedSeedAPIServer(shootedSeed.APIServer, fldPath.Child("apiServer")) + } + + return allErrs +} + +func validateShootedSeedAPIServer(apiServer *ShootedSeedAPIServer, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if apiServer.Replicas != nil && *apiServer.Replicas < 1 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("replicas"), *apiServer.Replicas, "must be greater than 0")) + } + if apiServer.Autoscaler != nil { + allErrs = append(allErrs, validateShootedSeedAPIServerAutoscaler(apiServer.Autoscaler, fldPath.Child("autoscaler"))...) 
+ } + + return allErrs +} + +func validateShootedSeedAPIServerAutoscaler(autoscaler *ShootedSeedAPIServerAutoscaler, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if autoscaler.MinReplicas != nil && *autoscaler.MinReplicas < 1 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("minReplicas"), *autoscaler.MinReplicas, "must be greater than 0")) + } + if autoscaler.MaxReplicas < 1 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than 0")) + } + if autoscaler.MinReplicas != nil && autoscaler.MaxReplicas < *autoscaler.MinReplicas { + allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than or equal to `minReplicas`")) + } + + return allErrs +} + +func setDefaults_ShootedSeed(shootedSeed *ShootedSeed) { + if shootedSeed.APIServer == nil { + shootedSeed.APIServer = &ShootedSeedAPIServer{} + } + setDefaults_ShootedSeedAPIServer(shootedSeed.APIServer) +} + +func setDefaults_ShootedSeedAPIServer(apiServer *ShootedSeedAPIServer) { + if apiServer.Replicas == nil { + three := int32(3) + apiServer.Replicas = &three + } + if apiServer.Autoscaler == nil { + apiServer.Autoscaler = &ShootedSeedAPIServerAutoscaler{ + MaxReplicas: 3, + } + } + setDefaults_ShootedSeedAPIServerAutoscaler(apiServer.Autoscaler) +} + +func minInt32(a int32, b int32) int32 { + if a < b { + return a + } + return b +} + +func setDefaults_ShootedSeedAPIServerAutoscaler(autoscaler *ShootedSeedAPIServerAutoscaler) { + if autoscaler.MinReplicas == nil { + minReplicas := minInt32(3, autoscaler.MaxReplicas) + autoscaler.MinReplicas = &minReplicas + } +} + +// ReadShootedSeed determines whether the Shoot has been marked to be registered automatically as a Seed cluster. +func ReadShootedSeed(shoot *gardencorev1beta1.Shoot) (*ShootedSeed, error) { + if shoot.Namespace != v1beta1constants.GardenNamespace || shoot.Annotations == nil { + return nil, nil + } + + val, ok := v1beta1constants.GetShootUseAsSeedAnnotation(shoot.Annotations) + if !ok { + return nil, nil + } + + shootedSeed, err := parseShootedSeed(val) + if err != nil { + return nil, err + } + + if shootedSeed == nil { + return nil, nil + } + + setDefaults_ShootedSeed(shootedSeed) + + if errs := validateShootedSeed(shootedSeed, nil); len(errs) > 0 { + return nil, errs.ToAggregate() + } + + return shootedSeed, nil +} + +// HibernationIsEnabled checks if the given shoot's desired state is hibernated. +func HibernationIsEnabled(shoot *gardencorev1beta1.Shoot) bool { + return shoot.Spec.Hibernation != nil && shoot.Spec.Hibernation.Enabled != nil && *shoot.Spec.Hibernation.Enabled +} + +// ShootWantsClusterAutoscaler checks if the given Shoot needs a cluster autoscaler. +// This is determined by checking whether one of the Shoot workers has a different +// Maximum than Minimum. +func ShootWantsClusterAutoscaler(shoot *gardencorev1beta1.Shoot) (bool, error) { + for _, worker := range shoot.Spec.Provider.Workers { + if worker.Maximum > worker.Minimum { + return true, nil + } + } + return false, nil +} + +// ShootIgnoresAlerts checks if the alerts for the annotated shoot cluster should be ignored. 
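+//
+// A minimal usage sketch (assumes the shoot carries the ignore-alerts annotation
+// recognized by v1beta1constants.GetShootIgnoreAlertsAnnotation):
+//
+//	if ShootIgnoresAlerts(shoot) {
+//		// skip configuring alerting for this shoot
+//	}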
+func ShootIgnoresAlerts(shoot *gardencorev1beta1.Shoot) bool { + ignore := false + if value, ok := v1beta1constants.GetShootIgnoreAlertsAnnotation(shoot.Annotations); ok { + ignore, _ = strconv.ParseBool(value) + } + return ignore +} + +// ShootWantsBasicAuthentication returns true if basic authentication is not configured or +// if it is set explicitly to 'true'. +func ShootWantsBasicAuthentication(shoot *gardencorev1beta1.Shoot) bool { + kubeAPIServerConfig := shoot.Spec.Kubernetes.KubeAPIServer + if kubeAPIServerConfig == nil { + return true + } + if kubeAPIServerConfig.EnableBasicAuthentication == nil { + return true + } + return *kubeAPIServerConfig.EnableBasicAuthentication +} + +// ShootUsesUnmanagedDNS returns true if the shoot's DNS section is marked as 'unmanaged'. +func ShootUsesUnmanagedDNS(shoot *gardencorev1beta1.Shoot) bool { + return shoot.Spec.DNS != nil && len(shoot.Spec.DNS.Providers) > 0 && shoot.Spec.DNS.Providers[0].Type != nil && *shoot.Spec.DNS.Providers[0].Type == "unmanaged" +} + +// GetMachineImagesFor returns a list of all machine images for a given shoot. +func GetMachineImagesFor(shoot *gardencorev1beta1.Shoot) []*gardencorev1beta1.ShootMachineImage { + var workerMachineImages []*gardencorev1beta1.ShootMachineImage + for _, worker := range shoot.Spec.Provider.Workers { + if worker.Machine.Image != nil { + workerMachineImages = append(workerMachineImages, worker.Machine.Image) + } + } + return workerMachineImages +} + +// DetermineMachineImageForName finds the cloud specific machine images in the for the given and +// region. In case it does not find the machine image with the , it returns false. Otherwise, true and the +// cloud-specific machine image will be returned. +func DetermineMachineImageForName(cloudProfile *gardencorev1beta1.CloudProfile, name string) (bool, gardencorev1beta1.MachineImage, error) { + for _, image := range cloudProfile.Spec.MachineImages { + if strings.EqualFold(image.Name, name) { + return true, image, nil + } + } + return false, gardencorev1beta1.MachineImage{}, nil +} + +// ShootMachineImageVersionExists checks if the shoot machine image (name, version) exists in the machine image constraint and returns true if yes and the index in the versions slice +func ShootMachineImageVersionExists(constraint gardencorev1beta1.MachineImage, image gardencorev1beta1.ShootMachineImage) (bool, int) { + if constraint.Name != image.Name { + return false, 0 + } + + for index, v := range constraint.Versions { + if v.Version == image.Version { + return true, index + } + } + + return false, 0 +} + +// DetermineLatestMachineImageVersion determines the latest MachineImageVersion from a MachineImage +func DetermineLatestMachineImageVersion(image gardencorev1beta1.MachineImage) (*semver.Version, gardencorev1beta1.ExpirableVersion, error) { + var ( + latestSemVerVersion *semver.Version + latestMachineImageVersion gardencorev1beta1.ExpirableVersion + ) + + for _, imageVersion := range image.Versions { + v, err := semver.NewVersion(imageVersion.Version) + if err != nil { + return nil, gardencorev1beta1.ExpirableVersion{}, fmt.Errorf("error while parsing machine image version '%s' of machine image '%s': version not valid: %s", imageVersion.Version, image.Name, err.Error()) + } + if latestSemVerVersion == nil || v.GreaterThan(latestSemVerVersion) { + latestSemVerVersion = v + latestMachineImageVersion = imageVersion + } + } + return latestSemVerVersion, latestMachineImageVersion, nil +} + +// GetShootMachineImageFromLatestMachineImageVersion determines the 
latest version in a machine image and returns that as a ShootMachineImage +func GetShootMachineImageFromLatestMachineImageVersion(image gardencorev1beta1.MachineImage) (*semver.Version, gardencorev1beta1.ShootMachineImage, error) { + latestSemVerVersion, latestImage, err := DetermineLatestMachineImageVersion(image) + if err != nil { + return nil, gardencorev1beta1.ShootMachineImage{}, err + } + return latestSemVerVersion, gardencorev1beta1.ShootMachineImage{Name: image.Name, Version: latestImage.Version}, nil +} + +// UpdateMachineImages updates the machine images in place. +func UpdateMachineImages(workers []gardencorev1beta1.Worker, machineImages []*gardencorev1beta1.ShootMachineImage) { + for _, machineImage := range machineImages { + for idx, worker := range workers { + if worker.Machine.Image != nil && machineImage.Name == worker.Machine.Image.Name { + logger.Logger.Infof("Updating worker images of worker '%s' from version %s to version %s", worker.Name, worker.Machine.Image.Version, machineImage.Version) + workers[idx].Machine.Image = machineImage + } + } + } +} + +// KubernetesVersionExistsInCloudProfile checks if the given Kubernetes version exists in the CloudProfile +func KubernetesVersionExistsInCloudProfile(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion string) (bool, gardencorev1beta1.ExpirableVersion, error) { + for _, version := range cloudProfile.Spec.Kubernetes.Versions { + ok, err := versionutils.CompareVersions(version.Version, "=", currentVersion) + if err != nil { + return false, gardencorev1beta1.ExpirableVersion{}, err + } + if ok { + return true, version, nil + } + } + return false, gardencorev1beta1.ExpirableVersion{}, nil +} + +// DetermineLatestKubernetesPatchVersion finds the latest Kubernetes patch version in the compared +// to the given . In case it does not find a newer patch version, it returns false. Otherwise, +// true and the found version will be returned. +func DetermineLatestKubernetesPatchVersion(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion string) (bool, string, error) { + ok, newerVersions, _, err := determineNextKubernetesVersions(cloudProfile, currentVersion, "~") + if err != nil || !ok { + return ok, "", err + } + sort.Strings(newerVersions) + return true, newerVersions[len(newerVersions)-1], nil +} + +// DetermineNextKubernetesMinorVersion finds the next available Kubernetes minor version in the compared +// to the given . In case it does not find a newer minor version, it returns false. Otherwise, +// true and the found version will be returned. +func DetermineNextKubernetesMinorVersion(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion string) (bool, string, error) { + ok, newerVersions, _, err := determineNextKubernetesVersions(cloudProfile, currentVersion, "^") + if err != nil || !ok { + return ok, "", err + } + sort.Strings(newerVersions) + return true, newerVersions[0], nil +} + +// determineKubernetesVersions finds newer Kubernetes versions in the compared +// with the to the given . The has to be a github.com/Masterminds/semver +// range comparison symbol. In case it does not find a newer version, it returns false. Otherwise, +// true and the found version will be returned. 
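+//
+// It backs the two exported helpers above; for example (illustrative versions,
+// assuming the CloudProfile offers newer 1.17 patch versions):
+//
+//	ok, patch, err := DetermineLatestKubernetesPatchVersion(cloudProfile, "1.17.2")
+//	// ok == true and patch holds a newer 1.17.x version offered by the CloudProfile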
+func determineNextKubernetesVersions(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion, operator string) (bool, []string, []gardencorev1beta1.ExpirableVersion, error) { + var ( + newerVersions = []gardencorev1beta1.ExpirableVersion{} + newerVersionsString = []string{} + ) + + for _, version := range cloudProfile.Spec.Kubernetes.Versions { + ok, err := versionutils.CompareVersions(version.Version, operator, currentVersion) + if err != nil { + return false, []string{}, []gardencorev1beta1.ExpirableVersion{}, err + } + if version.Version != currentVersion && ok { + newerVersions = append(newerVersions, version) + newerVersionsString = append(newerVersionsString, version.Version) + } + } + + if len(newerVersions) == 0 { + return false, []string{}, []gardencorev1beta1.ExpirableVersion{}, nil + } + + return true, newerVersionsString, newerVersions, nil +} + +// SetMachineImageVersionsToMachineImage sets imageVersions to the matching imageName in the machineImages. +func SetMachineImageVersionsToMachineImage(machineImages []gardencorev1beta1.MachineImage, imageName string, imageVersions []gardencorev1beta1.ExpirableVersion) ([]gardencorev1beta1.MachineImage, error) { + for index, image := range machineImages { + if strings.EqualFold(image.Name, imageName) { + machineImages[index].Versions = imageVersions + return machineImages, nil + } + } + return nil, fmt.Errorf("machine image with name '%s' could not be found", imageName) +} + +// GetDefaultMachineImageFromCloudProfile gets the first MachineImage from the CloudProfile +func GetDefaultMachineImageFromCloudProfile(profile gardencorev1beta1.CloudProfile) *gardencorev1beta1.MachineImage { + if len(profile.Spec.MachineImages) == 0 { + return nil + } + return &profile.Spec.MachineImages[0] +} + +// WrapWithLastError is wrapper function for gardencorev1beta1.LastError +func WrapWithLastError(err error, lastError *gardencorev1beta1.LastError) error { + if err == nil || lastError == nil { + return err + } + return errors.Wrapf(err, "last error: %s", lastError.Description) +} + +// IsAPIServerExposureManaged returns true, if the Object is managed by Gardener for API server exposure. +// This indicates to extensions that they should not mutate the object. +// Gardener marks the kube-apiserver Service and Deployment as managed by it when it uses SNI to expose them. +func IsAPIServerExposureManaged(obj metav1.Object) bool { + if obj == nil { + return false + } + + if v, found := obj.GetLabels()[v1beta1constants.LabelAPIServerExposure]; found && + v == v1beta1constants.LabelAPIServerExposureGardenerManaged { + return true + } + + return false +} + +// FindPrimaryDNSProvider finds the primary provider among the given `providers`. +// It returns the first provider in case no primary provider is available or the first one if multiple candidates are found. +func FindPrimaryDNSProvider(providers []gardencorev1beta1.DNSProvider) *gardencorev1beta1.DNSProvider { + for _, provider := range providers { + if provider.Primary != nil && *provider.Primary { + primaryProvider := provider + return &primaryProvider + } + } + // TODO: timuthy - Only required for migration and can be removed in a future version. 
+ if len(providers) > 0 { + return &providers[0] + } + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/register.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/register.go new file mode 100644 index 000000000..1692e76d7 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/register.go @@ -0,0 +1,75 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of the core API group. +const GroupName = "core.gardener.cloud" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is a new Scheme Builder which registers our API. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + localSchemeBuilder = &SchemeBuilder + // AddToScheme is a reference to the Scheme Builder's AddToScheme function. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &BackupBucket{}, + &BackupBucketList{}, + &BackupEntry{}, + &BackupEntryList{}, + &CloudProfile{}, + &CloudProfileList{}, + &ControllerRegistration{}, + &ControllerRegistrationList{}, + &ControllerInstallation{}, + &ControllerInstallationList{}, + &Plant{}, + &PlantList{}, + &Project{}, + &ProjectList{}, + &Quota{}, + &QuotaList{}, + &SecretBinding{}, + &SecretBindingList{}, + &Seed{}, + &SeedList{}, + &Shoot{}, + &ShootList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupbucket.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupbucket.go new file mode 100644 index 000000000..e094af027 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupbucket.go @@ -0,0 +1,90 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupBucket holds details about backup bucket +type BackupBucket struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + metav1.ObjectMeta `json:"metadata"` + // Specification of the Backup Bucket. + Spec BackupBucketSpec `json:"spec"` + // Most recently observed status of the Backup Bucket. + Status BackupBucketStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupBucketList is a list of BackupBucket objects. +type BackupBucketList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // Items is the list of BackupBucket. + Items []BackupBucket `json:"items"` +} + +// BackupBucketSpec is the specification of a Backup Bucket. +type BackupBucketSpec struct { + // Provider hold the details of cloud provider of the object store. + Provider BackupBucketProvider `json:"provider"` + // ProviderConfig is the configuration passed to BackupBucket resource. + // +optional + ProviderConfig *ProviderConfig `json:"providerConfig,omitempty"` + // SecretRef is a reference to a secret that contains the credentials to access object store. + SecretRef corev1.SecretReference `json:"secretRef"` + // SeedName holds the name of the seed allocated to BackupBucket for running controller. + // +optional + SeedName *string `json:"seedName,omitempty"` +} + +// BackupBucketStatus holds the most recently observed status of the Backup Bucket. +type BackupBucketStatus struct { + // ProviderStatus is the configuration passed to BackupBucket resource. + // +optional + ProviderStatus *ProviderConfig `json:"providerStatus,omitempty"` + // LastOperation holds information about the last operation on the BackupBucket. + // +optional + LastOperation *LastOperation `json:"lastOperation,omitempty"` + // LastError holds information about the last occurred error during an operation. + // +optional + LastError *LastError `json:"lastError,omitempty"` + // ObservedGeneration is the most recent generation observed for this BackupBucket. It corresponds to the + // BackupBucket's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // GeneratedSecretRef is reference to the secret generated by backup bucket, which + // will have object store specific credentials. + // +optional + GeneratedSecretRef *corev1.SecretReference `json:"generatedSecretRef,omitempty"` +} + +// BackupBucketProvider holds the details of cloud provider of the object store. +type BackupBucketProvider struct { + // Type is the type of provider. 
+ Type string `json:"type"` + // Region is the region of the bucket. + Region string `json:"region"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupentry.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupentry.go new file mode 100644 index 000000000..4ad89087f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_backupentry.go @@ -0,0 +1,75 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // BackupEntryForceDeletion is a constant for an annotation on a BackupEntry indicating that it should be force deleted. + BackupEntryForceDeletion = "backupentry.core.gardener.cloud/force-deletion" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupEntry holds details about shoot backup. +type BackupEntry struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + metav1.ObjectMeta `json:"metadata"` + // Spec contains the specification of the Backup Entry. + // +optional + Spec BackupEntrySpec `json:"spec,omitempty"` + // Status contains the most recently observed status of the Backup Entry. + // +optional + Status BackupEntryStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupEntryList is a list of BackupEntry objects. +type BackupEntryList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // Items is the list of BackupEntry. + Items []BackupEntry `json:"items"` +} + +// BackupEntrySpec is the specification of a Backup Entry. +type BackupEntrySpec struct { + // BucketName is the name of backup bucket for this Backup Entry. + BucketName string `json:"bucketName"` + // SeedName holds the name of the seed allocated to BackupEntry for running controller. + // +optional + SeedName *string `json:"seedName,omitempty"` +} + +// BackupEntryStatus holds the most recently observed status of the Backup Entry. +type BackupEntryStatus struct { + // LastOperation holds information about the last operation on the BackupEntry. + // +optional + LastOperation *LastOperation `json:"lastOperation,omitempty"` + // LastError holds information about the last occurred error during an operation. + // +optional + LastError *LastError `json:"lastError,omitempty"` + // ObservedGeneration is the most recent generation observed for this BackupEntry. It corresponds to the + // BackupEntry's generation, which is updated on mutation by the API Server. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go new file mode 100644 index 000000000..c11403bd0 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go @@ -0,0 +1,181 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CloudProfile represents certain properties about a provider environment. +type CloudProfile struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // Spec defines the provider environment properties. + // +optional + Spec CloudProfileSpec `json:"spec,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CloudProfileList is a collection of CloudProfiles. +type CloudProfileList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // Items is the list of CloudProfiles. + Items []CloudProfile `json:"items"` +} + +// CloudProfileSpec is the specification of a CloudProfile. +// It must contain exactly one of its defined keys. +type CloudProfileSpec struct { + // CABundle is a certificate bundle which will be installed onto every host machine of shoot cluster targeting this profile. + // +optional + CABundle *string `json:"caBundle,omitempty"` + // Kubernetes contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification. + Kubernetes KubernetesSettings `json:"kubernetes"` + // MachineImages contains constraints regarding allowed values for machine images in the Shoot specification. + // +patchMergeKey=name + // +patchStrategy=merge + MachineImages []MachineImage `json:"machineImages" patchStrategy:"merge" patchMergeKey:"name"` + // MachineTypes contains constraints regarding allowed values for machine types in the 'workers' block in the Shoot specification. + // +patchMergeKey=name + // +patchStrategy=merge + MachineTypes []MachineType `json:"machineTypes" patchStrategy:"merge" patchMergeKey:"name"` + // ProviderConfig contains provider-specific configuration for the profile. + // +optional + ProviderConfig *ProviderConfig `json:"providerConfig,omitempty"` + // Regions contains constraints regarding allowed values for regions and zones. 
+ // +patchMergeKey=name + // +patchStrategy=merge + Regions []Region `json:"regions" patchStrategy:"merge" patchMergeKey:"name"` + // SeedSelector contains an optional list of labels on `Seed` resources that marks those seeds whose shoots may use this provider profile. + // An empty list means that all seeds of the same provider type are supported. + // This is useful for environments that are of the same type (like openstack) but may have different "instances"/landscapes. + // +optional + SeedSelector *metav1.LabelSelector `json:"seedSelector,omitempty"` + // Type is the name of the provider. + Type string `json:"type"` + // VolumeTypes contains constraints regarding allowed values for volume types in the 'workers' block in the Shoot specification. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + VolumeTypes []VolumeType `json:"volumeTypes,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// KubernetesSettings contains constraints regarding allowed values of the 'kubernetes' block in the Shoot specification. +type KubernetesSettings struct { + // Versions is the list of allowed Kubernetes versions with optional expiration dates for Shoot clusters. + // +patchMergeKey=version + // +patchStrategy=merge + // +optional + Versions []ExpirableVersion `json:"versions,omitempty" patchStrategy:"merge" patchMergeKey:"version"` +} + +// MachineImage defines the name and multiple versions of the machine image in any environment. +type MachineImage struct { + // Name is the name of the image. + Name string `json:"name"` + // Versions contains versions and expiration dates of the machine image + // +patchMergeKey=version + // +patchStrategy=merge + Versions []ExpirableVersion `json:"versions" patchStrategy:"merge" patchMergeKey:"version"` +} + +// ExpirableVersion contains a version and an expiration date. +type ExpirableVersion struct { + // Version is the version identifier. + Version string `json:"version"` + // ExpirationDate defines the time at which this version expires. + // +optional + ExpirationDate *metav1.Time `json:"expirationDate,omitempty"` +} + +// MachineType contains certain properties of a machine type. +type MachineType struct { + // CPU is the number of CPUs for this machine type. + CPU resource.Quantity `json:"cpu"` + // GPU is the number of GPUs for this machine type. + GPU resource.Quantity `json:"gpu"` + // Memory is the amount of memory for this machine type. + Memory resource.Quantity `json:"memory"` + // Name is the name of the machine type. + Name string `json:"name"` + // Storage is the amount of storage associated with the root volume of this machine type. + // +optional + Storage *MachineTypeStorage `json:"storage,omitempty"` + // Usable defines if the machine type can be used for shoot clusters. + // +optional + Usable *bool `json:"usable,omitempty"` +} + +// MachineTypeStorage is the amount of storage associated with the root volume of this machine type. +type MachineTypeStorage struct { + // Class is the class of the storage type. + Class string `json:"class"` + // Size is the storage size. + Size resource.Quantity `json:"size"` + // Type is the type of the storage. + Type string `json:"type"` +} + +// Region contains certain properties of a region. +type Region struct { + // Name is a region name. + Name string `json:"name"` + // Zones is a list of availability zones in this region. 
+ // +patchMergeKey=name + // +patchStrategy=merge + // +optional + Zones []AvailabilityZone `json:"zones,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// AvailabilityZone is an availability zone. +type AvailabilityZone struct { + // Name is an an availability zone name. + Name string `json:"name"` + // UnavailableMachineTypes is a list of machine type names that are not availability in this zone. + // +optional + UnavailableMachineTypes []string `json:"unavailableMachineTypes,omitempty"` + // UnavailableVolumeTypes is a list of volume type names that are not availability in this zone. + // +optional + UnavailableVolumeTypes []string `json:"unavailableVolumeTypes,omitempty"` +} + +// VolumeType contains certain properties of a volume type. +type VolumeType struct { + // Class is the class of the volume type. + Class string `json:"class"` + // Name is the name of the volume type. + Name string `json:"name"` + // Usable defines if the volume type can be used for shoot clusters. + // +optional + Usable *bool `json:"usable,omitempty"` +} + +const ( + // VolumeClassStandard is a constant for the standard volume class. + VolumeClassStandard string = "standard" + // VolumeClassPremium is a constant for the premium volume class. + VolumeClassPremium string = "premium" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go new file mode 100644 index 000000000..75c6714ca --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go @@ -0,0 +1,179 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// ErrorCode is a string alias. +type ErrorCode string + +const ( + // ErrorInfraUnauthorized indicates that the last error occurred due to invalid cloud provider credentials. + ErrorInfraUnauthorized ErrorCode = "ERR_INFRA_UNAUTHORIZED" + // ErrorInfraInsufficientPrivileges indicates that the last error occurred due to insufficient cloud provider privileges. + ErrorInfraInsufficientPrivileges ErrorCode = "ERR_INFRA_INSUFFICIENT_PRIVILEGES" + // ErrorInfraQuotaExceeded indicates that the last error occurred due to cloud provider quota limits. + ErrorInfraQuotaExceeded ErrorCode = "ERR_INFRA_QUOTA_EXCEEDED" + // ErrorInfraDependencies indicates that the last error occurred due to dependent objects on the cloud provider level. + ErrorInfraDependencies ErrorCode = "ERR_INFRA_DEPENDENCIES" +) + +// LastError indicates the last occurred error for an operation on a resource. +type LastError struct { + // A human readable message indicating details about the last error. 
+ Description string `json:"description"` + // ID of the task which caused this last error + // +optional + TaskID *string `json:"taskID,omitempty"` + // Well-defined error codes of the last error(s). + // +optional + Codes []ErrorCode `json:"codes,omitempty"` + // Last time the error was reported + // +optional + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` +} + +// GetDescription implements LastError. +func (l *LastError) GetDescription() string { + return l.Description +} + +// GetTaskID implements LastError +func (l *LastError) GetTaskID() *string { + return l.TaskID +} + +// GetCodes implements LastError. +func (l *LastError) GetCodes() []ErrorCode { + return l.Codes +} + +// GetLastUpdateTime implements LastError. +func (l *LastError) GetLastUpdateTime() *metav1.Time { + return l.LastUpdateTime +} + +// LastOperationType is a string alias. +type LastOperationType string + +const ( + // LastOperationTypeCreate indicates a 'create' operation. + LastOperationTypeCreate LastOperationType = "Create" + // LastOperationTypeReconcile indicates a 'reconcile' operation. + LastOperationTypeReconcile LastOperationType = "Reconcile" + // LastOperationTypeDelete indicates a 'delete' operation. + LastOperationTypeDelete LastOperationType = "Delete" + // LastOperationTypeMigrate indicates a 'migrate' operation. + LastOperationTypeMigrate LastOperationType = "Migrate" +) + +// LastOperationState is a string alias. +type LastOperationState string + +const ( + // LastOperationStateProcessing indicates that an operation is ongoing. + LastOperationStateProcessing LastOperationState = "Processing" + // LastOperationStateSucceeded indicates that an operation has completed successfully. + LastOperationStateSucceeded LastOperationState = "Succeeded" + // LastOperationStateError indicates that an operation is completed with errors and will be retried. + LastOperationStateError LastOperationState = "Error" + // LastOperationStateFailed indicates that an operation is completed with errors and won't be retried. + LastOperationStateFailed LastOperationState = "Failed" + // LastOperationStatePending indicates that an operation cannot be done now, but will be tried in future. + LastOperationStatePending LastOperationState = "Pending" + // LastOperationStateAborted indicates that an operation has been aborted. + LastOperationStateAborted LastOperationState = "Aborted" +) + +// LastOperation indicates the type and the state of the last operation, along with a description +// message and a progress indicator. +type LastOperation struct { + // A human readable message indicating details about the last operation. + Description string `json:"description"` + // Last time the operation state transitioned from one to another. + LastUpdateTime metav1.Time `json:"lastUpdateTime"` + // The progress in percentage (0-100) of the last operation. + Progress int `json:"progress"` + // Status of the last operation, one of Aborted, Processing, Succeeded, Error, Failed. + State LastOperationState `json:"state"` + // Type of the last operation, one of Create, Reconcile, Delete. + Type LastOperationType `json:"type"` +} + +// GetDescription implements LastOperation. +func (l *LastOperation) GetDescription() string { + return l.Description +} + +// GetLastUpdateTime implements LastOperation. +func (l *LastOperation) GetLastUpdateTime() metav1.Time { + return l.LastUpdateTime +} + +// GetProgress implements LastOperation. 
+func (l *LastOperation) GetProgress() int { + return l.Progress +} + +// GetState implements LastOperation. +func (l *LastOperation) GetState() LastOperationState { + return l.State +} + +// GetType implements LastOperation. +func (l *LastOperation) GetType() LastOperationType { + return l.Type +} + +// Gardener holds the information about the Gardener version that operated a resource. +type Gardener struct { + // ID is the Docker container id of the Gardener which last acted on a resource. + ID string `json:"id"` + // Name is the hostname (pod name) of the Gardener which last acted on a resource. + Name string `json:"name"` + // Version is the version of the Gardener which last acted on a resource. + Version string `json:"version"` +} + +const ( + // GardenerName is the value in a Garden resource's `.metadata.finalizers[]` array on which the Gardener will react + // when performing a delete request on a resource. + GardenerName = "gardener" + // ExternalGardenerName is the value in a Kubernetes core resources `.metadata.finalizers[]` array on which the + // Gardener will react when performing a delete request on a resource. + ExternalGardenerName = "gardener.cloud/gardener" + // ExternalGardenerNameDeprecated is the value in a Kubernetes core resources `.metadata.finalizers[]` array on which the + // Gardener will react when performing a delete request on a resource. + // + // Deprecated: Use `ExternalGardenerName` instead. + ExternalGardenerNameDeprecated = "garden.sapcloud.io/gardener" +) + +const ( + // EventReconciling indicates that the a Reconcile operation started. + EventReconciling = "Reconciling" + // EventReconciled indicates that the a Reconcile operation was successful. + EventReconciled = "Reconciled" + // EventReconcileError indicates that the a Reconcile operation failed. + EventReconcileError = "ReconcileError" + // EventDeleting indicates that the a Delete operation started. + EventDeleting = "Deleting" + // EventDeleted indicates that the a Delete operation was successful. + EventDeleted = "Deleted" + // EventDeleteError indicates that the a Delete operation failed. + EventDeleteError = "DeleteError" + // EventOperationPending + EventOperationPending = "OperationPending" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerinstallation.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerinstallation.go new file mode 100644 index 000000000..b3c62caf2 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerinstallation.go @@ -0,0 +1,76 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerInstallation represents an installation request for an external controller.
+type ControllerInstallation struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // Spec contains the specification of this installation.
+ Spec ControllerInstallationSpec `json:"spec,omitempty"`
+ // Status contains the status of this installation.
+ Status ControllerInstallationStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerInstallationList is a collection of ControllerInstallations.
+type ControllerInstallationList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ // Items is the list of ControllerInstallations.
+ Items []ControllerInstallation `json:"items"`
+}
+
+// ControllerInstallationSpec is the specification of a ControllerInstallation.
+type ControllerInstallationSpec struct {
+ // RegistrationRef is used to reference a ControllerRegistration resource.
+ RegistrationRef corev1.ObjectReference `json:"registrationRef"`
+ // SeedRef is used to reference a Seed resource.
+ SeedRef corev1.ObjectReference `json:"seedRef"`
+}
+
+// ControllerInstallationStatus is the status of a ControllerInstallation.
+type ControllerInstallationStatus struct {
+ // Conditions represents the latest available observations of a ControllerInstallation's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+ // ProviderStatus contains type-specific status.
+ // +optional
+ ProviderStatus *ProviderConfig `json:"providerStatus,omitempty"`
+}
+
+const (
+ // ControllerInstallationHealthy is a condition type for indicating whether the controller is healthy.
+ ControllerInstallationHealthy ConditionType = "Healthy"
+ // ControllerInstallationInstalled is a condition type for indicating whether the controller has been installed.
+ ControllerInstallationInstalled ConditionType = "Installed"
+ // ControllerInstallationValid is a condition type for indicating whether the installation request is valid.
+ ControllerInstallationValid ConditionType = "Valid"
+)
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go
new file mode 100644
index 000000000..9f88dd5a7
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRegistration represents a registration of an external controller. +type ControllerRegistration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + // Spec contains the specification of this registration. + Spec ControllerRegistrationSpec `json:"spec,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRegistrationList is a collection of ControllerRegistrations. +type ControllerRegistrationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // Items is the list of ControllerRegistrations. + Items []ControllerRegistration `json:"items"` +} + +// ControllerRegistrationSpec is the specification of a ControllerRegistration. +type ControllerRegistrationSpec struct { + // Resources is a list of combinations of kinds (DNSProvider, Infrastructure, Generic, ...) and their actual types + // (aws-route53, gcp, auditlog, ...). + Resources []ControllerResource `json:"resources"` + // Deployment contains information for how this controller is deployed. + // +optional + Deployment *ControllerDeployment `json:"deployment,omitempty"` +} + +// ControllerResource is a combination of a kind (DNSProvider, Infrastructure, Generic, ...) and the actual type for this +// kind (aws-route53, gcp, auditlog, ...). +type ControllerResource struct { + // Kind is the resource kind, for example "OperatingSystemConfig". + Kind string `json:"kind"` + // Type is the resource type, for example "coreos" or "ubuntu". + Type string `json:"type"` + // GloballyEnabled determines if this ControllerResource is required by all Shoot clusters. + // +optional + GloballyEnabled *bool `json:"globallyEnabled,omitempty"` + // ReconcileTimeout defines how long Gardener should wait for the resource reconciliation. + // +optional + ReconcileTimeout *metav1.Duration `json:"reconcileTimeout,omitempty"` +} + +// ControllerDeployment contains information for how this controller is deployed. +type ControllerDeployment struct { + // Type is the deployment type. + Type string `json:"type"` + // ProviderConfig contains type-specific configuration. + // +optional + ProviderConfig *ProviderConfig `json:"providerConfig,omitempty"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_plant.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_plant.go new file mode 100644 index 000000000..073b29e2f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_plant.go @@ -0,0 +1,112 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type Plant struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // Spec contains the specification of this Plant. + Spec PlantSpec `json:"spec,omitempty"` + // Status contains the status of this Plant. + Status PlantStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PlantList is a collection of Plants. +type PlantList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // Items is the list of Plants. + Items []Plant `json:"items"` +} + +const ( + // PlantEveryNodeReady is a constant for a condition type indicating the node health. + PlantEveryNodeReady ConditionType = "EveryNodeReady" + // PlantAPIServerAvailable is a constant for a condition type indicating that the Plant cluster API server is available. + PlantAPIServerAvailable ConditionType = "APIServerAvailable" +) + +// PlantSpec is the specification of a Plant. +type PlantSpec struct { + // SecretRef is a reference to a Secret object containing the Kubeconfig of the external kubernetes + // clusters to be added to Gardener. + SecretRef corev1.LocalObjectReference `json:"secretRef"` + // Endpoints is the configuration plant endpoints + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + Endpoints []Endpoint `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// PlantStatus is the status of a Plant. +type PlantStatus struct { + // Conditions represents the latest available observations of a Plant's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // ObservedGeneration is the most recent generation observed for this Plant. It corresponds to the + // Plant's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + // ClusterInfo is additional computed information about the newly added cluster (Plant) + ClusterInfo *ClusterInfo `json:"clusterInfo,omitempty"` +} + +// Endpoint is an endpoint for monitoring, logging and other services around the plant. 
+type Endpoint struct { + // Name is the name of the endpoint + Name string `json:"name"` + // URL is the url of the endpoint + URL string `json:"url"` + // Purpose is the purpose of the endpoint + Purpose string `json:"purpose"` +} + +// ClusterInfo contains information about the Plant cluster +type ClusterInfo struct { + // Cloud describes the cloud information + Cloud CloudInfo `json:"cloud"` + // Kubernetes describes kubernetes meta information (e.g., version) + Kubernetes KubernetesInfo `json:"kubernetes"` +} + +// CloudInfo contains information about the cloud +type CloudInfo struct { + // Type is the cloud type + Type string `json:"type"` + // Region is the cloud region + Region string `json:"region"` +} + +// KubernetesInfo contains the version and configuration variables for the Plant cluster. +type KubernetesInfo struct { + // Version is the semantic Kubernetes version to use for the Plant cluster. + Version string `json:"version"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_project.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_project.go new file mode 100644 index 000000000..b08424388 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_project.go @@ -0,0 +1,139 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Project holds certain properties about a Gardener project. +type Project struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // Spec defines the project properties. + // +optional + Spec ProjectSpec `json:"spec,omitempty"` + // Most recently observed status of the Project. + // +optional + Status ProjectStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ProjectList is a collection of Projects. +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // Items is the list of Projects. + Items []Project `json:"items"` +} + +// ProjectSpec is the specification of a Project. +type ProjectSpec struct { + // CreatedBy is a subject representing a user name, an email address, or any other identifier of a user + // who created the project. + // +optional + CreatedBy *rbacv1.Subject `json:"createdBy,omitempty"` + // Description is a human-readable description of what the project is used for. 
+ // +optional + Description *string `json:"description,omitempty"` + // Owner is a subject representing a user name, an email address, or any other identifier of a user owning + // the project. + // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `owner` + // role. The only way to change the owner will be by moving the `owner` role. In this API version the only way + // to change the owner is to use this field. + // +optional + // TODO: Remove this field in favor of the `owner` role in `v1`. + Owner *rbacv1.Subject `json:"owner,omitempty"` + // Purpose is a human-readable explanation of the project's purpose. + // +optional + Purpose *string `json:"purpose,omitempty"` + // Members is a list of subjects representing a user name, an email address, or any other identifier of a user, + // group, or service account that has a certain role. + // +optional + Members []ProjectMember `json:"members,omitempty"` + // Namespace is the name of the namespace that has been created for the Project object. + // A nil value means that Gardener will determine the name of the namespace. + // +optional + Namespace *string `json:"namespace,omitempty"` +} + +// ProjectStatus holds the most recently observed status of the project. +type ProjectStatus struct { + // ObservedGeneration is the most recent generation observed for this project. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // Phase is the current phase of the project. + Phase ProjectPhase `json:"phase,omitempty"` +} + +// ProjectMember is a member of a project. +type ProjectMember struct { + // Subject is representing a user name, an email address, or any other identifier of a user, group, or service + // account that has a certain role. + rbacv1.Subject `json:",inline"` + // Role represents the role of this member. + // IMPORTANT: Be aware that this field will be removed in the `v1` version of this API in favor of the `roles` + // list. + // TODO: Remove this field in favor of the `owner` role in `v1`. + Role string `json:"role"` + // Roles represents the list of roles of this member. + // +optional + Roles []string `json:"roles,omitempty"` +} + +const ( + // ProjectMemberAdmin is a const for a role that provides full admin access. + ProjectMemberAdmin = "admin" + // ProjectMemberOwner is a const for a role that provides full owner access. + ProjectMemberOwner = "owner" + // ProjectMemberViewer is a const for a role that provides limited permissions to only view some resources. + ProjectMemberViewer = "viewer" + + // ProjectMemberExtensionPrefix is a prefix for custom roles that are not known by Gardener. + ProjectMemberExtensionPrefix = "extension:" +) + +// ProjectPhase is a label for the condition of a project at the current time. +type ProjectPhase string + +const ( + // ProjectPending indicates that the project reconciliation is pending. + ProjectPending ProjectPhase = "Pending" + // ProjectReady indicates that the project reconciliation was successful. + ProjectReady ProjectPhase = "Ready" + // ProjectFailed indicates that the project reconciliation failed. + ProjectFailed ProjectPhase = "Failed" + // ProjectTerminating indicates that the project is in termination process. + ProjectTerminating ProjectPhase = "Terminating" + + // ProjectEventNamespaceReconcileFailed indicates that the namespace reconciliation has failed. 
+ ProjectEventNamespaceReconcileFailed = "NamespaceReconcileFailed" + // ProjectEventNamespaceReconcileSuccessful indicates that the namespace reconciliation has succeeded. + ProjectEventNamespaceReconcileSuccessful = "NamespaceReconcileSuccessful" + // ProjectEventNamespaceDeletionFailed indicates that the namespace deletion failed. + ProjectEventNamespaceDeletionFailed = "NamespaceDeletionFailed" + // ProjectEventNamespaceMarkedForDeletion indicates that the namespace has been successfully marked for deletion. + ProjectEventNamespaceMarkedForDeletion = "NamespaceMarkedForDeletion" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_quota.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_quota.go new file mode 100644 index 000000000..cda5b1040 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_quota.go @@ -0,0 +1,56 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type Quota struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // Spec defines the Quota constraints. + // +optional + Spec QuotaSpec `json:"spec,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// QuotaList is a collection of Quotas. +type QuotaList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // Items is the list of Quotas. + Items []Quota `json:"items"` +} + +// QuotaSpec is the specification of a Quota. +type QuotaSpec struct { + // ClusterLifetimeDays is the lifetime of a Shoot cluster in days before it will be terminated automatically. + // +optional + ClusterLifetimeDays *int `json:"clusterLifetimeDays,omitempty"` + // Metrics is a list of resources which will be put under constraints. + Metrics corev1.ResourceList `json:"metrics"` + // Scope is the scope of the Quota object, either 'project' or 'secret'. + Scope corev1.ObjectReference `json:"scope"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_secretbinding.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_secretbinding.go new file mode 100644 index 000000000..a726a66d5 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_secretbinding.go @@ -0,0 +1,47 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type SecretBinding struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // SecretRef is a reference to a secret object in the same or another namespace.
+ SecretRef corev1.SecretReference `json:"secretRef"`
+ // Quotas is a list of references to Quota objects in the same or another namespace.
+ // +optional
+ Quotas []corev1.ObjectReference `json:"quotas,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SecretBindingList is a collection of SecretBindings.
+type SecretBindingList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list object metadata.
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ // Items is the list of SecretBindings.
+ Items []SecretBinding `json:"items"`
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go
new file mode 100644
index 000000000..8db23fdf0
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go
@@ -0,0 +1,208 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Seed represents a seed cluster that hosts the control planes of Shoot clusters.
+type Seed struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata.
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // Spec contains the specification of this Seed.
+ Spec SeedSpec `json:"spec,omitempty"`
+ // Status contains the status of this Seed.
+ Status SeedStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SeedList is a collection of Seeds.
+type SeedList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // Items is the list of Seeds. + Items []Seed `json:"items"` +} + +// SeedSpec is the specification of a Seed. +type SeedSpec struct { + // Backup holds the object store configuration for the backups of shoot (currently only etcd). + // If it is not specified, then there won't be any backups taken for shoots associated with this seed. + // If backup field is present in seed, then backups of the etcd from shoot control plane will be stored + // under the configured object store. + // +optional + Backup *SeedBackup `json:"backup,omitempty"` + // DNS contains DNS-relevant information about this seed cluster. + DNS SeedDNS `json:"dns"` + // Networks defines the pod, service and worker network of the Seed cluster. + Networks SeedNetworks `json:"networks"` + // Provider defines the provider type and region for this Seed cluster. + Provider SeedProvider `json:"provider"` + // SecretRef is a reference to a Secret object containing the Kubeconfig and the cloud provider credentials for + // the account the Seed cluster has been deployed to. + // +optional + SecretRef *corev1.SecretReference `json:"secretRef,omitempty"` + // Taints describes taints on the seed. + // +optional + Taints []SeedTaint `json:"taints,omitempty"` + // Volume contains settings for persistentvolumes created in the seed cluster. + // +optional + Volume *SeedVolume `json:"volume,omitempty"` +} + +// SeedStatus is the status of a Seed. +type SeedStatus struct { + // Gardener holds information about the Gardener which last acted on the Shoot. + // +optional + Gardener *Gardener `json:"gardener,omitempty"` + // KubernetesVersion is the Kubernetes version of the seed cluster. + // +optional + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` + // Conditions represents the latest available observations of a Seed's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // ObservedGeneration is the most recent generation observed for this Seed. It corresponds to the + // Seed's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// SeedBackup contains the object store configuration for backups for shoot (currently only etcd). +type SeedBackup struct { + // Provider is a provider name. + Provider string `json:"provider"` + // ProviderConfig is the configuration passed to BackupBucket resource. + // +optional + ProviderConfig *ProviderConfig `json:"providerConfig,omitempty"` + // Region is a region name. + // +optional + Region *string `json:"region,omitempty"` + // SecretRef is a reference to a Secret object containing the cloud provider credentials for + // the object store where backups should be stored. It should have enough privileges to manipulate + // the objects as well as buckets. + SecretRef corev1.SecretReference `json:"secretRef"` +} + +// SeedDNS contains DNS-relevant information about this seed cluster. +type SeedDNS struct { + // IngressDomain is the domain of the Seed cluster pointing to the ingress controller endpoint. It will be used + // to construct ingress URLs for system applications running in Shoot clusters. 
+ IngressDomain string `json:"ingressDomain"` +} + +// SeedNetworks contains CIDRs for the pod, service and node networks of a Kubernetes cluster. +type SeedNetworks struct { + // Nodes is the CIDR of the node network. + // +optional + Nodes *string `json:"nodes,omitempty"` + // Pods is the CIDR of the pod network. + Pods string `json:"pods"` + // Services is the CIDR of the service network. + Services string `json:"services"` + // ShootDefaults contains the default networks CIDRs for shoots. + // +optional + ShootDefaults *ShootNetworks `json:"shootDefaults,omitempty"` + // BlockCIDRs is a list of network addresses that should be blocked for shoot control plane components running + // in the seed cluster. + // +optional + BlockCIDRs []string `json:"blockCIDRs,omitempty"` +} + +// ShootNetworks contains the default networks CIDRs for shoots. +type ShootNetworks struct { + // Pods is the CIDR of the pod network. + // +optional + Pods *string `json:"pods,omitempty"` + // Services is the CIDR of the service network. + // +optional + Services *string `json:"services,omitempty"` +} + +// SeedProvider defines the provider type and region for this Seed cluster. +type SeedProvider struct { + // Type is the name of the provider. + Type string `json:"type"` + // Region is a name of a region. + Region string `json:"region"` +} + +// SeedTaint describes a taint on a seed. +type SeedTaint struct { + // Key is the taint key to be applied to a seed. + Key string `json:"key"` + // Value is the taint value corresponding to the taint key. + // +optional + Value *string `json:"value,omitempty"` +} + +const ( + // SeedTaintDisableDNS is a constant for a taint key on a seed that marks it for disabling DNS. All shoots + // using this seed won't get any DNS providers, DNS records, and no DNS extension controller is required to + // be installed here. This is useful for environment where DNS is not required. + SeedTaintDisableDNS = "seed.gardener.cloud/disable-dns" + // SeedTaintProtected is a constant for a taint key on a seed that marks it as protected. Protected seeds + // may only be used by shoots in the `garden` namespace. + SeedTaintProtected = "seed.gardener.cloud/protected" + // SeedTaintInvisible is a constant for a taint key on a seed that marks it as invisible. Invisible seeds + // are not considered by the gardener-scheduler. + SeedTaintInvisible = "seed.gardener.cloud/invisible" + // SeedTaintDisableCapacityReservation is a constant for a taint key on a seed that marks it for disabling + // excess capacity reservation. This can be useful for seed clusters which only host shooted seeds to reduce + // costs. + SeedTaintDisableCapacityReservation = "seed.gardener.cloud/disable-capacity-reservation" +) + +// SeedVolume contains settings for persistentvolumes created in the seed cluster. +type SeedVolume struct { + // MinimumSize defines the minimum size that should be used for PVCs in the seed. + // +optional + MinimumSize *resource.Quantity `json:"minimumSize,omitempty"` + // Providers is a list of storage class provisioner types for the seed. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + Providers []SeedVolumeProvider `json:"providers,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// SeedVolumeProvider is a storage class provisioner type. +type SeedVolumeProvider struct { + // Purpose is the purpose of this provider. + Purpose string `json:"purpose"` + // Name is the name of the storage class provisioner type. 
+ Name string `json:"name"` +} + +const ( + // SeedBootstrapped is a constant for a condition type indicating that the seed cluster has been + // bootstrapped. + SeedBootstrapped ConditionType = "Bootstrapped" + // SeedExtensionsReady is a constant for a condition type indicating that the extensions are ready. + SeedExtensionsReady ConditionType = "ExtensionsReady" + // SeedGardenletReady is a constant for a condition type indicating that the Gardenlet is ready. + SeedGardenletReady ConditionType = "GardenletReady" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go new file mode 100644 index 000000000..ddd36630e --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go @@ -0,0 +1,906 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type Shoot struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // Specification of the Shoot cluster. + // +optional + Spec ShootSpec `json:"spec,omitempty"` + // Most recently observed status of the Shoot cluster. + // +optional + Status ShootStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ShootList is a list of Shoot objects. +type ShootList struct { + metav1.TypeMeta `json:",inline"` + // Standard list object metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // Items is the list of Shoots. + Items []Shoot `json:"items"` +} + +// ShootSpec is the specification of a Shoot. +type ShootSpec struct { + // Addons contains information about enabled/disabled addons and their configuration. + // +optional + Addons *Addons `json:"addons,omitempty"` + // CloudProfileName is a name of a CloudProfile object. + CloudProfileName string `json:"cloudProfileName"` + // DNS contains information about the DNS settings of the Shoot. + // +optional + DNS *DNS `json:"dns,omitempty"` + // Extensions contain type and provider information for Shoot extensions. + // +optional + Extensions []Extension `json:"extensions,omitempty"` + // Hibernation contains information whether the Shoot is suspended or not. + // +optional + Hibernation *Hibernation `json:"hibernation,omitempty"` + // Kubernetes contains the version and configuration settings of the control plane components. 
+ Kubernetes Kubernetes `json:"kubernetes"`
+ // Networking contains information about cluster networking such as CNI Plugin type, CIDRs, etc.
+ Networking Networking `json:"networking"`
+ // Maintenance contains information about the time window for maintenance operations and which
+ // operations should be performed.
+ // +optional
+ Maintenance *Maintenance `json:"maintenance,omitempty"`
+ // Monitoring contains information about custom monitoring configurations for the shoot.
+ // +optional
+ Monitoring *Monitoring `json:"monitoring,omitempty"`
+ // Provider contains all provider-specific and provider-relevant information.
+ Provider Provider `json:"provider"`
+ // Purpose is the purpose class for this cluster.
+ // +optional
+ Purpose *ShootPurpose `json:"purpose,omitempty"`
+ // Region is a name of a region.
+ Region string `json:"region"`
+ // SecretBindingName is the name of a SecretBinding that has a reference to the provider secret.
+ // The credentials inside the provider secret will be used to create the shoot in the respective account.
+ SecretBindingName string `json:"secretBindingName"`
+ // SeedName is the name of the seed cluster that runs the control plane of the Shoot.
+ // +optional
+ SeedName *string `json:"seedName,omitempty"`
+}
+
+// ShootStatus holds the most recently observed status of the Shoot cluster.
+type ShootStatus struct {
+ // Conditions represents the latest available observations of a Shoot's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+ // Constraints represents conditions of a Shoot's current state that constrain some operations on it.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Constraints []Condition `json:"constraints,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+ // Gardener holds information about the Gardener which last acted on the Shoot.
+ Gardener Gardener `json:"gardener"`
+ // IsHibernated indicates whether the Shoot is currently hibernated.
+ IsHibernated bool `json:"hibernated"`
+ // LastOperation holds information about the last operation on the Shoot.
+ // +optional
+ LastOperation *LastOperation `json:"lastOperation,omitempty"`
+ // LastErrors holds information about the last occurred error(s) during an operation.
+ // +optional
+ LastErrors []LastError `json:"lastErrors,omitempty"`
+ // ObservedGeneration is the most recent generation observed for this Shoot. It corresponds to the
+ // Shoot's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+ // RetryCycleStartTime is the start time of the last retry cycle (used to determine how often an operation
+ // must be retried until we give up).
+ // +optional
+ RetryCycleStartTime *metav1.Time `json:"retryCycleStartTime,omitempty"`
+ // SeedName is the name of the seed cluster that runs the control plane of the Shoot. This value is only written
+ // after a successful create/reconcile operation. It will be used when control planes are moved between Seeds.
+ // +optional
+ SeedName *string `json:"seedName,omitempty"`
+ // TechnicalID is the name that is used for creating the Seed namespace, the infrastructure resources, and
+ // basically everything that is related to this particular Shoot.
+ TechnicalID string `json:"technicalID"` + // UID is a unique identifier for the Shoot cluster to avoid portability between Kubernetes clusters. + // It is used to compute unique hashes. + UID types.UID `json:"uid"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Addons relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Addons is a collection of configuration for specific addons which are managed by the Gardener. +type Addons struct { + // KubernetesDashboard holds configuration settings for the kubernetes dashboard addon. + // +optional + KubernetesDashboard *KubernetesDashboard `json:"kubernetesDashboard,omitempty"` + // NginxIngress holds configuration settings for the nginx-ingress addon. + // +optional + NginxIngress *NginxIngress `json:"nginxIngress,omitempty"` +} + +// Addon allows enabling or disabling a specific addon and is used to derive from. +type Addon struct { + // Enabled indicates whether the addon is enabled or not. + Enabled bool `json:"enabled"` +} + +// KubernetesDashboard describes configuration values for the kubernetes-dashboard addon. +type KubernetesDashboard struct { + Addon `json:",inline"` + // AuthenticationMode defines the authentication mode for the kubernetes-dashboard. + // +optional + AuthenticationMode *string `json:"authenticationMode,omitempty"` +} + +const ( + // KubernetesDashboardAuthModeBasic uses basic authentication mode for auth. + KubernetesDashboardAuthModeBasic = "basic" + // KubernetesDashboardAuthModeToken uses token-based mode for auth. + KubernetesDashboardAuthModeToken = "token" +) + +// NginxIngress describes configuration values for the nginx-ingress addon. +type NginxIngress struct { + Addon `json:",inline"` + // LoadBalancerSourceRanges is list of whitelist IP sources for NginxIngress + // +optional + LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"` + // Config contains custom configuration for the nginx-ingress-controller configuration. + // See https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md#configuration-options + // +optional + Config map[string]string `json:"config,omitempty"` + // ExternalTrafficPolicy controls the `.spec.externalTrafficPolicy` value of the load balancer `Service` + // exposing the nginx-ingress. Defaults to `Cluster`. + // +optional + ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// DNS relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// DNS holds information about the provider, the hosted zone id and the domain. +type DNS struct { + // Domain is the external available domain of the Shoot cluster. This domain will be written into the + // kubeconfig that is handed out to end-users. + // +optional + Domain *string `json:"domain,omitempty"` + // Providers is a list of DNS providers that shall be enabled for this shoot cluster. Only relevant if + // not a default domain is used. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Providers []DNSProvider `json:"providers,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +// DNSProvider contains information about a DNS provider. 
+type DNSProvider struct { + // Domains contains information about which domains shall be included/excluded for this provider. + // +optional + Domains *DNSIncludeExclude `json:"domains,omitempty"` + // Primary indicates that this DNSProvider is used for shoot related domains. + // +optional + Primary *bool `json:"primary,omitempty"` + // SecretName is a name of a secret containing credentials for the stated domain and the + // provider. When not specified, the Gardener will use the cloud provider credentials referenced + // by the Shoot and try to find respective credentials there. Specifying this field may override + // this behavior, i.e. forcing the Gardener to only look into the given secret. + // +optional + SecretName *string `json:"secretName,omitempty"` + // Type is the DNS provider type for the Shoot. Only relevant if not the default domain is used for + // this shoot. + // +optional + Type *string `json:"type,omitempty"` + // Zones contains information about which hosted zones shall be included/excluded for this provider. + // +optional + Zones *DNSIncludeExclude `json:"zones,omitempty"` +} + +type DNSIncludeExclude struct { + // Include is a list of resources that shall be included. + // +optional + Include []string `json:"include,omitempty"` + // Exclude is a list of resources that shall be excluded. + // +optional + Exclude []string `json:"exclude,omitempty"` +} + +// DefaultDomain is the default value in the Shoot's '.spec.dns.domain' when '.spec.dns.provider' is 'unmanaged' +const DefaultDomain = "cluster.local" + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Extension relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Extension contains type and provider information for Shoot extensions. +type Extension struct { + // Type is the type of the extension resource. + Type string `json:"type"` + // ProviderConfig is the configuration passed to extension resource. + // +optional + ProviderConfig *ProviderConfig `json:"providerConfig,omitempty"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Hibernation relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Hibernation contains information whether the Shoot is suspended or not. +type Hibernation struct { + // Enabled specifies whether the Shoot needs to be hibernated or not. If it is true, the Shoot's desired state is to be hibernated. + // If it is false or nil, the Shoot's desired state is to be awaken. + // +optional + Enabled *bool `json:"enabled,omitempty"` + // Schedules determine the hibernation schedules. + // +optional + Schedules []HibernationSchedule `json:"schedules,omitempty"` +} + +// HibernationSchedule determines the hibernation schedule of a Shoot. +// A Shoot will be regularly hibernated at each start time and will be woken up at each end time. +// Start or End can be omitted, though at least one of each has to be specified. +type HibernationSchedule struct { + // Start is a Cron spec at which time a Shoot will be hibernated. + // +optional + Start *string `json:"start,omitempty"` + // End is a Cron spec at which time a Shoot will be woken up. + // +optional + End *string `json:"end,omitempty"` + // Location is the time location in which both start and and shall be evaluated. 
+ // +optional
+ Location *string `json:"location,omitempty"`
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// Kubernetes relevant types //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Kubernetes contains the version and configuration variables for the Shoot control plane.
+type Kubernetes struct {
+ // AllowPrivilegedContainers indicates whether privileged containers are allowed in the Shoot (default: true).
+ // +optional
+ AllowPrivilegedContainers *bool `json:"allowPrivilegedContainers,omitempty"`
+ // ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler.
+ // +optional
+ ClusterAutoscaler *ClusterAutoscaler `json:"clusterAutoscaler,omitempty"`
+ // KubeAPIServer contains configuration settings for the kube-apiserver.
+ // +optional
+ KubeAPIServer *KubeAPIServerConfig `json:"kubeAPIServer,omitempty"`
+ // KubeControllerManager contains configuration settings for the kube-controller-manager.
+ // +optional
+ KubeControllerManager *KubeControllerManagerConfig `json:"kubeControllerManager,omitempty"`
+ // KubeScheduler contains configuration settings for the kube-scheduler.
+ // +optional
+ KubeScheduler *KubeSchedulerConfig `json:"kubeScheduler,omitempty"`
+ // KubeProxy contains configuration settings for the kube-proxy.
+ // +optional
+ KubeProxy *KubeProxyConfig `json:"kubeProxy,omitempty"`
+ // Kubelet contains configuration settings for the kubelet.
+ // +optional
+ Kubelet *KubeletConfig `json:"kubelet,omitempty"`
+ // Version is the semantic Kubernetes version to use for the Shoot cluster.
+ Version string `json:"version"`
+}
+
+// ClusterAutoscaler contains the configuration flags for the Kubernetes cluster autoscaler.
+type ClusterAutoscaler struct {
+ // ScaleDownDelayAfterAdd defines how long after scale up that scale down evaluation resumes (default: 10 mins).
+ // +optional
+ ScaleDownDelayAfterAdd *metav1.Duration `json:"scaleDownDelayAfterAdd,omitempty"`
+ // ScaleDownDelayAfterDelete defines how long after node deletion that scale down evaluation resumes (defaults to ScanInterval).
+ // +optional
+ ScaleDownDelayAfterDelete *metav1.Duration `json:"scaleDownDelayAfterDelete,omitempty"`
+ // ScaleDownDelayAfterFailure defines how long after scale down failure that scale down evaluation resumes (default: 3 mins).
+ // +optional
+ ScaleDownDelayAfterFailure *metav1.Duration `json:"scaleDownDelayAfterFailure,omitempty"`
+ // ScaleDownUnneededTime defines how long a node should be unneeded before it is eligible for scale down (default: 10 mins).
+ // +optional
+ ScaleDownUnneededTime *metav1.Duration `json:"scaleDownUnneededTime,omitempty"`
+ // ScaleDownUtilizationThreshold defines the threshold in % under which a node is being removed.
+ // +optional
+ ScaleDownUtilizationThreshold *float64 `json:"scaleDownUtilizationThreshold,omitempty"`
+ // ScanInterval defines how often the cluster is reevaluated for scale up or down (default: 10 secs).
+ // +optional
+ ScanInterval *metav1.Duration `json:"scanInterval,omitempty"`
+}
+
+// KubernetesConfig contains common configuration fields for the control plane components.
+type KubernetesConfig struct {
+ // FeatureGates contains information about enabled feature gates.
+ // +optional
+ FeatureGates map[string]bool `json:"featureGates,omitempty"`
+}
+
+// KubeAPIServerConfig contains configuration settings for the kube-apiserver.
+type KubeAPIServerConfig struct { + KubernetesConfig `json:",inline"` + // AdmissionPlugins contains the list of user-defined admission plugins (additional to those managed by Gardener), and, if desired, the corresponding + // configuration. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + AdmissionPlugins []AdmissionPlugin `json:"admissionPlugins,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + // APIAudiences are the identifiers of the API. The service account token authenticator will + // validate that tokens used against the API are bound to at least one of these audiences. + // If `serviceAccountConfig.issuer` is configured and this is not, this defaults to a single + // element list containing the issuer URL. + // +optional + APIAudiences []string `json:"apiAudiences,omitempty"` + // AuditConfig contains configuration settings for the audit of the kube-apiserver. + // +optional + AuditConfig *AuditConfig `json:"auditConfig,omitempty"` + // EnableBasicAuthentication defines whether basic authentication should be enabled for this cluster or not. + // +optional + EnableBasicAuthentication *bool `json:"enableBasicAuthentication,omitempty"` + // OIDCConfig contains configuration settings for the OIDC provider. + // +optional + OIDCConfig *OIDCConfig `json:"oidcConfig,omitempty"` + // RuntimeConfig contains information about enabled or disabled APIs. + // +optional + RuntimeConfig map[string]bool `json:"runtimeConfig,omitempty"` + // ServiceAccountConfig contains configuration settings for the service account handling + // of the kube-apiserver. + // +optional + ServiceAccountConfig *ServiceAccountConfig `json:"serviceAccountConfig,omitempty"` +} + +// ServiceAccountConfig is the kube-apiserver configuration for service accounts. +type ServiceAccountConfig struct { + // Issuer is the identifier of the service account token issuer. The issuer will assert this + // identifier in "iss" claim of issued tokens. This value is a string or URI. + // +optional + Issuer *string `json:"issuer,omitempty"` + // SigningKeySecret is a reference to a secret that contains the current private key of the + // service account token issuer. The issuer will sign issued ID tokens with this private key. + // (Requires the 'TokenRequest' feature gate.) + // +optional + SigningKeySecret *corev1.LocalObjectReference `json:"signingKeySecretName,omitempty"` +} + +// AuditConfig contains settings for audit of the api server +type AuditConfig struct { + // AuditPolicy contains configuration settings for audit policy of the kube-apiserver. + // +optional + AuditPolicy *AuditPolicy `json:"auditPolicy,omitempty"` +} + +// AuditPolicy contains audit policy for kube-apiserver +type AuditPolicy struct { + // ConfigMapRef is a reference to a ConfigMap object in the same namespace, + // which contains the audit policy for the kube-apiserver. + // +optional + ConfigMapRef *corev1.ObjectReference `json:"configMapRef,omitempty"` +} + +// OIDCConfig contains configuration settings for the OIDC provider. +// Note: Descriptions were taken from the Kubernetes documentation. +type OIDCConfig struct { + // If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used. + // +optional + CABundle *string `json:"caBundle,omitempty"` + // ClientAuthentication can optionally contain client configuration used for kubeconfig generation. 
+ // +optional + ClientAuthentication *OpenIDConnectClientAuthentication `json:"clientAuthentication,omitempty"` + // The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set. + // +optional + ClientID *string `json:"clientID,omitempty"` + // If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be a string or array of strings. This flag is experimental, please see the authentication documentation for further details. + // +optional + GroupsClaim *string `json:"groupsClaim,omitempty"` + // If provided, all groups will be prefixed with this value to prevent conflicts with other authentication strategies. + // +optional + GroupsPrefix *string `json:"groupsPrefix,omitempty"` + // The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT). + // +optional + IssuerURL *string `json:"issuerURL,omitempty"` + // ATTENTION: Only meaningful for Kubernetes >= 1.11 + // key=value pairs that describes a required claim in the ID Token. If set, the claim is verified to be present in the ID Token with a matching value. + // +optional + RequiredClaims map[string]string `json:"requiredClaims,omitempty"` + // List of allowed JOSE asymmetric signing algorithms. JWTs with a 'alg' header value not in this list will be rejected. Values are defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1 + // +optional + SigningAlgs []string `json:"signingAlgs,omitempty"` + // The OpenID claim to use as the user name. Note that claims other than the default ('sub') is not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details. (default "sub") + // +optional + UsernameClaim *string `json:"usernameClaim,omitempty"` + // If provided, all usernames will be prefixed with this value. If not provided, username claims other than 'email' are prefixed by the issuer URL to avoid clashes. To skip any prefixing, provide the value '-'. + // +optional + UsernamePrefix *string `json:"usernamePrefix,omitempty"` +} + +// OpenIDConnectClientAuthentication contains configuration for OIDC clients. +type OpenIDConnectClientAuthentication struct { + // Extra configuration added to kubeconfig's auth-provider. + // Must not be any of idp-issuer-url, client-id, client-secret, idp-certificate-authority, idp-certificate-authority-data, id-token or refresh-token + // +optional + ExtraConfig map[string]string `json:"extraConfig,omitempty"` + // The client Secret for the OpenID Connect client. + // +optional + Secret *string `json:"secret,omitempty"` +} + +// AdmissionPlugin contains information about a specific admission plugin and its corresponding configuration. +type AdmissionPlugin struct { + // Name is the name of the plugin. + Name string `json:"name"` + // Config is the configuration of the plugin. + // +optional + Config *ProviderConfig `json:"config,omitempty"` +} + +// KubeControllerManagerConfig contains configuration settings for the kube-controller-manager. +type KubeControllerManagerConfig struct { + KubernetesConfig `json:",inline"` + // HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager. 
+ // +optional + HorizontalPodAutoscalerConfig *HorizontalPodAutoscalerConfig `json:"horizontalPodAutoscaler,omitempty"` + // NodeCIDRMaskSize defines the mask size for node cidr in cluster (default is 24) + // +optional + NodeCIDRMaskSize *int32 `json:"nodeCIDRMaskSize,omitempty"` +} + +// HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager. +// Note: Descriptions were taken from the Kubernetes documentation. +type HorizontalPodAutoscalerConfig struct { + // The period after which a ready pod transition is considered to be the first. + // +optional + CPUInitializationPeriod *metav1.Duration `json:"cpuInitializationPeriod,omitempty"` + // The period since last downscale, before another downscale can be performed in horizontal pod autoscaler. + // +optional + DownscaleDelay *metav1.Duration `json:"downscaleDelay,omitempty"` + // The configurable window at which the controller will choose the highest recommendation for autoscaling. + // +optional + DownscaleStabilization *metav1.Duration `json:"downscaleStabilization,omitempty"` + // The configurable period at which the horizontal pod autoscaler considers a Pod “not yet ready” given that it’s unready and it has transitioned to unready during that time. + // +optional + InitialReadinessDelay *metav1.Duration `json:"initialReadinessDelay,omitempty"` + // The period for syncing the number of pods in horizontal pod autoscaler. + // +optional + SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"` + // The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling. + // +optional + Tolerance *float64 `json:"tolerance,omitempty"` + // The period since last upscale, before another upscale can be performed in horizontal pod autoscaler. + // +optional + UpscaleDelay *metav1.Duration `json:"upscaleDelay,omitempty"` +} + +const ( + // DefaultHPADownscaleDelay is a constant for the default HPA downscale delay for a Shoot cluster. + DefaultHPADownscaleDelay = 15 * time.Minute + // DefaultHPASyncPeriod is a constant for the default HPA sync period for a Shoot cluster. + DefaultHPASyncPeriod = 30 * time.Second + // DefaultHPATolerance is a constant for the default HPA tolerance for a Shoot cluster. + DefaultHPATolerance = 0.1 + // DefaultHPAUpscaleDelay is for the default HPA upscale delay for a Shoot cluster. + DefaultHPAUpscaleDelay = 1 * time.Minute + // DefaultDownscaleStabilization is the default HPA downscale stabilization window for a Shoot cluster + DefaultDownscaleStabilization = 5 * time.Minute + // DefaultInitialReadinessDelay is for the default HPA ReadinessDelay value in the Shoot cluster + DefaultInitialReadinessDelay = 30 * time.Second + // DefaultCPUInitializationPeriod is the for the default value of the CPUInitializationPeriod in the Shoot cluster + DefaultCPUInitializationPeriod = 5 * time.Minute +) + +// KubeSchedulerConfig contains configuration settings for the kube-scheduler. +type KubeSchedulerConfig struct { + KubernetesConfig `json:",inline"` +} + +// KubeProxyConfig contains configuration settings for the kube-proxy. +type KubeProxyConfig struct { + KubernetesConfig `json:",inline"` + // Mode specifies which proxy mode to use. + // defaults to IPTables. + // +optional + Mode *ProxyMode `json:"mode,omitempty"` +} + +// ProxyMode available in Linux platform: 'userspace' (older, going to be EOL), 'iptables' +// (newer, faster), 'ipvs' (newest, better in performance and scalability). 
+// As of now only 'iptables' and 'ipvs' is supported by Gardener. +// In Linux platform, if the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are +// insufficient, this always falls back to the userspace proxy. IPVS mode will be enabled when proxy mode is set to 'ipvs', +// and the fall back path is firstly iptables and then userspace. +type ProxyMode string + +const ( + // ProxyModeIPTables uses iptables as proxy implementation. + ProxyModeIPTables ProxyMode = "IPTables" + // ProxyModeIPVS uses ipvs as proxy implementation. + ProxyModeIPVS ProxyMode = "IPVS" +) + +// KubeletConfig contains configuration settings for the kubelet. +type KubeletConfig struct { + KubernetesConfig `json:",inline"` + // CPUCFSQuota allows you to disable/enable CPU throttling for Pods. + // +optional + CPUCFSQuota *bool `json:"cpuCFSQuota,omitempty"` + // CPUManagerPolicy allows to set alternative CPU management policies (default: none). + // +optional + CPUManagerPolicy *string `json:"cpuManagerPolicy,omitempty"` + // EvictionHard describes a set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a Pod eviction. + // +optional + // Default: + // memory.available: "100Mi/1Gi/5%" + // nodefs.available: "5%" + // nodefs.inodesFree: "5%" + // imagefs.available: "5%" + // imagefs.inodesFree: "5%" + EvictionHard *KubeletConfigEviction `json:"evictionHard,omitempty"` + // EvictionMaxPodGracePeriod describes the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. + // +optional + // Default: 90 + EvictionMaxPodGracePeriod *int32 `json:"evictionMaxPodGracePeriod,omitempty"` + // EvictionMinimumReclaim configures the amount of resources below the configured eviction threshold that the kubelet attempts to reclaim whenever the kubelet observes resource pressure. + // +optional + // Default: 0 for each resource + EvictionMinimumReclaim *KubeletConfigEvictionMinimumReclaim `json:"evictionMinimumReclaim,omitempty"` + // EvictionPressureTransitionPeriod is the duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. + // +optional + // Default: 4m0s + EvictionPressureTransitionPeriod *metav1.Duration `json:"evictionPressureTransitionPeriod,omitempty"` + // EvictionSoft describes a set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a Pod eviction. + // +optional + // Default: + // memory.available: "200Mi/1.5Gi/10%" + // nodefs.available: "10%" + // nodefs.inodesFree: "10%" + // imagefs.available: "10%" + // imagefs.inodesFree: "10%" + EvictionSoft *KubeletConfigEviction `json:"evictionSoft,omitempty"` + // EvictionSoftGracePeriod describes a set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a Pod eviction. + // +optional + // Default: + // memory.available: 1m30s + // nodefs.available: 1m30s + // nodefs.inodesFree: 1m30s + // imagefs.available: 1m30s + // imagefs.inodesFree: 1m30s + EvictionSoftGracePeriod *KubeletConfigEvictionSoftGracePeriod `json:"evictionSoftGracePeriod,omitempty"` + // MaxPods is the maximum number of Pods that are allowed by the Kubelet. + // +optional + // Default: 110 + MaxPods *int32 `json:"maxPods,omitempty"` + // PodPIDsLimit is the maximum number of process IDs per pod allowed by the kubelet. 
+ // +optional + PodPIDsLimit *int64 `json:"podPidsLimit,omitempty"` +} + +// KubeletConfigEviction contains kubelet eviction thresholds supporting either a resource.Quantity or a percentage based value. +type KubeletConfigEviction struct { + // MemoryAvailable is the threshold for the free memory on the host server. + // +optional + MemoryAvailable *string `json:"memoryAvailable,omitempty"` + // ImageFSAvailable is the threshold for the free disk space in the imagefs filesystem (docker images and container writable layers). + // +optional + ImageFSAvailable *string `json:"imageFSAvailable,omitempty"` + // ImageFSInodesFree is the threshold for the available inodes in the imagefs filesystem. + // +optional + ImageFSInodesFree *string `json:"imageFSInodesFree,omitempty"` + // NodeFSAvailable is the threshold for the free disk space in the nodefs filesystem (docker volumes, logs, etc). + // +optional + NodeFSAvailable *string `json:"nodeFSAvailable,omitempty"` + // NodeFSInodesFree is the threshold for the available inodes in the nodefs filesystem. + // +optional + NodeFSInodesFree *string `json:"nodeFSInodesFree,omitempty"` +} + +// KubeletConfigEvictionMinimumReclaim contains configuration for the kubelet eviction minimum reclaim. +type KubeletConfigEvictionMinimumReclaim struct { + // MemoryAvailable is the threshold for the memory reclaim on the host server. + // +optional + MemoryAvailable *resource.Quantity `json:"memoryAvailable,omitempty"` + // ImageFSAvailable is the threshold for the disk space reclaim in the imagefs filesystem (docker images and container writable layers). + // +optional + ImageFSAvailable *resource.Quantity `json:"imageFSAvailable,omitempty"` + // ImageFSInodesFree is the threshold for the inodes reclaim in the imagefs filesystem. + // +optional + ImageFSInodesFree *resource.Quantity `json:"imageFSInodesFree,omitempty"` + // NodeFSAvailable is the threshold for the disk space reclaim in the nodefs filesystem (docker volumes, logs, etc). + // +optional + NodeFSAvailable *resource.Quantity `json:"nodeFSAvailable,omitempty"` + // NodeFSInodesFree is the threshold for the inodes reclaim in the nodefs filesystem. + // +optional + NodeFSInodesFree *resource.Quantity `json:"nodeFSInodesFree,omitempty"` +} + +// KubeletConfigEvictionSoftGracePeriod contains grace periods for kubelet eviction thresholds. +type KubeletConfigEvictionSoftGracePeriod struct { + // MemoryAvailable is the grace period for the MemoryAvailable eviction threshold. + // +optional + MemoryAvailable *metav1.Duration `json:"memoryAvailable,omitempty"` + // ImageFSAvailable is the grace period for the ImageFSAvailable eviction threshold. + // +optional + ImageFSAvailable *metav1.Duration `json:"imageFSAvailable,omitempty"` + // ImageFSInodesFree is the grace period for the ImageFSInodesFree eviction threshold. + // +optional + ImageFSInodesFree *metav1.Duration `json:"imageFSInodesFree,omitempty"` + // NodeFSAvailable is the grace period for the NodeFSAvailable eviction threshold. + // +optional + NodeFSAvailable *metav1.Duration `json:"nodeFSAvailable,omitempty"` + // NodeFSInodesFree is the grace period for the NodeFSInodesFree eviction threshold. 
+ // +optional + NodeFSInodesFree *metav1.Duration `json:"nodeFSInodesFree,omitempty"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Networking relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Networking defines networking parameters for the shoot cluster. +type Networking struct { + // Type identifies the type of the networking plugin. + Type string `json:"type"` + // ProviderConfig is the configuration passed to network resource. + // +optional + ProviderConfig *ProviderConfig `json:"providerConfig,omitempty"` + // Pods is the CIDR of the pod network. + // +optional + Pods *string `json:"pods,omitempty"` + // Nodes is the CIDR of the entire node network. + // +optional + Nodes *string `json:"nodes,omitempty"` + // Services is the CIDR of the service network. + // +optional + Services *string `json:"services,omitempty"` +} + +const ( + // DefaultPodNetworkCIDR is a constant for the default pod network CIDR of a Shoot cluster. + DefaultPodNetworkCIDR = "100.96.0.0/11" + // DefaultServiceNetworkCIDR is a constant for the default service network CIDR of a Shoot cluster. + DefaultServiceNetworkCIDR = "100.64.0.0/13" +) + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Maintenance relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Maintenance contains information about the time window for maintenance operations and which +// operations should be performed. +type Maintenance struct { + // AutoUpdate contains information about which constraints should be automatically updated. + // +optional + AutoUpdate *MaintenanceAutoUpdate `json:"autoUpdate,omitempty"` + // TimeWindow contains information about the time window for maintenance operations. + // +optional + TimeWindow *MaintenanceTimeWindow `json:"timeWindow,omitempty"` +} + +// MaintenanceAutoUpdate contains information about which constraints should be automatically updated. +type MaintenanceAutoUpdate struct { + // KubernetesVersion indicates whether the patch Kubernetes version may be automatically updated (default: true). + KubernetesVersion bool `json:"kubernetesVersion"` + // MachineImageVersion indicates whether the machine image version may be automatically updated (default: true). + MachineImageVersion bool `json:"machineImageVersion"` +} + +// MaintenanceTimeWindow contains information about the time window for maintenance operations. +type MaintenanceTimeWindow struct { + // Begin is the beginning of the time window in the format HHMMSS+ZONE, e.g. "220000+0100". + // If not present, a random value will be computed. + Begin string `json:"begin"` + // End is the end of the time window in the format HHMMSS+ZONE, e.g. "220000+0100". + // If not present, the value will be computed based on the "Begin" value. + End string `json:"end"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Monitoring relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Monitoring contains information about the monitoring configuration for the shoot. +type Monitoring struct { + // Alerting contains information about the alerting configuration for the shoot cluster. 
+ // +optional + Alerting *Alerting `json:"alerting,omitempty"` +} + +// Alerting contains information about how alerting will be done (i.e. who will receive alerts and how). +type Alerting struct { + // MonitoringEmailReceivers is a list of recipients for alerts + // +optional + EmailReceivers []string `json:"emailReceivers,omitempty"` +} + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Provider relevant types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +// Provider contains provider-specific information that are handed-over to the provider-specific +// extension controller. +type Provider struct { + // Type is the type of the provider. + Type string `json:"type"` + // ControlPlaneConfig contains the provider-specific control plane config blob. Please look up the concrete + // definition in the documentation of your provider extension. + // +optional + ControlPlaneConfig *ProviderConfig `json:"controlPlaneConfig,omitempty"` + // InfrastructureConfig contains the provider-specific infrastructure config blob. Please look up the concrete + // definition in the documentation of your provider extension. + // +optional + InfrastructureConfig *ProviderConfig `json:"infrastructureConfig,omitempty"` + // Workers is a list of worker groups. + // +patchMergeKey=name + // +patchStrategy=merge + Workers []Worker `json:"workers" patchStrategy:"merge" patchMergeKey:"name"` +} + +// Worker is the base definition of a worker group. +type Worker struct { + // Annotations is a map of key/value pairs for annotations for all the `Node` objects in this worker pool. + // +optional + Annotations map[string]string `json:"annotations,omitempty"` + // CABundle is a certificate bundle which will be installed onto every machine of this worker pool. + // +optional + CABundle *string `json:"caBundle,omitempty"` + // Kubernetes contains configuration for Kubernetes components related to this worker pool. + // +optional + Kubernetes *WorkerKubernetes `json:"kubernetes,omitempty"` + // Labels is a map of key/value pairs for labels for all the `Node` objects in this worker pool. + // +optional + Labels map[string]string `json:"labels,omitempty"` + // Name is the name of the worker group. + Name string `json:"name"` + // Machine contains information about the machine type and image. + Machine Machine `json:"machine"` + // Maximum is the maximum number of VMs to create. + Maximum int32 `json:"maximum"` + // Minimum is the minimum number of VMs to create. + Minimum int32 `json:"minimum"` + // MaxSurge is maximum number of VMs that are created during an update. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` + // MaxUnavailable is the maximum number of VMs that can be unavailable during an update. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + // ProviderConfig is the provider-specific configuration for this worker pool. + // +optional + ProviderConfig *ProviderConfig `json:"providerConfig,omitempty"` + // Taints is a list of taints for all the `Node` objects in this worker pool. + // +optional + Taints []corev1.Taint `json:"taints,omitempty"` + // Volume contains information about the volume type and size. + // +optional + Volume *Volume `json:"volume,omitempty"` + // DataVolumes contains a list of additional worker volumes. 
+ // +optional + DataVolumes []Volume `json:"dataVolumes,omitempty"` + // KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state. + // +optional + KubeletDataVolumeName *string `json:"kubeletDataVolumeName,omitempty"` + // Zones is a list of availability zones that are used to evenly distribute this worker pool. Optional + // as not every provider may support availability zones. + // +optional + Zones []string `json:"zones,omitempty"` +} + +// WorkerKubernetes contains configuration for Kubernetes components related to this worker pool. +type WorkerKubernetes struct { + // Kubelet contains configuration settings for all kubelets of this worker pool. + // +optional + Kubelet *KubeletConfig `json:"kubelet,omitempty"` +} + +// Machine contains information about the machine type and image. +type Machine struct { + // Type is the machine type of the worker group. + Type string `json:"type"` + // Image holds information about the machine image to use for all nodes of this pool. It will default to the + // latest version of the first image stated in the referenced CloudProfile if no value has been provided. + // +optional + Image *ShootMachineImage `json:"image,omitempty"` +} + +// ShootMachineImage defines the name and the version of the shoot's machine image in any environment. Has to be +// defined in the respective CloudProfile. +type ShootMachineImage struct { + // Name is the name of the image. + Name string `json:"name"` + // ProviderConfig is the shoot's individual configuration passed to an extension resource. + // +optional + ProviderConfig *ProviderConfig `json:"providerConfig,omitempty"` + // Version is the version of the shoot's image. + Version string `json:"version"` +} + +// Volume contains information about the volume type and size. +type Volume struct { + // Name of the volume to make it referencable. + // +optional + Name *string `json:"name,omitempty"` + // Type is the type of the volume. + // +optional + Type *string `json:"type,omitempty"` + // Size is the size of the volume. + Size string `json:"size"` + // Encrypted determines if the volume should be encrypted. + // +optional + Encrypted *bool `json:"encrypted,omitempty"` +} + +var ( + // DefaultWorkerMaxSurge is the default value for Worker MaxSurge. + DefaultWorkerMaxSurge = intstr.FromInt(1) + // DefaultWorkerMaxUnavailable is the default value for Worker MaxUnavailable. + DefaultWorkerMaxUnavailable = intstr.FromInt(0) +) + +////////////////////////////////////////////////////////////////////////////////////////////////// +// Other/miscellaneous constants and types // +////////////////////////////////////////////////////////////////////////////////////////////////// + +const ( + // ShootEventMaintenanceDone indicates that a maintenance operation has been performed. + ShootEventMaintenanceDone = "MaintenanceDone" + // ShootEventMaintenanceError indicates that a maintenance operation has failed. + ShootEventMaintenanceError = "MaintenanceError" + + // ShootEventSchedulingSuccessful indicates that a scheduling decision was taken successfully. + ShootEventSchedulingSuccessful = "SchedulingSuccessful" + // ShootEventSchedulingFailed indicates that a scheduling decision failed. + ShootEventSchedulingFailed = "SchedulingFailed" +) + +const ( + // ShootAPIServerAvailable is a constant for a condition type indicating that the Shoot cluster's API server is available. 
+ ShootAPIServerAvailable ConditionType = "APIServerAvailable" + // ShootControlPlaneHealthy is a constant for a condition type indicating the control plane health. + ShootControlPlaneHealthy ConditionType = "ControlPlaneHealthy" + // ShootEveryNodeReady is a constant for a condition type indicating the node health. + ShootEveryNodeReady ConditionType = "EveryNodeReady" + // ShootSystemComponentsHealthy is a constant for a condition type indicating the system components health. + ShootSystemComponentsHealthy ConditionType = "SystemComponentsHealthy" + // ShootHibernationPossible is a constant for a condition type indicating whether the Shoot can be hibernated. + ShootHibernationPossible ConditionType = "HibernationPossible" +) + +// ShootPurpose is a type alias for string. +type ShootPurpose string + +const ( + // ShootPurposeEvaluation is a constant for the evaluation purpose. + ShootPurposeEvaluation ShootPurpose = "evaluation" + // ShootPurposeTesting is a constant for the testing purpose. + ShootPurposeTesting ShootPurpose = "testing" + // ShootPurposeDevelopment is a constant for the development purpose. + ShootPurposeDevelopment ShootPurpose = "development" + // ShootPurposeProduction is a constant for the production purpose. + ShootPurposeProduction ShootPurpose = "production" + // ShootPurposeInfrastructure is a constant for the infrastructure purpose. + ShootPurposeInfrastructure ShootPurpose = "infrastructure" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_utils.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_utils.go new file mode 100644 index 000000000..b5e4e0b71 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_utils.go @@ -0,0 +1,80 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // EventSchedulingSuccessful is an event reason for successful scheduling. + EventSchedulingSuccessful = "SchedulingSuccessful" + // EventSchedulingFailed is an event reason for failed scheduling. + EventSchedulingFailed = "SchedulingFailed" +) + +// ProviderConfig is a workaround for missing OpenAPI functions on runtime.RawExtension struct. +// https://github.com/kubernetes/kubernetes/issues/55890 +// https://github.com/kubernetes-sigs/cluster-api/issues/137 +type ProviderConfig struct { + runtime.RawExtension `json:",inline"` +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (ProviderConfig) OpenAPISchemaType() []string { return []string{"object"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (ProviderConfig) OpenAPISchemaFormat() string { return "" } + +// ConditionStatus is the status of a condition. +type ConditionStatus string + +// ConditionType is a string alias. +type ConditionType string + +// Condition holds the information about the state of a resource. +type Condition struct { + // Type of the Shoot condition. + Type ConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + // Last time the condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime"` + // The reason for the condition's last transition. + Reason string `json:"reason"` + // A human readable message indicating details about the transition. + Message string `json:"message"` +} + +const ( + // ConditionTrue means a resource is in the condition. + ConditionTrue ConditionStatus = "True" + // ConditionFalse means a resource is not in the condition. + ConditionFalse ConditionStatus = "False" + // ConditionUnknown means Gardener can't decide if a resource is in the condition or not. + ConditionUnknown ConditionStatus = "Unknown" + // ConditionProgressing means the condition was seen true, failed but stayed within a predefined failure threshold. + // In the future, we could add other intermediate conditions, e.g. ConditionDegraded. + ConditionProgressing ConditionStatus = "Progressing" + + // ConditionCheckError is a constant for a reason in condition. + ConditionCheckError = "ConditionCheckError" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..50c040a8f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go @@ -0,0 +1,3974 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. 
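For orientation, here is a minimal usage sketch of the ProviderConfig and Condition types defined in types_utils.go above. It is illustrative only and not part of the vendored files; the import alias, the reason/message strings, and the raw JSON payload (including its apiVersion/kind) are placeholders.

package main

import (
	"fmt"
	"time"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// ProviderConfig wraps runtime.RawExtension, so provider-specific blobs travel
	// as opaque JSON until the responsible extension controller decodes them.
	infraConfig := &gardencorev1beta1.ProviderConfig{
		RawExtension: runtime.RawExtension{
			// Placeholder payload; a real InfrastructureConfig is provider-specific.
			Raw: []byte(`{"apiVersion":"example.extensions.gardener.cloud/v1alpha1","kind":"InfrastructureConfig"}`),
		},
	}

	// A Condition records the observed state of a resource together with a reason
	// and the time of the last status transition.
	now := metav1.NewTime(time.Now())
	condition := gardencorev1beta1.Condition{
		Type:               gardencorev1beta1.ShootAPIServerAvailable,
		Status:             gardencorev1beta1.ConditionTrue,
		LastTransitionTime: now,
		LastUpdateTime:     now,
		Reason:             "APIServerRunning",
		Message:            "API server is reachable",
	}

	fmt.Printf("raw config bytes: %d, condition: %s=%s\n", len(infraConfig.Raw), condition.Type, condition.Status)
}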
+ +package v1beta1 + +import ( + unsafe "unsafe" + + core "github.com/gardener/gardener/pkg/apis/core" + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*Addon)(nil), (*core.Addon)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Addon_To_core_Addon(a.(*Addon), b.(*core.Addon), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Addon)(nil), (*Addon)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Addon_To_v1beta1_Addon(a.(*core.Addon), b.(*Addon), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Addons)(nil), (*core.Addons)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Addons_To_core_Addons(a.(*Addons), b.(*core.Addons), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Addons)(nil), (*Addons)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Addons_To_v1beta1_Addons(a.(*core.Addons), b.(*Addons), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AdmissionPlugin)(nil), (*core.AdmissionPlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AdmissionPlugin_To_core_AdmissionPlugin(a.(*AdmissionPlugin), b.(*core.AdmissionPlugin), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AdmissionPlugin)(nil), (*AdmissionPlugin)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AdmissionPlugin_To_v1beta1_AdmissionPlugin(a.(*core.AdmissionPlugin), b.(*AdmissionPlugin), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Alerting)(nil), (*core.Alerting)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Alerting_To_core_Alerting(a.(*Alerting), b.(*core.Alerting), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Alerting)(nil), (*Alerting)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Alerting_To_v1beta1_Alerting(a.(*core.Alerting), b.(*Alerting), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AuditConfig)(nil), (*core.AuditConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AuditConfig_To_core_AuditConfig(a.(*AuditConfig), b.(*core.AuditConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AuditConfig)(nil), (*AuditConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AuditConfig_To_v1beta1_AuditConfig(a.(*core.AuditConfig), b.(*AuditConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AuditPolicy)(nil), (*core.AuditPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + 
return Convert_v1beta1_AuditPolicy_To_core_AuditPolicy(a.(*AuditPolicy), b.(*core.AuditPolicy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AuditPolicy)(nil), (*AuditPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AuditPolicy_To_v1beta1_AuditPolicy(a.(*core.AuditPolicy), b.(*AuditPolicy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AvailabilityZone)(nil), (*core.AvailabilityZone)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AvailabilityZone_To_core_AvailabilityZone(a.(*AvailabilityZone), b.(*core.AvailabilityZone), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AvailabilityZone)(nil), (*AvailabilityZone)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AvailabilityZone_To_v1beta1_AvailabilityZone(a.(*core.AvailabilityZone), b.(*AvailabilityZone), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupBucket)(nil), (*core.BackupBucket)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BackupBucket_To_core_BackupBucket(a.(*BackupBucket), b.(*core.BackupBucket), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupBucket)(nil), (*BackupBucket)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupBucket_To_v1beta1_BackupBucket(a.(*core.BackupBucket), b.(*BackupBucket), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupBucketList)(nil), (*core.BackupBucketList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BackupBucketList_To_core_BackupBucketList(a.(*BackupBucketList), b.(*core.BackupBucketList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupBucketList)(nil), (*BackupBucketList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupBucketList_To_v1beta1_BackupBucketList(a.(*core.BackupBucketList), b.(*BackupBucketList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupBucketProvider)(nil), (*core.BackupBucketProvider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BackupBucketProvider_To_core_BackupBucketProvider(a.(*BackupBucketProvider), b.(*core.BackupBucketProvider), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupBucketProvider)(nil), (*BackupBucketProvider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupBucketProvider_To_v1beta1_BackupBucketProvider(a.(*core.BackupBucketProvider), b.(*BackupBucketProvider), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupBucketSpec)(nil), (*core.BackupBucketSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BackupBucketSpec_To_core_BackupBucketSpec(a.(*BackupBucketSpec), b.(*core.BackupBucketSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupBucketSpec)(nil), (*BackupBucketSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec(a.(*core.BackupBucketSpec), b.(*BackupBucketSpec), scope) + }); err != nil { + return 
err + } + if err := s.AddGeneratedConversionFunc((*BackupBucketStatus)(nil), (*core.BackupBucketStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus(a.(*BackupBucketStatus), b.(*core.BackupBucketStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupBucketStatus)(nil), (*BackupBucketStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupBucketStatus_To_v1beta1_BackupBucketStatus(a.(*core.BackupBucketStatus), b.(*BackupBucketStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupEntry)(nil), (*core.BackupEntry)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BackupEntry_To_core_BackupEntry(a.(*BackupEntry), b.(*core.BackupEntry), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupEntry)(nil), (*BackupEntry)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupEntry_To_v1beta1_BackupEntry(a.(*core.BackupEntry), b.(*BackupEntry), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupEntryList)(nil), (*core.BackupEntryList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BackupEntryList_To_core_BackupEntryList(a.(*BackupEntryList), b.(*core.BackupEntryList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupEntryList)(nil), (*BackupEntryList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupEntryList_To_v1beta1_BackupEntryList(a.(*core.BackupEntryList), b.(*BackupEntryList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupEntrySpec)(nil), (*core.BackupEntrySpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BackupEntrySpec_To_core_BackupEntrySpec(a.(*BackupEntrySpec), b.(*core.BackupEntrySpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupEntrySpec)(nil), (*BackupEntrySpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupEntrySpec_To_v1beta1_BackupEntrySpec(a.(*core.BackupEntrySpec), b.(*BackupEntrySpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupEntryStatus)(nil), (*core.BackupEntryStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BackupEntryStatus_To_core_BackupEntryStatus(a.(*BackupEntryStatus), b.(*core.BackupEntryStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.BackupEntryStatus)(nil), (*BackupEntryStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_BackupEntryStatus_To_v1beta1_BackupEntryStatus(a.(*core.BackupEntryStatus), b.(*BackupEntryStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CloudInfo)(nil), (*core.CloudInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CloudInfo_To_core_CloudInfo(a.(*CloudInfo), b.(*core.CloudInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.CloudInfo)(nil), (*CloudInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_CloudInfo_To_v1beta1_CloudInfo(a.(*core.CloudInfo), b.(*CloudInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CloudProfile)(nil), (*core.CloudProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CloudProfile_To_core_CloudProfile(a.(*CloudProfile), b.(*core.CloudProfile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.CloudProfile)(nil), (*CloudProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CloudProfile_To_v1beta1_CloudProfile(a.(*core.CloudProfile), b.(*CloudProfile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CloudProfileList)(nil), (*core.CloudProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CloudProfileList_To_core_CloudProfileList(a.(*CloudProfileList), b.(*core.CloudProfileList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.CloudProfileList)(nil), (*CloudProfileList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CloudProfileList_To_v1beta1_CloudProfileList(a.(*core.CloudProfileList), b.(*CloudProfileList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CloudProfileSpec)(nil), (*core.CloudProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CloudProfileSpec_To_core_CloudProfileSpec(a.(*CloudProfileSpec), b.(*core.CloudProfileSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.CloudProfileSpec)(nil), (*CloudProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CloudProfileSpec_To_v1beta1_CloudProfileSpec(a.(*core.CloudProfileSpec), b.(*CloudProfileSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClusterAutoscaler)(nil), (*core.ClusterAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ClusterAutoscaler_To_core_ClusterAutoscaler(a.(*ClusterAutoscaler), b.(*core.ClusterAutoscaler), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ClusterAutoscaler)(nil), (*ClusterAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ClusterAutoscaler_To_v1beta1_ClusterAutoscaler(a.(*core.ClusterAutoscaler), b.(*ClusterAutoscaler), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClusterInfo)(nil), (*core.ClusterInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ClusterInfo_To_core_ClusterInfo(a.(*ClusterInfo), b.(*core.ClusterInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ClusterInfo)(nil), (*ClusterInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ClusterInfo_To_v1beta1_ClusterInfo(a.(*core.ClusterInfo), b.(*ClusterInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Condition)(nil), (*core.Condition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Condition_To_core_Condition(a.(*Condition), b.(*core.Condition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Condition)(nil), (*Condition)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_core_Condition_To_v1beta1_Condition(a.(*core.Condition), b.(*Condition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ControllerDeployment)(nil), (*core.ControllerDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ControllerDeployment_To_core_ControllerDeployment(a.(*ControllerDeployment), b.(*core.ControllerDeployment), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ControllerDeployment)(nil), (*ControllerDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ControllerDeployment_To_v1beta1_ControllerDeployment(a.(*core.ControllerDeployment), b.(*ControllerDeployment), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ControllerInstallation)(nil), (*core.ControllerInstallation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ControllerInstallation_To_core_ControllerInstallation(a.(*ControllerInstallation), b.(*core.ControllerInstallation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ControllerInstallation)(nil), (*ControllerInstallation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ControllerInstallation_To_v1beta1_ControllerInstallation(a.(*core.ControllerInstallation), b.(*ControllerInstallation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ControllerInstallationList)(nil), (*core.ControllerInstallationList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ControllerInstallationList_To_core_ControllerInstallationList(a.(*ControllerInstallationList), b.(*core.ControllerInstallationList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ControllerInstallationList)(nil), (*ControllerInstallationList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ControllerInstallationList_To_v1beta1_ControllerInstallationList(a.(*core.ControllerInstallationList), b.(*ControllerInstallationList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ControllerInstallationSpec)(nil), (*core.ControllerInstallationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(a.(*ControllerInstallationSpec), b.(*core.ControllerInstallationSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ControllerInstallationSpec)(nil), (*ControllerInstallationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ControllerInstallationSpec_To_v1beta1_ControllerInstallationSpec(a.(*core.ControllerInstallationSpec), b.(*ControllerInstallationSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ControllerInstallationStatus)(nil), (*core.ControllerInstallationStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(a.(*ControllerInstallationStatus), b.(*core.ControllerInstallationStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ControllerInstallationStatus)(nil), (*ControllerInstallationStatus)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_core_ControllerInstallationStatus_To_v1beta1_ControllerInstallationStatus(a.(*core.ControllerInstallationStatus), b.(*ControllerInstallationStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ControllerRegistration)(nil), (*core.ControllerRegistration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ControllerRegistration_To_core_ControllerRegistration(a.(*ControllerRegistration), b.(*core.ControllerRegistration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ControllerRegistration)(nil), (*ControllerRegistration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ControllerRegistration_To_v1beta1_ControllerRegistration(a.(*core.ControllerRegistration), b.(*ControllerRegistration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ControllerRegistrationList)(nil), (*core.ControllerRegistrationList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ControllerRegistrationList_To_core_ControllerRegistrationList(a.(*ControllerRegistrationList), b.(*core.ControllerRegistrationList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ControllerRegistrationList)(nil), (*ControllerRegistrationList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ControllerRegistrationList_To_v1beta1_ControllerRegistrationList(a.(*core.ControllerRegistrationList), b.(*ControllerRegistrationList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ControllerRegistrationSpec)(nil), (*core.ControllerRegistrationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(a.(*ControllerRegistrationSpec), b.(*core.ControllerRegistrationSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ControllerRegistrationSpec)(nil), (*ControllerRegistrationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ControllerRegistrationSpec_To_v1beta1_ControllerRegistrationSpec(a.(*core.ControllerRegistrationSpec), b.(*ControllerRegistrationSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ControllerResource)(nil), (*core.ControllerResource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ControllerResource_To_core_ControllerResource(a.(*ControllerResource), b.(*core.ControllerResource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ControllerResource)(nil), (*ControllerResource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ControllerResource_To_v1beta1_ControllerResource(a.(*core.ControllerResource), b.(*ControllerResource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*DNS)(nil), (*core.DNS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DNS_To_core_DNS(a.(*DNS), b.(*core.DNS), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.DNS)(nil), (*DNS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_DNS_To_v1beta1_DNS(a.(*core.DNS), b.(*DNS), scope) + }); err != nil 
{ + return err + } + if err := s.AddGeneratedConversionFunc((*DNSIncludeExclude)(nil), (*core.DNSIncludeExclude)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DNSIncludeExclude_To_core_DNSIncludeExclude(a.(*DNSIncludeExclude), b.(*core.DNSIncludeExclude), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.DNSIncludeExclude)(nil), (*DNSIncludeExclude)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_DNSIncludeExclude_To_v1beta1_DNSIncludeExclude(a.(*core.DNSIncludeExclude), b.(*DNSIncludeExclude), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*DNSProvider)(nil), (*core.DNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DNSProvider_To_core_DNSProvider(a.(*DNSProvider), b.(*core.DNSProvider), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.DNSProvider)(nil), (*DNSProvider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_DNSProvider_To_v1beta1_DNSProvider(a.(*core.DNSProvider), b.(*DNSProvider), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Endpoint)(nil), (*core.Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Endpoint_To_core_Endpoint(a.(*Endpoint), b.(*core.Endpoint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Endpoint)(nil), (*Endpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Endpoint_To_v1beta1_Endpoint(a.(*core.Endpoint), b.(*Endpoint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExpirableVersion)(nil), (*core.ExpirableVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ExpirableVersion_To_core_ExpirableVersion(a.(*ExpirableVersion), b.(*core.ExpirableVersion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ExpirableVersion)(nil), (*ExpirableVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ExpirableVersion_To_v1beta1_ExpirableVersion(a.(*core.ExpirableVersion), b.(*ExpirableVersion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Extension)(nil), (*core.Extension)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Extension_To_core_Extension(a.(*Extension), b.(*core.Extension), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Extension)(nil), (*Extension)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Extension_To_v1beta1_Extension(a.(*core.Extension), b.(*Extension), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Gardener)(nil), (*core.Gardener)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Gardener_To_core_Gardener(a.(*Gardener), b.(*core.Gardener), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Gardener)(nil), (*Gardener)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Gardener_To_v1beta1_Gardener(a.(*core.Gardener), b.(*Gardener), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Hibernation)(nil), (*core.Hibernation)(nil), 
func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Hibernation_To_core_Hibernation(a.(*Hibernation), b.(*core.Hibernation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Hibernation)(nil), (*Hibernation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Hibernation_To_v1beta1_Hibernation(a.(*core.Hibernation), b.(*Hibernation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*HibernationSchedule)(nil), (*core.HibernationSchedule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_HibernationSchedule_To_core_HibernationSchedule(a.(*HibernationSchedule), b.(*core.HibernationSchedule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.HibernationSchedule)(nil), (*HibernationSchedule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_HibernationSchedule_To_v1beta1_HibernationSchedule(a.(*core.HibernationSchedule), b.(*HibernationSchedule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*HorizontalPodAutoscalerConfig)(nil), (*core.HorizontalPodAutoscalerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(a.(*HorizontalPodAutoscalerConfig), b.(*core.HorizontalPodAutoscalerConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.HorizontalPodAutoscalerConfig)(nil), (*HorizontalPodAutoscalerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_HorizontalPodAutoscalerConfig_To_v1beta1_HorizontalPodAutoscalerConfig(a.(*core.HorizontalPodAutoscalerConfig), b.(*HorizontalPodAutoscalerConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KubeAPIServerConfig)(nil), (*core.KubeAPIServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(a.(*KubeAPIServerConfig), b.(*core.KubeAPIServerConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubeAPIServerConfig)(nil), (*KubeAPIServerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KubeAPIServerConfig_To_v1beta1_KubeAPIServerConfig(a.(*core.KubeAPIServerConfig), b.(*KubeAPIServerConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KubeControllerManagerConfig)(nil), (*core.KubeControllerManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(a.(*KubeControllerManagerConfig), b.(*core.KubeControllerManagerConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubeControllerManagerConfig)(nil), (*KubeControllerManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KubeControllerManagerConfig_To_v1beta1_KubeControllerManagerConfig(a.(*core.KubeControllerManagerConfig), b.(*KubeControllerManagerConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KubeProxyConfig)(nil), (*core.KubeProxyConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1beta1_KubeProxyConfig_To_core_KubeProxyConfig(a.(*KubeProxyConfig), b.(*core.KubeProxyConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubeProxyConfig)(nil), (*KubeProxyConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KubeProxyConfig_To_v1beta1_KubeProxyConfig(a.(*core.KubeProxyConfig), b.(*KubeProxyConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KubeSchedulerConfig)(nil), (*core.KubeSchedulerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(a.(*KubeSchedulerConfig), b.(*core.KubeSchedulerConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubeSchedulerConfig)(nil), (*KubeSchedulerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KubeSchedulerConfig_To_v1beta1_KubeSchedulerConfig(a.(*core.KubeSchedulerConfig), b.(*KubeSchedulerConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KubeletConfig)(nil), (*core.KubeletConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeletConfig_To_core_KubeletConfig(a.(*KubeletConfig), b.(*core.KubeletConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubeletConfig)(nil), (*KubeletConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KubeletConfig_To_v1beta1_KubeletConfig(a.(*core.KubeletConfig), b.(*KubeletConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KubeletConfigEviction)(nil), (*core.KubeletConfigEviction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeletConfigEviction_To_core_KubeletConfigEviction(a.(*KubeletConfigEviction), b.(*core.KubeletConfigEviction), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubeletConfigEviction)(nil), (*KubeletConfigEviction)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KubeletConfigEviction_To_v1beta1_KubeletConfigEviction(a.(*core.KubeletConfigEviction), b.(*KubeletConfigEviction), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KubeletConfigEvictionMinimumReclaim)(nil), (*core.KubeletConfigEvictionMinimumReclaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(a.(*KubeletConfigEvictionMinimumReclaim), b.(*core.KubeletConfigEvictionMinimumReclaim), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubeletConfigEvictionMinimumReclaim)(nil), (*KubeletConfigEvictionMinimumReclaim)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KubeletConfigEvictionMinimumReclaim_To_v1beta1_KubeletConfigEvictionMinimumReclaim(a.(*core.KubeletConfigEvictionMinimumReclaim), b.(*KubeletConfigEvictionMinimumReclaim), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KubeletConfigEvictionSoftGracePeriod)(nil), (*core.KubeletConfigEvictionSoftGracePeriod)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1beta1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(a.(*KubeletConfigEvictionSoftGracePeriod), b.(*core.KubeletConfigEvictionSoftGracePeriod), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubeletConfigEvictionSoftGracePeriod)(nil), (*KubeletConfigEvictionSoftGracePeriod)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KubeletConfigEvictionSoftGracePeriod_To_v1beta1_KubeletConfigEvictionSoftGracePeriod(a.(*core.KubeletConfigEvictionSoftGracePeriod), b.(*KubeletConfigEvictionSoftGracePeriod), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Kubernetes)(nil), (*core.Kubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Kubernetes_To_core_Kubernetes(a.(*Kubernetes), b.(*core.Kubernetes), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Kubernetes)(nil), (*Kubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Kubernetes_To_v1beta1_Kubernetes(a.(*core.Kubernetes), b.(*Kubernetes), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KubernetesConfig)(nil), (*core.KubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(a.(*KubernetesConfig), b.(*core.KubernetesConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubernetesConfig)(nil), (*KubernetesConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(a.(*core.KubernetesConfig), b.(*KubernetesConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KubernetesDashboard)(nil), (*core.KubernetesDashboard)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubernetesDashboard_To_core_KubernetesDashboard(a.(*KubernetesDashboard), b.(*core.KubernetesDashboard), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubernetesDashboard)(nil), (*KubernetesDashboard)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KubernetesDashboard_To_v1beta1_KubernetesDashboard(a.(*core.KubernetesDashboard), b.(*KubernetesDashboard), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KubernetesInfo)(nil), (*core.KubernetesInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubernetesInfo_To_core_KubernetesInfo(a.(*KubernetesInfo), b.(*core.KubernetesInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubernetesInfo)(nil), (*KubernetesInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_KubernetesInfo_To_v1beta1_KubernetesInfo(a.(*core.KubernetesInfo), b.(*KubernetesInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KubernetesSettings)(nil), (*core.KubernetesSettings)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_KubernetesSettings_To_core_KubernetesSettings(a.(*KubernetesSettings), b.(*core.KubernetesSettings), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.KubernetesSettings)(nil), (*KubernetesSettings)(nil), func(a, 
b interface{}, scope conversion.Scope) error { + return Convert_core_KubernetesSettings_To_v1beta1_KubernetesSettings(a.(*core.KubernetesSettings), b.(*KubernetesSettings), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*LastError)(nil), (*core.LastError)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_LastError_To_core_LastError(a.(*LastError), b.(*core.LastError), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.LastError)(nil), (*LastError)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_LastError_To_v1beta1_LastError(a.(*core.LastError), b.(*LastError), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*LastOperation)(nil), (*core.LastOperation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_LastOperation_To_core_LastOperation(a.(*LastOperation), b.(*core.LastOperation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.LastOperation)(nil), (*LastOperation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_LastOperation_To_v1beta1_LastOperation(a.(*core.LastOperation), b.(*LastOperation), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Machine)(nil), (*core.Machine)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Machine_To_core_Machine(a.(*Machine), b.(*core.Machine), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Machine)(nil), (*Machine)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Machine_To_v1beta1_Machine(a.(*core.Machine), b.(*Machine), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*MachineImage)(nil), (*core.MachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MachineImage_To_core_MachineImage(a.(*MachineImage), b.(*core.MachineImage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.MachineImage)(nil), (*MachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_MachineImage_To_v1beta1_MachineImage(a.(*core.MachineImage), b.(*MachineImage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*MachineType)(nil), (*core.MachineType)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MachineType_To_core_MachineType(a.(*MachineType), b.(*core.MachineType), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.MachineType)(nil), (*MachineType)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_MachineType_To_v1beta1_MachineType(a.(*core.MachineType), b.(*MachineType), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*MachineTypeStorage)(nil), (*core.MachineTypeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MachineTypeStorage_To_core_MachineTypeStorage(a.(*MachineTypeStorage), b.(*core.MachineTypeStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.MachineTypeStorage)(nil), (*MachineTypeStorage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_MachineTypeStorage_To_v1beta1_MachineTypeStorage(a.(*core.MachineTypeStorage), b.(*MachineTypeStorage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Maintenance)(nil), (*core.Maintenance)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Maintenance_To_core_Maintenance(a.(*Maintenance), b.(*core.Maintenance), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Maintenance)(nil), (*Maintenance)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Maintenance_To_v1beta1_Maintenance(a.(*core.Maintenance), b.(*Maintenance), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*MaintenanceAutoUpdate)(nil), (*core.MaintenanceAutoUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(a.(*MaintenanceAutoUpdate), b.(*core.MaintenanceAutoUpdate), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.MaintenanceAutoUpdate)(nil), (*MaintenanceAutoUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_MaintenanceAutoUpdate_To_v1beta1_MaintenanceAutoUpdate(a.(*core.MaintenanceAutoUpdate), b.(*MaintenanceAutoUpdate), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*MaintenanceTimeWindow)(nil), (*core.MaintenanceTimeWindow)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(a.(*MaintenanceTimeWindow), b.(*core.MaintenanceTimeWindow), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.MaintenanceTimeWindow)(nil), (*MaintenanceTimeWindow)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_MaintenanceTimeWindow_To_v1beta1_MaintenanceTimeWindow(a.(*core.MaintenanceTimeWindow), b.(*MaintenanceTimeWindow), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Monitoring)(nil), (*core.Monitoring)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Monitoring_To_core_Monitoring(a.(*Monitoring), b.(*core.Monitoring), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Monitoring)(nil), (*Monitoring)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Monitoring_To_v1beta1_Monitoring(a.(*core.Monitoring), b.(*Monitoring), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Networking)(nil), (*core.Networking)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Networking_To_core_Networking(a.(*Networking), b.(*core.Networking), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Networking)(nil), (*Networking)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Networking_To_v1beta1_Networking(a.(*core.Networking), b.(*Networking), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NginxIngress)(nil), (*core.NginxIngress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_NginxIngress_To_core_NginxIngress(a.(*NginxIngress), b.(*core.NginxIngress), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*core.NginxIngress)(nil), (*NginxIngress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_NginxIngress_To_v1beta1_NginxIngress(a.(*core.NginxIngress), b.(*NginxIngress), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*OIDCConfig)(nil), (*core.OIDCConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_OIDCConfig_To_core_OIDCConfig(a.(*OIDCConfig), b.(*core.OIDCConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.OIDCConfig)(nil), (*OIDCConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_OIDCConfig_To_v1beta1_OIDCConfig(a.(*core.OIDCConfig), b.(*OIDCConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*OpenIDConnectClientAuthentication)(nil), (*core.OpenIDConnectClientAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(a.(*OpenIDConnectClientAuthentication), b.(*core.OpenIDConnectClientAuthentication), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.OpenIDConnectClientAuthentication)(nil), (*OpenIDConnectClientAuthentication)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_OpenIDConnectClientAuthentication_To_v1beta1_OpenIDConnectClientAuthentication(a.(*core.OpenIDConnectClientAuthentication), b.(*OpenIDConnectClientAuthentication), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Plant)(nil), (*core.Plant)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Plant_To_core_Plant(a.(*Plant), b.(*core.Plant), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Plant)(nil), (*Plant)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Plant_To_v1beta1_Plant(a.(*core.Plant), b.(*Plant), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PlantList)(nil), (*core.PlantList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PlantList_To_core_PlantList(a.(*PlantList), b.(*core.PlantList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.PlantList)(nil), (*PlantList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PlantList_To_v1beta1_PlantList(a.(*core.PlantList), b.(*PlantList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PlantSpec)(nil), (*core.PlantSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PlantSpec_To_core_PlantSpec(a.(*PlantSpec), b.(*core.PlantSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.PlantSpec)(nil), (*PlantSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PlantSpec_To_v1beta1_PlantSpec(a.(*core.PlantSpec), b.(*PlantSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PlantStatus)(nil), (*core.PlantStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PlantStatus_To_core_PlantStatus(a.(*PlantStatus), b.(*core.PlantStatus), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*core.PlantStatus)(nil), (*PlantStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_PlantStatus_To_v1beta1_PlantStatus(a.(*core.PlantStatus), b.(*PlantStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Project)(nil), (*core.Project)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Project_To_core_Project(a.(*Project), b.(*core.Project), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Project)(nil), (*Project)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Project_To_v1beta1_Project(a.(*core.Project), b.(*Project), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ProjectList)(nil), (*core.ProjectList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ProjectList_To_core_ProjectList(a.(*ProjectList), b.(*core.ProjectList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ProjectList)(nil), (*ProjectList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ProjectList_To_v1beta1_ProjectList(a.(*core.ProjectList), b.(*ProjectList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ProjectMember)(nil), (*core.ProjectMember)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ProjectMember_To_core_ProjectMember(a.(*ProjectMember), b.(*core.ProjectMember), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ProjectMember)(nil), (*ProjectMember)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ProjectMember_To_v1beta1_ProjectMember(a.(*core.ProjectMember), b.(*ProjectMember), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ProjectSpec)(nil), (*core.ProjectSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ProjectSpec_To_core_ProjectSpec(a.(*ProjectSpec), b.(*core.ProjectSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ProjectSpec)(nil), (*ProjectSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ProjectSpec_To_v1beta1_ProjectSpec(a.(*core.ProjectSpec), b.(*ProjectSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ProjectStatus)(nil), (*core.ProjectStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ProjectStatus_To_core_ProjectStatus(a.(*ProjectStatus), b.(*core.ProjectStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ProjectStatus)(nil), (*ProjectStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ProjectStatus_To_v1beta1_ProjectStatus(a.(*core.ProjectStatus), b.(*ProjectStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Provider)(nil), (*core.Provider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Provider_To_core_Provider(a.(*Provider), b.(*core.Provider), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Provider)(nil), (*Provider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_Provider_To_v1beta1_Provider(a.(*core.Provider), b.(*Provider), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ProviderConfig)(nil), (*core.ProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ProviderConfig_To_core_ProviderConfig(a.(*ProviderConfig), b.(*core.ProviderConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ProviderConfig)(nil), (*ProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ProviderConfig_To_v1beta1_ProviderConfig(a.(*core.ProviderConfig), b.(*ProviderConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Quota)(nil), (*core.Quota)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Quota_To_core_Quota(a.(*Quota), b.(*core.Quota), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Quota)(nil), (*Quota)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Quota_To_v1beta1_Quota(a.(*core.Quota), b.(*Quota), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*QuotaList)(nil), (*core.QuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_QuotaList_To_core_QuotaList(a.(*QuotaList), b.(*core.QuotaList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.QuotaList)(nil), (*QuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_QuotaList_To_v1beta1_QuotaList(a.(*core.QuotaList), b.(*QuotaList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*QuotaSpec)(nil), (*core.QuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_QuotaSpec_To_core_QuotaSpec(a.(*QuotaSpec), b.(*core.QuotaSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.QuotaSpec)(nil), (*QuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_QuotaSpec_To_v1beta1_QuotaSpec(a.(*core.QuotaSpec), b.(*QuotaSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Region)(nil), (*core.Region)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Region_To_core_Region(a.(*Region), b.(*core.Region), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Region)(nil), (*Region)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Region_To_v1beta1_Region(a.(*core.Region), b.(*Region), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SecretBinding)(nil), (*core.SecretBinding)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SecretBinding_To_core_SecretBinding(a.(*SecretBinding), b.(*core.SecretBinding), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SecretBinding)(nil), (*SecretBinding)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretBinding_To_v1beta1_SecretBinding(a.(*core.SecretBinding), b.(*SecretBinding), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SecretBindingList)(nil), (*core.SecretBindingList)(nil), func(a, b interface{}, scope conversion.Scope) 
error { + return Convert_v1beta1_SecretBindingList_To_core_SecretBindingList(a.(*SecretBindingList), b.(*core.SecretBindingList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SecretBindingList)(nil), (*SecretBindingList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretBindingList_To_v1beta1_SecretBindingList(a.(*core.SecretBindingList), b.(*SecretBindingList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Seed)(nil), (*core.Seed)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Seed_To_core_Seed(a.(*Seed), b.(*core.Seed), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Seed)(nil), (*Seed)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Seed_To_v1beta1_Seed(a.(*core.Seed), b.(*Seed), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedBackup)(nil), (*core.SeedBackup)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedBackup_To_core_SeedBackup(a.(*SeedBackup), b.(*core.SeedBackup), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedBackup)(nil), (*SeedBackup)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedBackup_To_v1beta1_SeedBackup(a.(*core.SeedBackup), b.(*SeedBackup), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedDNS)(nil), (*core.SeedDNS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedDNS_To_core_SeedDNS(a.(*SeedDNS), b.(*core.SeedDNS), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedDNS)(nil), (*SeedDNS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedDNS_To_v1beta1_SeedDNS(a.(*core.SeedDNS), b.(*SeedDNS), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedList)(nil), (*core.SeedList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedList_To_core_SeedList(a.(*SeedList), b.(*core.SeedList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedList)(nil), (*SeedList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedList_To_v1beta1_SeedList(a.(*core.SeedList), b.(*SeedList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedNetworks)(nil), (*core.SeedNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedNetworks_To_core_SeedNetworks(a.(*SeedNetworks), b.(*core.SeedNetworks), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedNetworks)(nil), (*SeedNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedNetworks_To_v1beta1_SeedNetworks(a.(*core.SeedNetworks), b.(*SeedNetworks), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedProvider)(nil), (*core.SeedProvider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedProvider_To_core_SeedProvider(a.(*SeedProvider), b.(*core.SeedProvider), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedProvider)(nil), (*SeedProvider)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_core_SeedProvider_To_v1beta1_SeedProvider(a.(*core.SeedProvider), b.(*SeedProvider), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedSpec)(nil), (*core.SeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedSpec_To_core_SeedSpec(a.(*SeedSpec), b.(*core.SeedSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedSpec)(nil), (*SeedSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedSpec_To_v1beta1_SeedSpec(a.(*core.SeedSpec), b.(*SeedSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedStatus)(nil), (*core.SeedStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedStatus_To_core_SeedStatus(a.(*SeedStatus), b.(*core.SeedStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedStatus)(nil), (*SeedStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedStatus_To_v1beta1_SeedStatus(a.(*core.SeedStatus), b.(*SeedStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedTaint)(nil), (*core.SeedTaint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedTaint_To_core_SeedTaint(a.(*SeedTaint), b.(*core.SeedTaint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedTaint)(nil), (*SeedTaint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedTaint_To_v1beta1_SeedTaint(a.(*core.SeedTaint), b.(*SeedTaint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedVolume)(nil), (*core.SeedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedVolume_To_core_SeedVolume(a.(*SeedVolume), b.(*core.SeedVolume), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedVolume)(nil), (*SeedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedVolume_To_v1beta1_SeedVolume(a.(*core.SeedVolume), b.(*SeedVolume), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SeedVolumeProvider)(nil), (*core.SeedVolumeProvider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_SeedVolumeProvider_To_core_SeedVolumeProvider(a.(*SeedVolumeProvider), b.(*core.SeedVolumeProvider), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SeedVolumeProvider)(nil), (*SeedVolumeProvider)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SeedVolumeProvider_To_v1beta1_SeedVolumeProvider(a.(*core.SeedVolumeProvider), b.(*SeedVolumeProvider), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServiceAccountConfig)(nil), (*core.ServiceAccountConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ServiceAccountConfig_To_core_ServiceAccountConfig(a.(*ServiceAccountConfig), b.(*core.ServiceAccountConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ServiceAccountConfig)(nil), (*ServiceAccountConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_ServiceAccountConfig_To_v1beta1_ServiceAccountConfig(a.(*core.ServiceAccountConfig), b.(*ServiceAccountConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Shoot)(nil), (*core.Shoot)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Shoot_To_core_Shoot(a.(*Shoot), b.(*core.Shoot), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Shoot)(nil), (*Shoot)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Shoot_To_v1beta1_Shoot(a.(*core.Shoot), b.(*Shoot), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ShootList)(nil), (*core.ShootList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ShootList_To_core_ShootList(a.(*ShootList), b.(*core.ShootList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ShootList)(nil), (*ShootList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ShootList_To_v1beta1_ShootList(a.(*core.ShootList), b.(*ShootList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ShootMachineImage)(nil), (*core.ShootMachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ShootMachineImage_To_core_ShootMachineImage(a.(*ShootMachineImage), b.(*core.ShootMachineImage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ShootMachineImage)(nil), (*ShootMachineImage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ShootMachineImage_To_v1beta1_ShootMachineImage(a.(*core.ShootMachineImage), b.(*ShootMachineImage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ShootNetworks)(nil), (*core.ShootNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ShootNetworks_To_core_ShootNetworks(a.(*ShootNetworks), b.(*core.ShootNetworks), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ShootNetworks)(nil), (*ShootNetworks)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ShootNetworks_To_v1beta1_ShootNetworks(a.(*core.ShootNetworks), b.(*ShootNetworks), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ShootSpec)(nil), (*core.ShootSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ShootSpec_To_core_ShootSpec(a.(*ShootSpec), b.(*core.ShootSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ShootSpec)(nil), (*ShootSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ShootSpec_To_v1beta1_ShootSpec(a.(*core.ShootSpec), b.(*ShootSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ShootStatus)(nil), (*core.ShootStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ShootStatus_To_core_ShootStatus(a.(*ShootStatus), b.(*core.ShootStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ShootStatus)(nil), (*ShootStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ShootStatus_To_v1beta1_ShootStatus(a.(*core.ShootStatus), b.(*ShootStatus), scope) + }); err != nil { + return err + } + 
if err := s.AddGeneratedConversionFunc((*Volume)(nil), (*core.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Volume_To_core_Volume(a.(*Volume), b.(*core.Volume), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Volume)(nil), (*Volume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Volume_To_v1beta1_Volume(a.(*core.Volume), b.(*Volume), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*VolumeType)(nil), (*core.VolumeType)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_VolumeType_To_core_VolumeType(a.(*VolumeType), b.(*core.VolumeType), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.VolumeType)(nil), (*VolumeType)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_VolumeType_To_v1beta1_VolumeType(a.(*core.VolumeType), b.(*VolumeType), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Worker)(nil), (*core.Worker)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Worker_To_core_Worker(a.(*Worker), b.(*core.Worker), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Worker)(nil), (*Worker)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Worker_To_v1beta1_Worker(a.(*core.Worker), b.(*Worker), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WorkerKubernetes)(nil), (*core.WorkerKubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_WorkerKubernetes_To_core_WorkerKubernetes(a.(*WorkerKubernetes), b.(*core.WorkerKubernetes), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.WorkerKubernetes)(nil), (*WorkerKubernetes)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_WorkerKubernetes_To_v1beta1_WorkerKubernetes(a.(*core.WorkerKubernetes), b.(*WorkerKubernetes), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*core.ProjectMember)(nil), (*ProjectMember)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ProjectMember_To_v1beta1_ProjectMember(a.(*core.ProjectMember), b.(*ProjectMember), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*core.ProjectSpec)(nil), (*ProjectSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ProjectSpec_To_v1beta1_ProjectSpec(a.(*core.ProjectSpec), b.(*ProjectSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ProjectMember)(nil), (*core.ProjectMember)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ProjectMember_To_core_ProjectMember(a.(*ProjectMember), b.(*core.ProjectMember), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ProjectSpec)(nil), (*core.ProjectSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ProjectSpec_To_core_ProjectSpec(a.(*ProjectSpec), b.(*core.ProjectSpec), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_Addon_To_core_Addon(in *Addon, out *core.Addon, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_v1beta1_Addon_To_core_Addon is an autogenerated 
conversion function. +func Convert_v1beta1_Addon_To_core_Addon(in *Addon, out *core.Addon, s conversion.Scope) error { + return autoConvert_v1beta1_Addon_To_core_Addon(in, out, s) +} + +func autoConvert_core_Addon_To_v1beta1_Addon(in *core.Addon, out *Addon, s conversion.Scope) error { + out.Enabled = in.Enabled + return nil +} + +// Convert_core_Addon_To_v1beta1_Addon is an autogenerated conversion function. +func Convert_core_Addon_To_v1beta1_Addon(in *core.Addon, out *Addon, s conversion.Scope) error { + return autoConvert_core_Addon_To_v1beta1_Addon(in, out, s) +} + +func autoConvert_v1beta1_Addons_To_core_Addons(in *Addons, out *core.Addons, s conversion.Scope) error { + out.KubernetesDashboard = (*core.KubernetesDashboard)(unsafe.Pointer(in.KubernetesDashboard)) + out.NginxIngress = (*core.NginxIngress)(unsafe.Pointer(in.NginxIngress)) + return nil +} + +// Convert_v1beta1_Addons_To_core_Addons is an autogenerated conversion function. +func Convert_v1beta1_Addons_To_core_Addons(in *Addons, out *core.Addons, s conversion.Scope) error { + return autoConvert_v1beta1_Addons_To_core_Addons(in, out, s) +} + +func autoConvert_core_Addons_To_v1beta1_Addons(in *core.Addons, out *Addons, s conversion.Scope) error { + out.KubernetesDashboard = (*KubernetesDashboard)(unsafe.Pointer(in.KubernetesDashboard)) + out.NginxIngress = (*NginxIngress)(unsafe.Pointer(in.NginxIngress)) + return nil +} + +// Convert_core_Addons_To_v1beta1_Addons is an autogenerated conversion function. +func Convert_core_Addons_To_v1beta1_Addons(in *core.Addons, out *Addons, s conversion.Scope) error { + return autoConvert_core_Addons_To_v1beta1_Addons(in, out, s) +} + +func autoConvert_v1beta1_AdmissionPlugin_To_core_AdmissionPlugin(in *AdmissionPlugin, out *core.AdmissionPlugin, s conversion.Scope) error { + out.Name = in.Name + out.Config = (*core.ProviderConfig)(unsafe.Pointer(in.Config)) + return nil +} + +// Convert_v1beta1_AdmissionPlugin_To_core_AdmissionPlugin is an autogenerated conversion function. +func Convert_v1beta1_AdmissionPlugin_To_core_AdmissionPlugin(in *AdmissionPlugin, out *core.AdmissionPlugin, s conversion.Scope) error { + return autoConvert_v1beta1_AdmissionPlugin_To_core_AdmissionPlugin(in, out, s) +} + +func autoConvert_core_AdmissionPlugin_To_v1beta1_AdmissionPlugin(in *core.AdmissionPlugin, out *AdmissionPlugin, s conversion.Scope) error { + out.Name = in.Name + out.Config = (*ProviderConfig)(unsafe.Pointer(in.Config)) + return nil +} + +// Convert_core_AdmissionPlugin_To_v1beta1_AdmissionPlugin is an autogenerated conversion function. +func Convert_core_AdmissionPlugin_To_v1beta1_AdmissionPlugin(in *core.AdmissionPlugin, out *AdmissionPlugin, s conversion.Scope) error { + return autoConvert_core_AdmissionPlugin_To_v1beta1_AdmissionPlugin(in, out, s) +} + +func autoConvert_v1beta1_Alerting_To_core_Alerting(in *Alerting, out *core.Alerting, s conversion.Scope) error { + out.EmailReceivers = *(*[]string)(unsafe.Pointer(&in.EmailReceivers)) + return nil +} + +// Convert_v1beta1_Alerting_To_core_Alerting is an autogenerated conversion function. 
+func Convert_v1beta1_Alerting_To_core_Alerting(in *Alerting, out *core.Alerting, s conversion.Scope) error { + return autoConvert_v1beta1_Alerting_To_core_Alerting(in, out, s) +} + +func autoConvert_core_Alerting_To_v1beta1_Alerting(in *core.Alerting, out *Alerting, s conversion.Scope) error { + out.EmailReceivers = *(*[]string)(unsafe.Pointer(&in.EmailReceivers)) + return nil +} + +// Convert_core_Alerting_To_v1beta1_Alerting is an autogenerated conversion function. +func Convert_core_Alerting_To_v1beta1_Alerting(in *core.Alerting, out *Alerting, s conversion.Scope) error { + return autoConvert_core_Alerting_To_v1beta1_Alerting(in, out, s) +} + +func autoConvert_v1beta1_AuditConfig_To_core_AuditConfig(in *AuditConfig, out *core.AuditConfig, s conversion.Scope) error { + out.AuditPolicy = (*core.AuditPolicy)(unsafe.Pointer(in.AuditPolicy)) + return nil +} + +// Convert_v1beta1_AuditConfig_To_core_AuditConfig is an autogenerated conversion function. +func Convert_v1beta1_AuditConfig_To_core_AuditConfig(in *AuditConfig, out *core.AuditConfig, s conversion.Scope) error { + return autoConvert_v1beta1_AuditConfig_To_core_AuditConfig(in, out, s) +} + +func autoConvert_core_AuditConfig_To_v1beta1_AuditConfig(in *core.AuditConfig, out *AuditConfig, s conversion.Scope) error { + out.AuditPolicy = (*AuditPolicy)(unsafe.Pointer(in.AuditPolicy)) + return nil +} + +// Convert_core_AuditConfig_To_v1beta1_AuditConfig is an autogenerated conversion function. +func Convert_core_AuditConfig_To_v1beta1_AuditConfig(in *core.AuditConfig, out *AuditConfig, s conversion.Scope) error { + return autoConvert_core_AuditConfig_To_v1beta1_AuditConfig(in, out, s) +} + +func autoConvert_v1beta1_AuditPolicy_To_core_AuditPolicy(in *AuditPolicy, out *core.AuditPolicy, s conversion.Scope) error { + out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) + return nil +} + +// Convert_v1beta1_AuditPolicy_To_core_AuditPolicy is an autogenerated conversion function. +func Convert_v1beta1_AuditPolicy_To_core_AuditPolicy(in *AuditPolicy, out *core.AuditPolicy, s conversion.Scope) error { + return autoConvert_v1beta1_AuditPolicy_To_core_AuditPolicy(in, out, s) +} + +func autoConvert_core_AuditPolicy_To_v1beta1_AuditPolicy(in *core.AuditPolicy, out *AuditPolicy, s conversion.Scope) error { + out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) + return nil +} + +// Convert_core_AuditPolicy_To_v1beta1_AuditPolicy is an autogenerated conversion function. +func Convert_core_AuditPolicy_To_v1beta1_AuditPolicy(in *core.AuditPolicy, out *AuditPolicy, s conversion.Scope) error { + return autoConvert_core_AuditPolicy_To_v1beta1_AuditPolicy(in, out, s) +} + +func autoConvert_v1beta1_AvailabilityZone_To_core_AvailabilityZone(in *AvailabilityZone, out *core.AvailabilityZone, s conversion.Scope) error { + out.Name = in.Name + out.UnavailableMachineTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableMachineTypes)) + out.UnavailableVolumeTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableVolumeTypes)) + return nil +} + +// Convert_v1beta1_AvailabilityZone_To_core_AvailabilityZone is an autogenerated conversion function. 
+func Convert_v1beta1_AvailabilityZone_To_core_AvailabilityZone(in *AvailabilityZone, out *core.AvailabilityZone, s conversion.Scope) error { + return autoConvert_v1beta1_AvailabilityZone_To_core_AvailabilityZone(in, out, s) +} + +func autoConvert_core_AvailabilityZone_To_v1beta1_AvailabilityZone(in *core.AvailabilityZone, out *AvailabilityZone, s conversion.Scope) error { + out.Name = in.Name + out.UnavailableMachineTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableMachineTypes)) + out.UnavailableVolumeTypes = *(*[]string)(unsafe.Pointer(&in.UnavailableVolumeTypes)) + return nil +} + +// Convert_core_AvailabilityZone_To_v1beta1_AvailabilityZone is an autogenerated conversion function. +func Convert_core_AvailabilityZone_To_v1beta1_AvailabilityZone(in *core.AvailabilityZone, out *AvailabilityZone, s conversion.Scope) error { + return autoConvert_core_AvailabilityZone_To_v1beta1_AvailabilityZone(in, out, s) +} + +func autoConvert_v1beta1_BackupBucket_To_core_BackupBucket(in *BackupBucket, out *core.BackupBucket, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_BackupBucketSpec_To_core_BackupBucketSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_BackupBucket_To_core_BackupBucket is an autogenerated conversion function. +func Convert_v1beta1_BackupBucket_To_core_BackupBucket(in *BackupBucket, out *core.BackupBucket, s conversion.Scope) error { + return autoConvert_v1beta1_BackupBucket_To_core_BackupBucket(in, out, s) +} + +func autoConvert_core_BackupBucket_To_v1beta1_BackupBucket(in *core.BackupBucket, out *BackupBucket, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_BackupBucketStatus_To_v1beta1_BackupBucketStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_BackupBucket_To_v1beta1_BackupBucket is an autogenerated conversion function. +func Convert_core_BackupBucket_To_v1beta1_BackupBucket(in *core.BackupBucket, out *BackupBucket, s conversion.Scope) error { + return autoConvert_core_BackupBucket_To_v1beta1_BackupBucket(in, out, s) +} + +func autoConvert_v1beta1_BackupBucketList_To_core_BackupBucketList(in *BackupBucketList, out *core.BackupBucketList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.BackupBucket)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_BackupBucketList_To_core_BackupBucketList is an autogenerated conversion function. +func Convert_v1beta1_BackupBucketList_To_core_BackupBucketList(in *BackupBucketList, out *core.BackupBucketList, s conversion.Scope) error { + return autoConvert_v1beta1_BackupBucketList_To_core_BackupBucketList(in, out, s) +} + +func autoConvert_core_BackupBucketList_To_v1beta1_BackupBucketList(in *core.BackupBucketList, out *BackupBucketList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]BackupBucket)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_BackupBucketList_To_v1beta1_BackupBucketList is an autogenerated conversion function. 
+func Convert_core_BackupBucketList_To_v1beta1_BackupBucketList(in *core.BackupBucketList, out *BackupBucketList, s conversion.Scope) error { + return autoConvert_core_BackupBucketList_To_v1beta1_BackupBucketList(in, out, s) +} + +func autoConvert_v1beta1_BackupBucketProvider_To_core_BackupBucketProvider(in *BackupBucketProvider, out *core.BackupBucketProvider, s conversion.Scope) error { + out.Type = in.Type + out.Region = in.Region + return nil +} + +// Convert_v1beta1_BackupBucketProvider_To_core_BackupBucketProvider is an autogenerated conversion function. +func Convert_v1beta1_BackupBucketProvider_To_core_BackupBucketProvider(in *BackupBucketProvider, out *core.BackupBucketProvider, s conversion.Scope) error { + return autoConvert_v1beta1_BackupBucketProvider_To_core_BackupBucketProvider(in, out, s) +} + +func autoConvert_core_BackupBucketProvider_To_v1beta1_BackupBucketProvider(in *core.BackupBucketProvider, out *BackupBucketProvider, s conversion.Scope) error { + out.Type = in.Type + out.Region = in.Region + return nil +} + +// Convert_core_BackupBucketProvider_To_v1beta1_BackupBucketProvider is an autogenerated conversion function. +func Convert_core_BackupBucketProvider_To_v1beta1_BackupBucketProvider(in *core.BackupBucketProvider, out *BackupBucketProvider, s conversion.Scope) error { + return autoConvert_core_BackupBucketProvider_To_v1beta1_BackupBucketProvider(in, out, s) +} + +func autoConvert_v1beta1_BackupBucketSpec_To_core_BackupBucketSpec(in *BackupBucketSpec, out *core.BackupBucketSpec, s conversion.Scope) error { + if err := Convert_v1beta1_BackupBucketProvider_To_core_BackupBucketProvider(&in.Provider, &out.Provider, s); err != nil { + return err + } + out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.SecretRef = in.SecretRef + out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) + return nil +} + +// Convert_v1beta1_BackupBucketSpec_To_core_BackupBucketSpec is an autogenerated conversion function. +func Convert_v1beta1_BackupBucketSpec_To_core_BackupBucketSpec(in *BackupBucketSpec, out *core.BackupBucketSpec, s conversion.Scope) error { + return autoConvert_v1beta1_BackupBucketSpec_To_core_BackupBucketSpec(in, out, s) +} + +func autoConvert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec(in *core.BackupBucketSpec, out *BackupBucketSpec, s conversion.Scope) error { + if err := Convert_core_BackupBucketProvider_To_v1beta1_BackupBucketProvider(&in.Provider, &out.Provider, s); err != nil { + return err + } + out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.SecretRef = in.SecretRef + out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) + return nil +} + +// Convert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec is an autogenerated conversion function. 
+func Convert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec(in *core.BackupBucketSpec, out *BackupBucketSpec, s conversion.Scope) error { + return autoConvert_core_BackupBucketSpec_To_v1beta1_BackupBucketSpec(in, out, s) +} + +func autoConvert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus(in *BackupBucketStatus, out *core.BackupBucketStatus, s conversion.Scope) error { + out.ProviderStatus = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderStatus)) + out.LastOperation = (*core.LastOperation)(unsafe.Pointer(in.LastOperation)) + out.LastError = (*core.LastError)(unsafe.Pointer(in.LastError)) + out.ObservedGeneration = in.ObservedGeneration + out.GeneratedSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.GeneratedSecretRef)) + return nil +} + +// Convert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus is an autogenerated conversion function. +func Convert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus(in *BackupBucketStatus, out *core.BackupBucketStatus, s conversion.Scope) error { + return autoConvert_v1beta1_BackupBucketStatus_To_core_BackupBucketStatus(in, out, s) +} + +func autoConvert_core_BackupBucketStatus_To_v1beta1_BackupBucketStatus(in *core.BackupBucketStatus, out *BackupBucketStatus, s conversion.Scope) error { + out.ProviderStatus = (*ProviderConfig)(unsafe.Pointer(in.ProviderStatus)) + out.LastOperation = (*LastOperation)(unsafe.Pointer(in.LastOperation)) + out.LastError = (*LastError)(unsafe.Pointer(in.LastError)) + out.ObservedGeneration = in.ObservedGeneration + out.GeneratedSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.GeneratedSecretRef)) + return nil +} + +// Convert_core_BackupBucketStatus_To_v1beta1_BackupBucketStatus is an autogenerated conversion function. +func Convert_core_BackupBucketStatus_To_v1beta1_BackupBucketStatus(in *core.BackupBucketStatus, out *BackupBucketStatus, s conversion.Scope) error { + return autoConvert_core_BackupBucketStatus_To_v1beta1_BackupBucketStatus(in, out, s) +} + +func autoConvert_v1beta1_BackupEntry_To_core_BackupEntry(in *BackupEntry, out *core.BackupEntry, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_BackupEntrySpec_To_core_BackupEntrySpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_BackupEntryStatus_To_core_BackupEntryStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_BackupEntry_To_core_BackupEntry is an autogenerated conversion function. +func Convert_v1beta1_BackupEntry_To_core_BackupEntry(in *BackupEntry, out *core.BackupEntry, s conversion.Scope) error { + return autoConvert_v1beta1_BackupEntry_To_core_BackupEntry(in, out, s) +} + +func autoConvert_core_BackupEntry_To_v1beta1_BackupEntry(in *core.BackupEntry, out *BackupEntry, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_BackupEntrySpec_To_v1beta1_BackupEntrySpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_BackupEntryStatus_To_v1beta1_BackupEntryStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_BackupEntry_To_v1beta1_BackupEntry is an autogenerated conversion function. 
+func Convert_core_BackupEntry_To_v1beta1_BackupEntry(in *core.BackupEntry, out *BackupEntry, s conversion.Scope) error { + return autoConvert_core_BackupEntry_To_v1beta1_BackupEntry(in, out, s) +} + +func autoConvert_v1beta1_BackupEntryList_To_core_BackupEntryList(in *BackupEntryList, out *core.BackupEntryList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.BackupEntry)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_BackupEntryList_To_core_BackupEntryList is an autogenerated conversion function. +func Convert_v1beta1_BackupEntryList_To_core_BackupEntryList(in *BackupEntryList, out *core.BackupEntryList, s conversion.Scope) error { + return autoConvert_v1beta1_BackupEntryList_To_core_BackupEntryList(in, out, s) +} + +func autoConvert_core_BackupEntryList_To_v1beta1_BackupEntryList(in *core.BackupEntryList, out *BackupEntryList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]BackupEntry)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_BackupEntryList_To_v1beta1_BackupEntryList is an autogenerated conversion function. +func Convert_core_BackupEntryList_To_v1beta1_BackupEntryList(in *core.BackupEntryList, out *BackupEntryList, s conversion.Scope) error { + return autoConvert_core_BackupEntryList_To_v1beta1_BackupEntryList(in, out, s) +} + +func autoConvert_v1beta1_BackupEntrySpec_To_core_BackupEntrySpec(in *BackupEntrySpec, out *core.BackupEntrySpec, s conversion.Scope) error { + out.BucketName = in.BucketName + out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) + return nil +} + +// Convert_v1beta1_BackupEntrySpec_To_core_BackupEntrySpec is an autogenerated conversion function. +func Convert_v1beta1_BackupEntrySpec_To_core_BackupEntrySpec(in *BackupEntrySpec, out *core.BackupEntrySpec, s conversion.Scope) error { + return autoConvert_v1beta1_BackupEntrySpec_To_core_BackupEntrySpec(in, out, s) +} + +func autoConvert_core_BackupEntrySpec_To_v1beta1_BackupEntrySpec(in *core.BackupEntrySpec, out *BackupEntrySpec, s conversion.Scope) error { + out.BucketName = in.BucketName + out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) + return nil +} + +// Convert_core_BackupEntrySpec_To_v1beta1_BackupEntrySpec is an autogenerated conversion function. +func Convert_core_BackupEntrySpec_To_v1beta1_BackupEntrySpec(in *core.BackupEntrySpec, out *BackupEntrySpec, s conversion.Scope) error { + return autoConvert_core_BackupEntrySpec_To_v1beta1_BackupEntrySpec(in, out, s) +} + +func autoConvert_v1beta1_BackupEntryStatus_To_core_BackupEntryStatus(in *BackupEntryStatus, out *core.BackupEntryStatus, s conversion.Scope) error { + out.LastOperation = (*core.LastOperation)(unsafe.Pointer(in.LastOperation)) + out.LastError = (*core.LastError)(unsafe.Pointer(in.LastError)) + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +// Convert_v1beta1_BackupEntryStatus_To_core_BackupEntryStatus is an autogenerated conversion function. 
+func Convert_v1beta1_BackupEntryStatus_To_core_BackupEntryStatus(in *BackupEntryStatus, out *core.BackupEntryStatus, s conversion.Scope) error { + return autoConvert_v1beta1_BackupEntryStatus_To_core_BackupEntryStatus(in, out, s) +} + +func autoConvert_core_BackupEntryStatus_To_v1beta1_BackupEntryStatus(in *core.BackupEntryStatus, out *BackupEntryStatus, s conversion.Scope) error { + out.LastOperation = (*LastOperation)(unsafe.Pointer(in.LastOperation)) + out.LastError = (*LastError)(unsafe.Pointer(in.LastError)) + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +// Convert_core_BackupEntryStatus_To_v1beta1_BackupEntryStatus is an autogenerated conversion function. +func Convert_core_BackupEntryStatus_To_v1beta1_BackupEntryStatus(in *core.BackupEntryStatus, out *BackupEntryStatus, s conversion.Scope) error { + return autoConvert_core_BackupEntryStatus_To_v1beta1_BackupEntryStatus(in, out, s) +} + +func autoConvert_v1beta1_CloudInfo_To_core_CloudInfo(in *CloudInfo, out *core.CloudInfo, s conversion.Scope) error { + out.Type = in.Type + out.Region = in.Region + return nil +} + +// Convert_v1beta1_CloudInfo_To_core_CloudInfo is an autogenerated conversion function. +func Convert_v1beta1_CloudInfo_To_core_CloudInfo(in *CloudInfo, out *core.CloudInfo, s conversion.Scope) error { + return autoConvert_v1beta1_CloudInfo_To_core_CloudInfo(in, out, s) +} + +func autoConvert_core_CloudInfo_To_v1beta1_CloudInfo(in *core.CloudInfo, out *CloudInfo, s conversion.Scope) error { + out.Type = in.Type + out.Region = in.Region + return nil +} + +// Convert_core_CloudInfo_To_v1beta1_CloudInfo is an autogenerated conversion function. +func Convert_core_CloudInfo_To_v1beta1_CloudInfo(in *core.CloudInfo, out *CloudInfo, s conversion.Scope) error { + return autoConvert_core_CloudInfo_To_v1beta1_CloudInfo(in, out, s) +} + +func autoConvert_v1beta1_CloudProfile_To_core_CloudProfile(in *CloudProfile, out *core.CloudProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_CloudProfileSpec_To_core_CloudProfileSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_CloudProfile_To_core_CloudProfile is an autogenerated conversion function. +func Convert_v1beta1_CloudProfile_To_core_CloudProfile(in *CloudProfile, out *core.CloudProfile, s conversion.Scope) error { + return autoConvert_v1beta1_CloudProfile_To_core_CloudProfile(in, out, s) +} + +func autoConvert_core_CloudProfile_To_v1beta1_CloudProfile(in *core.CloudProfile, out *CloudProfile, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_CloudProfileSpec_To_v1beta1_CloudProfileSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_core_CloudProfile_To_v1beta1_CloudProfile is an autogenerated conversion function. +func Convert_core_CloudProfile_To_v1beta1_CloudProfile(in *core.CloudProfile, out *CloudProfile, s conversion.Scope) error { + return autoConvert_core_CloudProfile_To_v1beta1_CloudProfile(in, out, s) +} + +func autoConvert_v1beta1_CloudProfileList_To_core_CloudProfileList(in *CloudProfileList, out *core.CloudProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.CloudProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_CloudProfileList_To_core_CloudProfileList is an autogenerated conversion function. 
+func Convert_v1beta1_CloudProfileList_To_core_CloudProfileList(in *CloudProfileList, out *core.CloudProfileList, s conversion.Scope) error { + return autoConvert_v1beta1_CloudProfileList_To_core_CloudProfileList(in, out, s) +} + +func autoConvert_core_CloudProfileList_To_v1beta1_CloudProfileList(in *core.CloudProfileList, out *CloudProfileList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]CloudProfile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_CloudProfileList_To_v1beta1_CloudProfileList is an autogenerated conversion function. +func Convert_core_CloudProfileList_To_v1beta1_CloudProfileList(in *core.CloudProfileList, out *CloudProfileList, s conversion.Scope) error { + return autoConvert_core_CloudProfileList_To_v1beta1_CloudProfileList(in, out, s) +} + +func autoConvert_v1beta1_CloudProfileSpec_To_core_CloudProfileSpec(in *CloudProfileSpec, out *core.CloudProfileSpec, s conversion.Scope) error { + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + if err := Convert_v1beta1_KubernetesSettings_To_core_KubernetesSettings(&in.Kubernetes, &out.Kubernetes, s); err != nil { + return err + } + out.MachineImages = *(*[]core.MachineImage)(unsafe.Pointer(&in.MachineImages)) + out.MachineTypes = *(*[]core.MachineType)(unsafe.Pointer(&in.MachineTypes)) + out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.Regions = *(*[]core.Region)(unsafe.Pointer(&in.Regions)) + out.SeedSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.SeedSelector)) + out.Type = in.Type + out.VolumeTypes = *(*[]core.VolumeType)(unsafe.Pointer(&in.VolumeTypes)) + return nil +} + +// Convert_v1beta1_CloudProfileSpec_To_core_CloudProfileSpec is an autogenerated conversion function. +func Convert_v1beta1_CloudProfileSpec_To_core_CloudProfileSpec(in *CloudProfileSpec, out *core.CloudProfileSpec, s conversion.Scope) error { + return autoConvert_v1beta1_CloudProfileSpec_To_core_CloudProfileSpec(in, out, s) +} + +func autoConvert_core_CloudProfileSpec_To_v1beta1_CloudProfileSpec(in *core.CloudProfileSpec, out *CloudProfileSpec, s conversion.Scope) error { + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + if err := Convert_core_KubernetesSettings_To_v1beta1_KubernetesSettings(&in.Kubernetes, &out.Kubernetes, s); err != nil { + return err + } + out.MachineImages = *(*[]MachineImage)(unsafe.Pointer(&in.MachineImages)) + out.MachineTypes = *(*[]MachineType)(unsafe.Pointer(&in.MachineTypes)) + out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.Regions = *(*[]Region)(unsafe.Pointer(&in.Regions)) + out.SeedSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.SeedSelector)) + out.Type = in.Type + out.VolumeTypes = *(*[]VolumeType)(unsafe.Pointer(&in.VolumeTypes)) + return nil +} + +// Convert_core_CloudProfileSpec_To_v1beta1_CloudProfileSpec is an autogenerated conversion function. 
+func Convert_core_CloudProfileSpec_To_v1beta1_CloudProfileSpec(in *core.CloudProfileSpec, out *CloudProfileSpec, s conversion.Scope) error { + return autoConvert_core_CloudProfileSpec_To_v1beta1_CloudProfileSpec(in, out, s) +} + +func autoConvert_v1beta1_ClusterAutoscaler_To_core_ClusterAutoscaler(in *ClusterAutoscaler, out *core.ClusterAutoscaler, s conversion.Scope) error { + out.ScaleDownDelayAfterAdd = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterAdd)) + out.ScaleDownDelayAfterDelete = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterDelete)) + out.ScaleDownDelayAfterFailure = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterFailure)) + out.ScaleDownUnneededTime = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownUnneededTime)) + out.ScaleDownUtilizationThreshold = (*float64)(unsafe.Pointer(in.ScaleDownUtilizationThreshold)) + out.ScanInterval = (*metav1.Duration)(unsafe.Pointer(in.ScanInterval)) + return nil +} + +// Convert_v1beta1_ClusterAutoscaler_To_core_ClusterAutoscaler is an autogenerated conversion function. +func Convert_v1beta1_ClusterAutoscaler_To_core_ClusterAutoscaler(in *ClusterAutoscaler, out *core.ClusterAutoscaler, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterAutoscaler_To_core_ClusterAutoscaler(in, out, s) +} + +func autoConvert_core_ClusterAutoscaler_To_v1beta1_ClusterAutoscaler(in *core.ClusterAutoscaler, out *ClusterAutoscaler, s conversion.Scope) error { + out.ScaleDownDelayAfterAdd = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterAdd)) + out.ScaleDownDelayAfterDelete = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterDelete)) + out.ScaleDownDelayAfterFailure = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownDelayAfterFailure)) + out.ScaleDownUnneededTime = (*metav1.Duration)(unsafe.Pointer(in.ScaleDownUnneededTime)) + out.ScaleDownUtilizationThreshold = (*float64)(unsafe.Pointer(in.ScaleDownUtilizationThreshold)) + out.ScanInterval = (*metav1.Duration)(unsafe.Pointer(in.ScanInterval)) + return nil +} + +// Convert_core_ClusterAutoscaler_To_v1beta1_ClusterAutoscaler is an autogenerated conversion function. +func Convert_core_ClusterAutoscaler_To_v1beta1_ClusterAutoscaler(in *core.ClusterAutoscaler, out *ClusterAutoscaler, s conversion.Scope) error { + return autoConvert_core_ClusterAutoscaler_To_v1beta1_ClusterAutoscaler(in, out, s) +} + +func autoConvert_v1beta1_ClusterInfo_To_core_ClusterInfo(in *ClusterInfo, out *core.ClusterInfo, s conversion.Scope) error { + if err := Convert_v1beta1_CloudInfo_To_core_CloudInfo(&in.Cloud, &out.Cloud, s); err != nil { + return err + } + if err := Convert_v1beta1_KubernetesInfo_To_core_KubernetesInfo(&in.Kubernetes, &out.Kubernetes, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_ClusterInfo_To_core_ClusterInfo is an autogenerated conversion function. +func Convert_v1beta1_ClusterInfo_To_core_ClusterInfo(in *ClusterInfo, out *core.ClusterInfo, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterInfo_To_core_ClusterInfo(in, out, s) +} + +func autoConvert_core_ClusterInfo_To_v1beta1_ClusterInfo(in *core.ClusterInfo, out *ClusterInfo, s conversion.Scope) error { + if err := Convert_core_CloudInfo_To_v1beta1_CloudInfo(&in.Cloud, &out.Cloud, s); err != nil { + return err + } + if err := Convert_core_KubernetesInfo_To_v1beta1_KubernetesInfo(&in.Kubernetes, &out.Kubernetes, s); err != nil { + return err + } + return nil +} + +// Convert_core_ClusterInfo_To_v1beta1_ClusterInfo is an autogenerated conversion function. 
+func Convert_core_ClusterInfo_To_v1beta1_ClusterInfo(in *core.ClusterInfo, out *ClusterInfo, s conversion.Scope) error { + return autoConvert_core_ClusterInfo_To_v1beta1_ClusterInfo(in, out, s) +} + +func autoConvert_v1beta1_Condition_To_core_Condition(in *Condition, out *core.Condition, s conversion.Scope) error { + out.Type = core.ConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.LastUpdateTime = in.LastUpdateTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1beta1_Condition_To_core_Condition is an autogenerated conversion function. +func Convert_v1beta1_Condition_To_core_Condition(in *Condition, out *core.Condition, s conversion.Scope) error { + return autoConvert_v1beta1_Condition_To_core_Condition(in, out, s) +} + +func autoConvert_core_Condition_To_v1beta1_Condition(in *core.Condition, out *Condition, s conversion.Scope) error { + out.Type = ConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.LastUpdateTime = in.LastUpdateTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_Condition_To_v1beta1_Condition is an autogenerated conversion function. +func Convert_core_Condition_To_v1beta1_Condition(in *core.Condition, out *Condition, s conversion.Scope) error { + return autoConvert_core_Condition_To_v1beta1_Condition(in, out, s) +} + +func autoConvert_v1beta1_ControllerDeployment_To_core_ControllerDeployment(in *ControllerDeployment, out *core.ControllerDeployment, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + return nil +} + +// Convert_v1beta1_ControllerDeployment_To_core_ControllerDeployment is an autogenerated conversion function. +func Convert_v1beta1_ControllerDeployment_To_core_ControllerDeployment(in *ControllerDeployment, out *core.ControllerDeployment, s conversion.Scope) error { + return autoConvert_v1beta1_ControllerDeployment_To_core_ControllerDeployment(in, out, s) +} + +func autoConvert_core_ControllerDeployment_To_v1beta1_ControllerDeployment(in *core.ControllerDeployment, out *ControllerDeployment, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + return nil +} + +// Convert_core_ControllerDeployment_To_v1beta1_ControllerDeployment is an autogenerated conversion function. +func Convert_core_ControllerDeployment_To_v1beta1_ControllerDeployment(in *core.ControllerDeployment, out *ControllerDeployment, s conversion.Scope) error { + return autoConvert_core_ControllerDeployment_To_v1beta1_ControllerDeployment(in, out, s) +} + +func autoConvert_v1beta1_ControllerInstallation_To_core_ControllerInstallation(in *ControllerInstallation, out *core.ControllerInstallation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_ControllerInstallation_To_core_ControllerInstallation is an autogenerated conversion function. 
+func Convert_v1beta1_ControllerInstallation_To_core_ControllerInstallation(in *ControllerInstallation, out *core.ControllerInstallation, s conversion.Scope) error { + return autoConvert_v1beta1_ControllerInstallation_To_core_ControllerInstallation(in, out, s) +} + +func autoConvert_core_ControllerInstallation_To_v1beta1_ControllerInstallation(in *core.ControllerInstallation, out *ControllerInstallation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ControllerInstallationSpec_To_v1beta1_ControllerInstallationSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ControllerInstallationStatus_To_v1beta1_ControllerInstallationStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_ControllerInstallation_To_v1beta1_ControllerInstallation is an autogenerated conversion function. +func Convert_core_ControllerInstallation_To_v1beta1_ControllerInstallation(in *core.ControllerInstallation, out *ControllerInstallation, s conversion.Scope) error { + return autoConvert_core_ControllerInstallation_To_v1beta1_ControllerInstallation(in, out, s) +} + +func autoConvert_v1beta1_ControllerInstallationList_To_core_ControllerInstallationList(in *ControllerInstallationList, out *core.ControllerInstallationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ControllerInstallation)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_ControllerInstallationList_To_core_ControllerInstallationList is an autogenerated conversion function. +func Convert_v1beta1_ControllerInstallationList_To_core_ControllerInstallationList(in *ControllerInstallationList, out *core.ControllerInstallationList, s conversion.Scope) error { + return autoConvert_v1beta1_ControllerInstallationList_To_core_ControllerInstallationList(in, out, s) +} + +func autoConvert_core_ControllerInstallationList_To_v1beta1_ControllerInstallationList(in *core.ControllerInstallationList, out *ControllerInstallationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ControllerInstallation)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ControllerInstallationList_To_v1beta1_ControllerInstallationList is an autogenerated conversion function. +func Convert_core_ControllerInstallationList_To_v1beta1_ControllerInstallationList(in *core.ControllerInstallationList, out *ControllerInstallationList, s conversion.Scope) error { + return autoConvert_core_ControllerInstallationList_To_v1beta1_ControllerInstallationList(in, out, s) +} + +func autoConvert_v1beta1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(in *ControllerInstallationSpec, out *core.ControllerInstallationSpec, s conversion.Scope) error { + out.RegistrationRef = in.RegistrationRef + out.SeedRef = in.SeedRef + return nil +} + +// Convert_v1beta1_ControllerInstallationSpec_To_core_ControllerInstallationSpec is an autogenerated conversion function. 
+func Convert_v1beta1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(in *ControllerInstallationSpec, out *core.ControllerInstallationSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ControllerInstallationSpec_To_core_ControllerInstallationSpec(in, out, s) +} + +func autoConvert_core_ControllerInstallationSpec_To_v1beta1_ControllerInstallationSpec(in *core.ControllerInstallationSpec, out *ControllerInstallationSpec, s conversion.Scope) error { + out.RegistrationRef = in.RegistrationRef + out.SeedRef = in.SeedRef + return nil +} + +// Convert_core_ControllerInstallationSpec_To_v1beta1_ControllerInstallationSpec is an autogenerated conversion function. +func Convert_core_ControllerInstallationSpec_To_v1beta1_ControllerInstallationSpec(in *core.ControllerInstallationSpec, out *ControllerInstallationSpec, s conversion.Scope) error { + return autoConvert_core_ControllerInstallationSpec_To_v1beta1_ControllerInstallationSpec(in, out, s) +} + +func autoConvert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(in *ControllerInstallationStatus, out *core.ControllerInstallationStatus, s conversion.Scope) error { + out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions)) + out.ProviderStatus = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderStatus)) + return nil +} + +// Convert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallationStatus is an autogenerated conversion function. +func Convert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(in *ControllerInstallationStatus, out *core.ControllerInstallationStatus, s conversion.Scope) error { + return autoConvert_v1beta1_ControllerInstallationStatus_To_core_ControllerInstallationStatus(in, out, s) +} + +func autoConvert_core_ControllerInstallationStatus_To_v1beta1_ControllerInstallationStatus(in *core.ControllerInstallationStatus, out *ControllerInstallationStatus, s conversion.Scope) error { + out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions)) + out.ProviderStatus = (*ProviderConfig)(unsafe.Pointer(in.ProviderStatus)) + return nil +} + +// Convert_core_ControllerInstallationStatus_To_v1beta1_ControllerInstallationStatus is an autogenerated conversion function. +func Convert_core_ControllerInstallationStatus_To_v1beta1_ControllerInstallationStatus(in *core.ControllerInstallationStatus, out *ControllerInstallationStatus, s conversion.Scope) error { + return autoConvert_core_ControllerInstallationStatus_To_v1beta1_ControllerInstallationStatus(in, out, s) +} + +func autoConvert_v1beta1_ControllerRegistration_To_core_ControllerRegistration(in *ControllerRegistration, out *core.ControllerRegistration, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_ControllerRegistration_To_core_ControllerRegistration is an autogenerated conversion function. 
+func Convert_v1beta1_ControllerRegistration_To_core_ControllerRegistration(in *ControllerRegistration, out *core.ControllerRegistration, s conversion.Scope) error { + return autoConvert_v1beta1_ControllerRegistration_To_core_ControllerRegistration(in, out, s) +} + +func autoConvert_core_ControllerRegistration_To_v1beta1_ControllerRegistration(in *core.ControllerRegistration, out *ControllerRegistration, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ControllerRegistrationSpec_To_v1beta1_ControllerRegistrationSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_core_ControllerRegistration_To_v1beta1_ControllerRegistration is an autogenerated conversion function. +func Convert_core_ControllerRegistration_To_v1beta1_ControllerRegistration(in *core.ControllerRegistration, out *ControllerRegistration, s conversion.Scope) error { + return autoConvert_core_ControllerRegistration_To_v1beta1_ControllerRegistration(in, out, s) +} + +func autoConvert_v1beta1_ControllerRegistrationList_To_core_ControllerRegistrationList(in *ControllerRegistrationList, out *core.ControllerRegistrationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ControllerRegistration)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_ControllerRegistrationList_To_core_ControllerRegistrationList is an autogenerated conversion function. +func Convert_v1beta1_ControllerRegistrationList_To_core_ControllerRegistrationList(in *ControllerRegistrationList, out *core.ControllerRegistrationList, s conversion.Scope) error { + return autoConvert_v1beta1_ControllerRegistrationList_To_core_ControllerRegistrationList(in, out, s) +} + +func autoConvert_core_ControllerRegistrationList_To_v1beta1_ControllerRegistrationList(in *core.ControllerRegistrationList, out *ControllerRegistrationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]ControllerRegistration)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ControllerRegistrationList_To_v1beta1_ControllerRegistrationList is an autogenerated conversion function. +func Convert_core_ControllerRegistrationList_To_v1beta1_ControllerRegistrationList(in *core.ControllerRegistrationList, out *ControllerRegistrationList, s conversion.Scope) error { + return autoConvert_core_ControllerRegistrationList_To_v1beta1_ControllerRegistrationList(in, out, s) +} + +func autoConvert_v1beta1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(in *ControllerRegistrationSpec, out *core.ControllerRegistrationSpec, s conversion.Scope) error { + out.Resources = *(*[]core.ControllerResource)(unsafe.Pointer(&in.Resources)) + out.Deployment = (*core.ControllerDeployment)(unsafe.Pointer(in.Deployment)) + return nil +} + +// Convert_v1beta1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec is an autogenerated conversion function. 
+func Convert_v1beta1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(in *ControllerRegistrationSpec, out *core.ControllerRegistrationSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ControllerRegistrationSpec_To_core_ControllerRegistrationSpec(in, out, s) +} + +func autoConvert_core_ControllerRegistrationSpec_To_v1beta1_ControllerRegistrationSpec(in *core.ControllerRegistrationSpec, out *ControllerRegistrationSpec, s conversion.Scope) error { + out.Resources = *(*[]ControllerResource)(unsafe.Pointer(&in.Resources)) + out.Deployment = (*ControllerDeployment)(unsafe.Pointer(in.Deployment)) + return nil +} + +// Convert_core_ControllerRegistrationSpec_To_v1beta1_ControllerRegistrationSpec is an autogenerated conversion function. +func Convert_core_ControllerRegistrationSpec_To_v1beta1_ControllerRegistrationSpec(in *core.ControllerRegistrationSpec, out *ControllerRegistrationSpec, s conversion.Scope) error { + return autoConvert_core_ControllerRegistrationSpec_To_v1beta1_ControllerRegistrationSpec(in, out, s) +} + +func autoConvert_v1beta1_ControllerResource_To_core_ControllerResource(in *ControllerResource, out *core.ControllerResource, s conversion.Scope) error { + out.Kind = in.Kind + out.Type = in.Type + out.GloballyEnabled = (*bool)(unsafe.Pointer(in.GloballyEnabled)) + out.ReconcileTimeout = (*metav1.Duration)(unsafe.Pointer(in.ReconcileTimeout)) + return nil +} + +// Convert_v1beta1_ControllerResource_To_core_ControllerResource is an autogenerated conversion function. +func Convert_v1beta1_ControllerResource_To_core_ControllerResource(in *ControllerResource, out *core.ControllerResource, s conversion.Scope) error { + return autoConvert_v1beta1_ControllerResource_To_core_ControllerResource(in, out, s) +} + +func autoConvert_core_ControllerResource_To_v1beta1_ControllerResource(in *core.ControllerResource, out *ControllerResource, s conversion.Scope) error { + out.Kind = in.Kind + out.Type = in.Type + out.GloballyEnabled = (*bool)(unsafe.Pointer(in.GloballyEnabled)) + out.ReconcileTimeout = (*metav1.Duration)(unsafe.Pointer(in.ReconcileTimeout)) + return nil +} + +// Convert_core_ControllerResource_To_v1beta1_ControllerResource is an autogenerated conversion function. +func Convert_core_ControllerResource_To_v1beta1_ControllerResource(in *core.ControllerResource, out *ControllerResource, s conversion.Scope) error { + return autoConvert_core_ControllerResource_To_v1beta1_ControllerResource(in, out, s) +} + +func autoConvert_v1beta1_DNS_To_core_DNS(in *DNS, out *core.DNS, s conversion.Scope) error { + out.Domain = (*string)(unsafe.Pointer(in.Domain)) + out.Providers = *(*[]core.DNSProvider)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_v1beta1_DNS_To_core_DNS is an autogenerated conversion function. +func Convert_v1beta1_DNS_To_core_DNS(in *DNS, out *core.DNS, s conversion.Scope) error { + return autoConvert_v1beta1_DNS_To_core_DNS(in, out, s) +} + +func autoConvert_core_DNS_To_v1beta1_DNS(in *core.DNS, out *DNS, s conversion.Scope) error { + out.Domain = (*string)(unsafe.Pointer(in.Domain)) + out.Providers = *(*[]DNSProvider)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_core_DNS_To_v1beta1_DNS is an autogenerated conversion function. 
+func Convert_core_DNS_To_v1beta1_DNS(in *core.DNS, out *DNS, s conversion.Scope) error { + return autoConvert_core_DNS_To_v1beta1_DNS(in, out, s) +} + +func autoConvert_v1beta1_DNSIncludeExclude_To_core_DNSIncludeExclude(in *DNSIncludeExclude, out *core.DNSIncludeExclude, s conversion.Scope) error { + out.Include = *(*[]string)(unsafe.Pointer(&in.Include)) + out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude)) + return nil +} + +// Convert_v1beta1_DNSIncludeExclude_To_core_DNSIncludeExclude is an autogenerated conversion function. +func Convert_v1beta1_DNSIncludeExclude_To_core_DNSIncludeExclude(in *DNSIncludeExclude, out *core.DNSIncludeExclude, s conversion.Scope) error { + return autoConvert_v1beta1_DNSIncludeExclude_To_core_DNSIncludeExclude(in, out, s) +} + +func autoConvert_core_DNSIncludeExclude_To_v1beta1_DNSIncludeExclude(in *core.DNSIncludeExclude, out *DNSIncludeExclude, s conversion.Scope) error { + out.Include = *(*[]string)(unsafe.Pointer(&in.Include)) + out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude)) + return nil +} + +// Convert_core_DNSIncludeExclude_To_v1beta1_DNSIncludeExclude is an autogenerated conversion function. +func Convert_core_DNSIncludeExclude_To_v1beta1_DNSIncludeExclude(in *core.DNSIncludeExclude, out *DNSIncludeExclude, s conversion.Scope) error { + return autoConvert_core_DNSIncludeExclude_To_v1beta1_DNSIncludeExclude(in, out, s) +} + +func autoConvert_v1beta1_DNSProvider_To_core_DNSProvider(in *DNSProvider, out *core.DNSProvider, s conversion.Scope) error { + out.Domains = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Domains)) + out.Primary = (*bool)(unsafe.Pointer(in.Primary)) + out.SecretName = (*string)(unsafe.Pointer(in.SecretName)) + out.Type = (*string)(unsafe.Pointer(in.Type)) + out.Zones = (*core.DNSIncludeExclude)(unsafe.Pointer(in.Zones)) + return nil +} + +// Convert_v1beta1_DNSProvider_To_core_DNSProvider is an autogenerated conversion function. +func Convert_v1beta1_DNSProvider_To_core_DNSProvider(in *DNSProvider, out *core.DNSProvider, s conversion.Scope) error { + return autoConvert_v1beta1_DNSProvider_To_core_DNSProvider(in, out, s) +} + +func autoConvert_core_DNSProvider_To_v1beta1_DNSProvider(in *core.DNSProvider, out *DNSProvider, s conversion.Scope) error { + out.Domains = (*DNSIncludeExclude)(unsafe.Pointer(in.Domains)) + out.Primary = (*bool)(unsafe.Pointer(in.Primary)) + out.SecretName = (*string)(unsafe.Pointer(in.SecretName)) + out.Type = (*string)(unsafe.Pointer(in.Type)) + out.Zones = (*DNSIncludeExclude)(unsafe.Pointer(in.Zones)) + return nil +} + +// Convert_core_DNSProvider_To_v1beta1_DNSProvider is an autogenerated conversion function. +func Convert_core_DNSProvider_To_v1beta1_DNSProvider(in *core.DNSProvider, out *DNSProvider, s conversion.Scope) error { + return autoConvert_core_DNSProvider_To_v1beta1_DNSProvider(in, out, s) +} + +func autoConvert_v1beta1_Endpoint_To_core_Endpoint(in *Endpoint, out *core.Endpoint, s conversion.Scope) error { + out.Name = in.Name + out.URL = in.URL + out.Purpose = in.Purpose + return nil +} + +// Convert_v1beta1_Endpoint_To_core_Endpoint is an autogenerated conversion function. 
+func Convert_v1beta1_Endpoint_To_core_Endpoint(in *Endpoint, out *core.Endpoint, s conversion.Scope) error { + return autoConvert_v1beta1_Endpoint_To_core_Endpoint(in, out, s) +} + +func autoConvert_core_Endpoint_To_v1beta1_Endpoint(in *core.Endpoint, out *Endpoint, s conversion.Scope) error { + out.Name = in.Name + out.URL = in.URL + out.Purpose = in.Purpose + return nil +} + +// Convert_core_Endpoint_To_v1beta1_Endpoint is an autogenerated conversion function. +func Convert_core_Endpoint_To_v1beta1_Endpoint(in *core.Endpoint, out *Endpoint, s conversion.Scope) error { + return autoConvert_core_Endpoint_To_v1beta1_Endpoint(in, out, s) +} + +func autoConvert_v1beta1_ExpirableVersion_To_core_ExpirableVersion(in *ExpirableVersion, out *core.ExpirableVersion, s conversion.Scope) error { + out.Version = in.Version + out.ExpirationDate = (*metav1.Time)(unsafe.Pointer(in.ExpirationDate)) + return nil +} + +// Convert_v1beta1_ExpirableVersion_To_core_ExpirableVersion is an autogenerated conversion function. +func Convert_v1beta1_ExpirableVersion_To_core_ExpirableVersion(in *ExpirableVersion, out *core.ExpirableVersion, s conversion.Scope) error { + return autoConvert_v1beta1_ExpirableVersion_To_core_ExpirableVersion(in, out, s) +} + +func autoConvert_core_ExpirableVersion_To_v1beta1_ExpirableVersion(in *core.ExpirableVersion, out *ExpirableVersion, s conversion.Scope) error { + out.Version = in.Version + out.ExpirationDate = (*metav1.Time)(unsafe.Pointer(in.ExpirationDate)) + return nil +} + +// Convert_core_ExpirableVersion_To_v1beta1_ExpirableVersion is an autogenerated conversion function. +func Convert_core_ExpirableVersion_To_v1beta1_ExpirableVersion(in *core.ExpirableVersion, out *ExpirableVersion, s conversion.Scope) error { + return autoConvert_core_ExpirableVersion_To_v1beta1_ExpirableVersion(in, out, s) +} + +func autoConvert_v1beta1_Extension_To_core_Extension(in *Extension, out *core.Extension, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + return nil +} + +// Convert_v1beta1_Extension_To_core_Extension is an autogenerated conversion function. +func Convert_v1beta1_Extension_To_core_Extension(in *Extension, out *core.Extension, s conversion.Scope) error { + return autoConvert_v1beta1_Extension_To_core_Extension(in, out, s) +} + +func autoConvert_core_Extension_To_v1beta1_Extension(in *core.Extension, out *Extension, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + return nil +} + +// Convert_core_Extension_To_v1beta1_Extension is an autogenerated conversion function. +func Convert_core_Extension_To_v1beta1_Extension(in *core.Extension, out *Extension, s conversion.Scope) error { + return autoConvert_core_Extension_To_v1beta1_Extension(in, out, s) +} + +func autoConvert_v1beta1_Gardener_To_core_Gardener(in *Gardener, out *core.Gardener, s conversion.Scope) error { + out.ID = in.ID + out.Name = in.Name + out.Version = in.Version + return nil +} + +// Convert_v1beta1_Gardener_To_core_Gardener is an autogenerated conversion function. 
+func Convert_v1beta1_Gardener_To_core_Gardener(in *Gardener, out *core.Gardener, s conversion.Scope) error { + return autoConvert_v1beta1_Gardener_To_core_Gardener(in, out, s) +} + +func autoConvert_core_Gardener_To_v1beta1_Gardener(in *core.Gardener, out *Gardener, s conversion.Scope) error { + out.ID = in.ID + out.Name = in.Name + out.Version = in.Version + return nil +} + +// Convert_core_Gardener_To_v1beta1_Gardener is an autogenerated conversion function. +func Convert_core_Gardener_To_v1beta1_Gardener(in *core.Gardener, out *Gardener, s conversion.Scope) error { + return autoConvert_core_Gardener_To_v1beta1_Gardener(in, out, s) +} + +func autoConvert_v1beta1_Hibernation_To_core_Hibernation(in *Hibernation, out *core.Hibernation, s conversion.Scope) error { + out.Enabled = (*bool)(unsafe.Pointer(in.Enabled)) + out.Schedules = *(*[]core.HibernationSchedule)(unsafe.Pointer(&in.Schedules)) + return nil +} + +// Convert_v1beta1_Hibernation_To_core_Hibernation is an autogenerated conversion function. +func Convert_v1beta1_Hibernation_To_core_Hibernation(in *Hibernation, out *core.Hibernation, s conversion.Scope) error { + return autoConvert_v1beta1_Hibernation_To_core_Hibernation(in, out, s) +} + +func autoConvert_core_Hibernation_To_v1beta1_Hibernation(in *core.Hibernation, out *Hibernation, s conversion.Scope) error { + out.Enabled = (*bool)(unsafe.Pointer(in.Enabled)) + out.Schedules = *(*[]HibernationSchedule)(unsafe.Pointer(&in.Schedules)) + return nil +} + +// Convert_core_Hibernation_To_v1beta1_Hibernation is an autogenerated conversion function. +func Convert_core_Hibernation_To_v1beta1_Hibernation(in *core.Hibernation, out *Hibernation, s conversion.Scope) error { + return autoConvert_core_Hibernation_To_v1beta1_Hibernation(in, out, s) +} + +func autoConvert_v1beta1_HibernationSchedule_To_core_HibernationSchedule(in *HibernationSchedule, out *core.HibernationSchedule, s conversion.Scope) error { + out.Start = (*string)(unsafe.Pointer(in.Start)) + out.End = (*string)(unsafe.Pointer(in.End)) + out.Location = (*string)(unsafe.Pointer(in.Location)) + return nil +} + +// Convert_v1beta1_HibernationSchedule_To_core_HibernationSchedule is an autogenerated conversion function. +func Convert_v1beta1_HibernationSchedule_To_core_HibernationSchedule(in *HibernationSchedule, out *core.HibernationSchedule, s conversion.Scope) error { + return autoConvert_v1beta1_HibernationSchedule_To_core_HibernationSchedule(in, out, s) +} + +func autoConvert_core_HibernationSchedule_To_v1beta1_HibernationSchedule(in *core.HibernationSchedule, out *HibernationSchedule, s conversion.Scope) error { + out.Start = (*string)(unsafe.Pointer(in.Start)) + out.End = (*string)(unsafe.Pointer(in.End)) + out.Location = (*string)(unsafe.Pointer(in.Location)) + return nil +} + +// Convert_core_HibernationSchedule_To_v1beta1_HibernationSchedule is an autogenerated conversion function. 
+func Convert_core_HibernationSchedule_To_v1beta1_HibernationSchedule(in *core.HibernationSchedule, out *HibernationSchedule, s conversion.Scope) error { + return autoConvert_core_HibernationSchedule_To_v1beta1_HibernationSchedule(in, out, s) +} + +func autoConvert_v1beta1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(in *HorizontalPodAutoscalerConfig, out *core.HorizontalPodAutoscalerConfig, s conversion.Scope) error { + out.CPUInitializationPeriod = (*metav1.Duration)(unsafe.Pointer(in.CPUInitializationPeriod)) + out.DownscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.DownscaleDelay)) + out.DownscaleStabilization = (*metav1.Duration)(unsafe.Pointer(in.DownscaleStabilization)) + out.InitialReadinessDelay = (*metav1.Duration)(unsafe.Pointer(in.InitialReadinessDelay)) + out.SyncPeriod = (*metav1.Duration)(unsafe.Pointer(in.SyncPeriod)) + out.Tolerance = (*float64)(unsafe.Pointer(in.Tolerance)) + out.UpscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.UpscaleDelay)) + return nil +} + +// Convert_v1beta1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig is an autogenerated conversion function. +func Convert_v1beta1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(in *HorizontalPodAutoscalerConfig, out *core.HorizontalPodAutoscalerConfig, s conversion.Scope) error { + return autoConvert_v1beta1_HorizontalPodAutoscalerConfig_To_core_HorizontalPodAutoscalerConfig(in, out, s) +} + +func autoConvert_core_HorizontalPodAutoscalerConfig_To_v1beta1_HorizontalPodAutoscalerConfig(in *core.HorizontalPodAutoscalerConfig, out *HorizontalPodAutoscalerConfig, s conversion.Scope) error { + out.CPUInitializationPeriod = (*metav1.Duration)(unsafe.Pointer(in.CPUInitializationPeriod)) + out.DownscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.DownscaleDelay)) + out.DownscaleStabilization = (*metav1.Duration)(unsafe.Pointer(in.DownscaleStabilization)) + out.InitialReadinessDelay = (*metav1.Duration)(unsafe.Pointer(in.InitialReadinessDelay)) + out.SyncPeriod = (*metav1.Duration)(unsafe.Pointer(in.SyncPeriod)) + out.Tolerance = (*float64)(unsafe.Pointer(in.Tolerance)) + out.UpscaleDelay = (*metav1.Duration)(unsafe.Pointer(in.UpscaleDelay)) + return nil +} + +// Convert_core_HorizontalPodAutoscalerConfig_To_v1beta1_HorizontalPodAutoscalerConfig is an autogenerated conversion function. 
+func Convert_core_HorizontalPodAutoscalerConfig_To_v1beta1_HorizontalPodAutoscalerConfig(in *core.HorizontalPodAutoscalerConfig, out *HorizontalPodAutoscalerConfig, s conversion.Scope) error { + return autoConvert_core_HorizontalPodAutoscalerConfig_To_v1beta1_HorizontalPodAutoscalerConfig(in, out, s) +} + +func autoConvert_v1beta1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in *KubeAPIServerConfig, out *core.KubeAPIServerConfig, s conversion.Scope) error { + if err := Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.AdmissionPlugins = *(*[]core.AdmissionPlugin)(unsafe.Pointer(&in.AdmissionPlugins)) + out.APIAudiences = *(*[]string)(unsafe.Pointer(&in.APIAudiences)) + out.AuditConfig = (*core.AuditConfig)(unsafe.Pointer(in.AuditConfig)) + out.EnableBasicAuthentication = (*bool)(unsafe.Pointer(in.EnableBasicAuthentication)) + out.OIDCConfig = (*core.OIDCConfig)(unsafe.Pointer(in.OIDCConfig)) + out.RuntimeConfig = *(*map[string]bool)(unsafe.Pointer(&in.RuntimeConfig)) + out.ServiceAccountConfig = (*core.ServiceAccountConfig)(unsafe.Pointer(in.ServiceAccountConfig)) + return nil +} + +// Convert_v1beta1_KubeAPIServerConfig_To_core_KubeAPIServerConfig is an autogenerated conversion function. +func Convert_v1beta1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in *KubeAPIServerConfig, out *core.KubeAPIServerConfig, s conversion.Scope) error { + return autoConvert_v1beta1_KubeAPIServerConfig_To_core_KubeAPIServerConfig(in, out, s) +} + +func autoConvert_core_KubeAPIServerConfig_To_v1beta1_KubeAPIServerConfig(in *core.KubeAPIServerConfig, out *KubeAPIServerConfig, s conversion.Scope) error { + if err := Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.AdmissionPlugins = *(*[]AdmissionPlugin)(unsafe.Pointer(&in.AdmissionPlugins)) + out.APIAudiences = *(*[]string)(unsafe.Pointer(&in.APIAudiences)) + out.AuditConfig = (*AuditConfig)(unsafe.Pointer(in.AuditConfig)) + out.EnableBasicAuthentication = (*bool)(unsafe.Pointer(in.EnableBasicAuthentication)) + out.OIDCConfig = (*OIDCConfig)(unsafe.Pointer(in.OIDCConfig)) + out.RuntimeConfig = *(*map[string]bool)(unsafe.Pointer(&in.RuntimeConfig)) + out.ServiceAccountConfig = (*ServiceAccountConfig)(unsafe.Pointer(in.ServiceAccountConfig)) + return nil +} + +// Convert_core_KubeAPIServerConfig_To_v1beta1_KubeAPIServerConfig is an autogenerated conversion function. +func Convert_core_KubeAPIServerConfig_To_v1beta1_KubeAPIServerConfig(in *core.KubeAPIServerConfig, out *KubeAPIServerConfig, s conversion.Scope) error { + return autoConvert_core_KubeAPIServerConfig_To_v1beta1_KubeAPIServerConfig(in, out, s) +} + +func autoConvert_v1beta1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(in *KubeControllerManagerConfig, out *core.KubeControllerManagerConfig, s conversion.Scope) error { + if err := Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.HorizontalPodAutoscalerConfig = (*core.HorizontalPodAutoscalerConfig)(unsafe.Pointer(in.HorizontalPodAutoscalerConfig)) + out.NodeCIDRMaskSize = (*int32)(unsafe.Pointer(in.NodeCIDRMaskSize)) + return nil +} + +// Convert_v1beta1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig is an autogenerated conversion function. 
+func Convert_v1beta1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(in *KubeControllerManagerConfig, out *core.KubeControllerManagerConfig, s conversion.Scope) error { + return autoConvert_v1beta1_KubeControllerManagerConfig_To_core_KubeControllerManagerConfig(in, out, s) +} + +func autoConvert_core_KubeControllerManagerConfig_To_v1beta1_KubeControllerManagerConfig(in *core.KubeControllerManagerConfig, out *KubeControllerManagerConfig, s conversion.Scope) error { + if err := Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.HorizontalPodAutoscalerConfig = (*HorizontalPodAutoscalerConfig)(unsafe.Pointer(in.HorizontalPodAutoscalerConfig)) + out.NodeCIDRMaskSize = (*int32)(unsafe.Pointer(in.NodeCIDRMaskSize)) + return nil +} + +// Convert_core_KubeControllerManagerConfig_To_v1beta1_KubeControllerManagerConfig is an autogenerated conversion function. +func Convert_core_KubeControllerManagerConfig_To_v1beta1_KubeControllerManagerConfig(in *core.KubeControllerManagerConfig, out *KubeControllerManagerConfig, s conversion.Scope) error { + return autoConvert_core_KubeControllerManagerConfig_To_v1beta1_KubeControllerManagerConfig(in, out, s) +} + +func autoConvert_v1beta1_KubeProxyConfig_To_core_KubeProxyConfig(in *KubeProxyConfig, out *core.KubeProxyConfig, s conversion.Scope) error { + if err := Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.Mode = (*core.ProxyMode)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_v1beta1_KubeProxyConfig_To_core_KubeProxyConfig is an autogenerated conversion function. +func Convert_v1beta1_KubeProxyConfig_To_core_KubeProxyConfig(in *KubeProxyConfig, out *core.KubeProxyConfig, s conversion.Scope) error { + return autoConvert_v1beta1_KubeProxyConfig_To_core_KubeProxyConfig(in, out, s) +} + +func autoConvert_core_KubeProxyConfig_To_v1beta1_KubeProxyConfig(in *core.KubeProxyConfig, out *KubeProxyConfig, s conversion.Scope) error { + if err := Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.Mode = (*ProxyMode)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_core_KubeProxyConfig_To_v1beta1_KubeProxyConfig is an autogenerated conversion function. +func Convert_core_KubeProxyConfig_To_v1beta1_KubeProxyConfig(in *core.KubeProxyConfig, out *KubeProxyConfig, s conversion.Scope) error { + return autoConvert_core_KubeProxyConfig_To_v1beta1_KubeProxyConfig(in, out, s) +} + +func autoConvert_v1beta1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(in *KubeSchedulerConfig, out *core.KubeSchedulerConfig, s conversion.Scope) error { + if err := Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_KubeSchedulerConfig_To_core_KubeSchedulerConfig is an autogenerated conversion function. 
+func Convert_v1beta1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(in *KubeSchedulerConfig, out *core.KubeSchedulerConfig, s conversion.Scope) error { + return autoConvert_v1beta1_KubeSchedulerConfig_To_core_KubeSchedulerConfig(in, out, s) +} + +func autoConvert_core_KubeSchedulerConfig_To_v1beta1_KubeSchedulerConfig(in *core.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error { + if err := Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + return nil +} + +// Convert_core_KubeSchedulerConfig_To_v1beta1_KubeSchedulerConfig is an autogenerated conversion function. +func Convert_core_KubeSchedulerConfig_To_v1beta1_KubeSchedulerConfig(in *core.KubeSchedulerConfig, out *KubeSchedulerConfig, s conversion.Scope) error { + return autoConvert_core_KubeSchedulerConfig_To_v1beta1_KubeSchedulerConfig(in, out, s) +} + +func autoConvert_v1beta1_KubeletConfig_To_core_KubeletConfig(in *KubeletConfig, out *core.KubeletConfig, s conversion.Scope) error { + if err := Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.CPUCFSQuota = (*bool)(unsafe.Pointer(in.CPUCFSQuota)) + out.CPUManagerPolicy = (*string)(unsafe.Pointer(in.CPUManagerPolicy)) + out.EvictionHard = (*core.KubeletConfigEviction)(unsafe.Pointer(in.EvictionHard)) + out.EvictionMaxPodGracePeriod = (*int32)(unsafe.Pointer(in.EvictionMaxPodGracePeriod)) + out.EvictionMinimumReclaim = (*core.KubeletConfigEvictionMinimumReclaim)(unsafe.Pointer(in.EvictionMinimumReclaim)) + out.EvictionPressureTransitionPeriod = (*metav1.Duration)(unsafe.Pointer(in.EvictionPressureTransitionPeriod)) + out.EvictionSoft = (*core.KubeletConfigEviction)(unsafe.Pointer(in.EvictionSoft)) + out.EvictionSoftGracePeriod = (*core.KubeletConfigEvictionSoftGracePeriod)(unsafe.Pointer(in.EvictionSoftGracePeriod)) + out.MaxPods = (*int32)(unsafe.Pointer(in.MaxPods)) + out.PodPIDsLimit = (*int64)(unsafe.Pointer(in.PodPIDsLimit)) + return nil +} + +// Convert_v1beta1_KubeletConfig_To_core_KubeletConfig is an autogenerated conversion function. 
+func Convert_v1beta1_KubeletConfig_To_core_KubeletConfig(in *KubeletConfig, out *core.KubeletConfig, s conversion.Scope) error { + return autoConvert_v1beta1_KubeletConfig_To_core_KubeletConfig(in, out, s) +} + +func autoConvert_core_KubeletConfig_To_v1beta1_KubeletConfig(in *core.KubeletConfig, out *KubeletConfig, s conversion.Scope) error { + if err := Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(&in.KubernetesConfig, &out.KubernetesConfig, s); err != nil { + return err + } + out.CPUCFSQuota = (*bool)(unsafe.Pointer(in.CPUCFSQuota)) + out.CPUManagerPolicy = (*string)(unsafe.Pointer(in.CPUManagerPolicy)) + out.EvictionHard = (*KubeletConfigEviction)(unsafe.Pointer(in.EvictionHard)) + out.EvictionMaxPodGracePeriod = (*int32)(unsafe.Pointer(in.EvictionMaxPodGracePeriod)) + out.EvictionMinimumReclaim = (*KubeletConfigEvictionMinimumReclaim)(unsafe.Pointer(in.EvictionMinimumReclaim)) + out.EvictionPressureTransitionPeriod = (*metav1.Duration)(unsafe.Pointer(in.EvictionPressureTransitionPeriod)) + out.EvictionSoft = (*KubeletConfigEviction)(unsafe.Pointer(in.EvictionSoft)) + out.EvictionSoftGracePeriod = (*KubeletConfigEvictionSoftGracePeriod)(unsafe.Pointer(in.EvictionSoftGracePeriod)) + out.MaxPods = (*int32)(unsafe.Pointer(in.MaxPods)) + out.PodPIDsLimit = (*int64)(unsafe.Pointer(in.PodPIDsLimit)) + return nil +} + +// Convert_core_KubeletConfig_To_v1beta1_KubeletConfig is an autogenerated conversion function. +func Convert_core_KubeletConfig_To_v1beta1_KubeletConfig(in *core.KubeletConfig, out *KubeletConfig, s conversion.Scope) error { + return autoConvert_core_KubeletConfig_To_v1beta1_KubeletConfig(in, out, s) +} + +func autoConvert_v1beta1_KubeletConfigEviction_To_core_KubeletConfigEviction(in *KubeletConfigEviction, out *core.KubeletConfigEviction, s conversion.Scope) error { + out.MemoryAvailable = (*string)(unsafe.Pointer(in.MemoryAvailable)) + out.ImageFSAvailable = (*string)(unsafe.Pointer(in.ImageFSAvailable)) + out.ImageFSInodesFree = (*string)(unsafe.Pointer(in.ImageFSInodesFree)) + out.NodeFSAvailable = (*string)(unsafe.Pointer(in.NodeFSAvailable)) + out.NodeFSInodesFree = (*string)(unsafe.Pointer(in.NodeFSInodesFree)) + return nil +} + +// Convert_v1beta1_KubeletConfigEviction_To_core_KubeletConfigEviction is an autogenerated conversion function. +func Convert_v1beta1_KubeletConfigEviction_To_core_KubeletConfigEviction(in *KubeletConfigEviction, out *core.KubeletConfigEviction, s conversion.Scope) error { + return autoConvert_v1beta1_KubeletConfigEviction_To_core_KubeletConfigEviction(in, out, s) +} + +func autoConvert_core_KubeletConfigEviction_To_v1beta1_KubeletConfigEviction(in *core.KubeletConfigEviction, out *KubeletConfigEviction, s conversion.Scope) error { + out.MemoryAvailable = (*string)(unsafe.Pointer(in.MemoryAvailable)) + out.ImageFSAvailable = (*string)(unsafe.Pointer(in.ImageFSAvailable)) + out.ImageFSInodesFree = (*string)(unsafe.Pointer(in.ImageFSInodesFree)) + out.NodeFSAvailable = (*string)(unsafe.Pointer(in.NodeFSAvailable)) + out.NodeFSInodesFree = (*string)(unsafe.Pointer(in.NodeFSInodesFree)) + return nil +} + +// Convert_core_KubeletConfigEviction_To_v1beta1_KubeletConfigEviction is an autogenerated conversion function. 
+func Convert_core_KubeletConfigEviction_To_v1beta1_KubeletConfigEviction(in *core.KubeletConfigEviction, out *KubeletConfigEviction, s conversion.Scope) error { + return autoConvert_core_KubeletConfigEviction_To_v1beta1_KubeletConfigEviction(in, out, s) +} + +func autoConvert_v1beta1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(in *KubeletConfigEvictionMinimumReclaim, out *core.KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error { + out.MemoryAvailable = (*resource.Quantity)(unsafe.Pointer(in.MemoryAvailable)) + out.ImageFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.ImageFSAvailable)) + out.ImageFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.ImageFSInodesFree)) + out.NodeFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.NodeFSAvailable)) + out.NodeFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.NodeFSInodesFree)) + return nil +} + +// Convert_v1beta1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim is an autogenerated conversion function. +func Convert_v1beta1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(in *KubeletConfigEvictionMinimumReclaim, out *core.KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error { + return autoConvert_v1beta1_KubeletConfigEvictionMinimumReclaim_To_core_KubeletConfigEvictionMinimumReclaim(in, out, s) +} + +func autoConvert_core_KubeletConfigEvictionMinimumReclaim_To_v1beta1_KubeletConfigEvictionMinimumReclaim(in *core.KubeletConfigEvictionMinimumReclaim, out *KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error { + out.MemoryAvailable = (*resource.Quantity)(unsafe.Pointer(in.MemoryAvailable)) + out.ImageFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.ImageFSAvailable)) + out.ImageFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.ImageFSInodesFree)) + out.NodeFSAvailable = (*resource.Quantity)(unsafe.Pointer(in.NodeFSAvailable)) + out.NodeFSInodesFree = (*resource.Quantity)(unsafe.Pointer(in.NodeFSInodesFree)) + return nil +} + +// Convert_core_KubeletConfigEvictionMinimumReclaim_To_v1beta1_KubeletConfigEvictionMinimumReclaim is an autogenerated conversion function. +func Convert_core_KubeletConfigEvictionMinimumReclaim_To_v1beta1_KubeletConfigEvictionMinimumReclaim(in *core.KubeletConfigEvictionMinimumReclaim, out *KubeletConfigEvictionMinimumReclaim, s conversion.Scope) error { + return autoConvert_core_KubeletConfigEvictionMinimumReclaim_To_v1beta1_KubeletConfigEvictionMinimumReclaim(in, out, s) +} + +func autoConvert_v1beta1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(in *KubeletConfigEvictionSoftGracePeriod, out *core.KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error { + out.MemoryAvailable = (*metav1.Duration)(unsafe.Pointer(in.MemoryAvailable)) + out.ImageFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.ImageFSAvailable)) + out.ImageFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.ImageFSInodesFree)) + out.NodeFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.NodeFSAvailable)) + out.NodeFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.NodeFSInodesFree)) + return nil +} + +// Convert_v1beta1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod is an autogenerated conversion function. 
+func Convert_v1beta1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(in *KubeletConfigEvictionSoftGracePeriod, out *core.KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error { + return autoConvert_v1beta1_KubeletConfigEvictionSoftGracePeriod_To_core_KubeletConfigEvictionSoftGracePeriod(in, out, s) +} + +func autoConvert_core_KubeletConfigEvictionSoftGracePeriod_To_v1beta1_KubeletConfigEvictionSoftGracePeriod(in *core.KubeletConfigEvictionSoftGracePeriod, out *KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error { + out.MemoryAvailable = (*metav1.Duration)(unsafe.Pointer(in.MemoryAvailable)) + out.ImageFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.ImageFSAvailable)) + out.ImageFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.ImageFSInodesFree)) + out.NodeFSAvailable = (*metav1.Duration)(unsafe.Pointer(in.NodeFSAvailable)) + out.NodeFSInodesFree = (*metav1.Duration)(unsafe.Pointer(in.NodeFSInodesFree)) + return nil +} + +// Convert_core_KubeletConfigEvictionSoftGracePeriod_To_v1beta1_KubeletConfigEvictionSoftGracePeriod is an autogenerated conversion function. +func Convert_core_KubeletConfigEvictionSoftGracePeriod_To_v1beta1_KubeletConfigEvictionSoftGracePeriod(in *core.KubeletConfigEvictionSoftGracePeriod, out *KubeletConfigEvictionSoftGracePeriod, s conversion.Scope) error { + return autoConvert_core_KubeletConfigEvictionSoftGracePeriod_To_v1beta1_KubeletConfigEvictionSoftGracePeriod(in, out, s) +} + +func autoConvert_v1beta1_Kubernetes_To_core_Kubernetes(in *Kubernetes, out *core.Kubernetes, s conversion.Scope) error { + out.AllowPrivilegedContainers = (*bool)(unsafe.Pointer(in.AllowPrivilegedContainers)) + out.ClusterAutoscaler = (*core.ClusterAutoscaler)(unsafe.Pointer(in.ClusterAutoscaler)) + out.KubeAPIServer = (*core.KubeAPIServerConfig)(unsafe.Pointer(in.KubeAPIServer)) + out.KubeControllerManager = (*core.KubeControllerManagerConfig)(unsafe.Pointer(in.KubeControllerManager)) + out.KubeScheduler = (*core.KubeSchedulerConfig)(unsafe.Pointer(in.KubeScheduler)) + out.KubeProxy = (*core.KubeProxyConfig)(unsafe.Pointer(in.KubeProxy)) + out.Kubelet = (*core.KubeletConfig)(unsafe.Pointer(in.Kubelet)) + out.Version = in.Version + return nil +} + +// Convert_v1beta1_Kubernetes_To_core_Kubernetes is an autogenerated conversion function. +func Convert_v1beta1_Kubernetes_To_core_Kubernetes(in *Kubernetes, out *core.Kubernetes, s conversion.Scope) error { + return autoConvert_v1beta1_Kubernetes_To_core_Kubernetes(in, out, s) +} + +func autoConvert_core_Kubernetes_To_v1beta1_Kubernetes(in *core.Kubernetes, out *Kubernetes, s conversion.Scope) error { + out.AllowPrivilegedContainers = (*bool)(unsafe.Pointer(in.AllowPrivilegedContainers)) + out.ClusterAutoscaler = (*ClusterAutoscaler)(unsafe.Pointer(in.ClusterAutoscaler)) + out.KubeAPIServer = (*KubeAPIServerConfig)(unsafe.Pointer(in.KubeAPIServer)) + out.KubeControllerManager = (*KubeControllerManagerConfig)(unsafe.Pointer(in.KubeControllerManager)) + out.KubeScheduler = (*KubeSchedulerConfig)(unsafe.Pointer(in.KubeScheduler)) + out.KubeProxy = (*KubeProxyConfig)(unsafe.Pointer(in.KubeProxy)) + out.Kubelet = (*KubeletConfig)(unsafe.Pointer(in.Kubelet)) + out.Version = in.Version + return nil +} + +// Convert_core_Kubernetes_To_v1beta1_Kubernetes is an autogenerated conversion function. 
+func Convert_core_Kubernetes_To_v1beta1_Kubernetes(in *core.Kubernetes, out *Kubernetes, s conversion.Scope) error { + return autoConvert_core_Kubernetes_To_v1beta1_Kubernetes(in, out, s) +} + +func autoConvert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(in *KubernetesConfig, out *core.KubernetesConfig, s conversion.Scope) error { + out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + return nil +} + +// Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig is an autogenerated conversion function. +func Convert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(in *KubernetesConfig, out *core.KubernetesConfig, s conversion.Scope) error { + return autoConvert_v1beta1_KubernetesConfig_To_core_KubernetesConfig(in, out, s) +} + +func autoConvert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(in *core.KubernetesConfig, out *KubernetesConfig, s conversion.Scope) error { + out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + return nil +} + +// Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig is an autogenerated conversion function. +func Convert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(in *core.KubernetesConfig, out *KubernetesConfig, s conversion.Scope) error { + return autoConvert_core_KubernetesConfig_To_v1beta1_KubernetesConfig(in, out, s) +} + +func autoConvert_v1beta1_KubernetesDashboard_To_core_KubernetesDashboard(in *KubernetesDashboard, out *core.KubernetesDashboard, s conversion.Scope) error { + if err := Convert_v1beta1_Addon_To_core_Addon(&in.Addon, &out.Addon, s); err != nil { + return err + } + out.AuthenticationMode = (*string)(unsafe.Pointer(in.AuthenticationMode)) + return nil +} + +// Convert_v1beta1_KubernetesDashboard_To_core_KubernetesDashboard is an autogenerated conversion function. +func Convert_v1beta1_KubernetesDashboard_To_core_KubernetesDashboard(in *KubernetesDashboard, out *core.KubernetesDashboard, s conversion.Scope) error { + return autoConvert_v1beta1_KubernetesDashboard_To_core_KubernetesDashboard(in, out, s) +} + +func autoConvert_core_KubernetesDashboard_To_v1beta1_KubernetesDashboard(in *core.KubernetesDashboard, out *KubernetesDashboard, s conversion.Scope) error { + if err := Convert_core_Addon_To_v1beta1_Addon(&in.Addon, &out.Addon, s); err != nil { + return err + } + out.AuthenticationMode = (*string)(unsafe.Pointer(in.AuthenticationMode)) + return nil +} + +// Convert_core_KubernetesDashboard_To_v1beta1_KubernetesDashboard is an autogenerated conversion function. +func Convert_core_KubernetesDashboard_To_v1beta1_KubernetesDashboard(in *core.KubernetesDashboard, out *KubernetesDashboard, s conversion.Scope) error { + return autoConvert_core_KubernetesDashboard_To_v1beta1_KubernetesDashboard(in, out, s) +} + +func autoConvert_v1beta1_KubernetesInfo_To_core_KubernetesInfo(in *KubernetesInfo, out *core.KubernetesInfo, s conversion.Scope) error { + out.Version = in.Version + return nil +} + +// Convert_v1beta1_KubernetesInfo_To_core_KubernetesInfo is an autogenerated conversion function. +func Convert_v1beta1_KubernetesInfo_To_core_KubernetesInfo(in *KubernetesInfo, out *core.KubernetesInfo, s conversion.Scope) error { + return autoConvert_v1beta1_KubernetesInfo_To_core_KubernetesInfo(in, out, s) +} + +func autoConvert_core_KubernetesInfo_To_v1beta1_KubernetesInfo(in *core.KubernetesInfo, out *KubernetesInfo, s conversion.Scope) error { + out.Version = in.Version + return nil +} + +// Convert_core_KubernetesInfo_To_v1beta1_KubernetesInfo is an autogenerated conversion function. 
+func Convert_core_KubernetesInfo_To_v1beta1_KubernetesInfo(in *core.KubernetesInfo, out *KubernetesInfo, s conversion.Scope) error { + return autoConvert_core_KubernetesInfo_To_v1beta1_KubernetesInfo(in, out, s) +} + +func autoConvert_v1beta1_KubernetesSettings_To_core_KubernetesSettings(in *KubernetesSettings, out *core.KubernetesSettings, s conversion.Scope) error { + out.Versions = *(*[]core.ExpirableVersion)(unsafe.Pointer(&in.Versions)) + return nil +} + +// Convert_v1beta1_KubernetesSettings_To_core_KubernetesSettings is an autogenerated conversion function. +func Convert_v1beta1_KubernetesSettings_To_core_KubernetesSettings(in *KubernetesSettings, out *core.KubernetesSettings, s conversion.Scope) error { + return autoConvert_v1beta1_KubernetesSettings_To_core_KubernetesSettings(in, out, s) +} + +func autoConvert_core_KubernetesSettings_To_v1beta1_KubernetesSettings(in *core.KubernetesSettings, out *KubernetesSettings, s conversion.Scope) error { + out.Versions = *(*[]ExpirableVersion)(unsafe.Pointer(&in.Versions)) + return nil +} + +// Convert_core_KubernetesSettings_To_v1beta1_KubernetesSettings is an autogenerated conversion function. +func Convert_core_KubernetesSettings_To_v1beta1_KubernetesSettings(in *core.KubernetesSettings, out *KubernetesSettings, s conversion.Scope) error { + return autoConvert_core_KubernetesSettings_To_v1beta1_KubernetesSettings(in, out, s) +} + +func autoConvert_v1beta1_LastError_To_core_LastError(in *LastError, out *core.LastError, s conversion.Scope) error { + out.Description = in.Description + out.TaskID = (*string)(unsafe.Pointer(in.TaskID)) + out.Codes = *(*[]core.ErrorCode)(unsafe.Pointer(&in.Codes)) + out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime)) + return nil +} + +// Convert_v1beta1_LastError_To_core_LastError is an autogenerated conversion function. +func Convert_v1beta1_LastError_To_core_LastError(in *LastError, out *core.LastError, s conversion.Scope) error { + return autoConvert_v1beta1_LastError_To_core_LastError(in, out, s) +} + +func autoConvert_core_LastError_To_v1beta1_LastError(in *core.LastError, out *LastError, s conversion.Scope) error { + out.Description = in.Description + out.TaskID = (*string)(unsafe.Pointer(in.TaskID)) + out.Codes = *(*[]ErrorCode)(unsafe.Pointer(&in.Codes)) + out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime)) + return nil +} + +// Convert_core_LastError_To_v1beta1_LastError is an autogenerated conversion function. +func Convert_core_LastError_To_v1beta1_LastError(in *core.LastError, out *LastError, s conversion.Scope) error { + return autoConvert_core_LastError_To_v1beta1_LastError(in, out, s) +} + +func autoConvert_v1beta1_LastOperation_To_core_LastOperation(in *LastOperation, out *core.LastOperation, s conversion.Scope) error { + out.Description = in.Description + out.LastUpdateTime = in.LastUpdateTime + out.Progress = in.Progress + out.State = core.LastOperationState(in.State) + out.Type = core.LastOperationType(in.Type) + return nil +} + +// Convert_v1beta1_LastOperation_To_core_LastOperation is an autogenerated conversion function. 
+func Convert_v1beta1_LastOperation_To_core_LastOperation(in *LastOperation, out *core.LastOperation, s conversion.Scope) error { + return autoConvert_v1beta1_LastOperation_To_core_LastOperation(in, out, s) +} + +func autoConvert_core_LastOperation_To_v1beta1_LastOperation(in *core.LastOperation, out *LastOperation, s conversion.Scope) error { + out.Description = in.Description + out.LastUpdateTime = in.LastUpdateTime + out.Progress = in.Progress + out.State = LastOperationState(in.State) + out.Type = LastOperationType(in.Type) + return nil +} + +// Convert_core_LastOperation_To_v1beta1_LastOperation is an autogenerated conversion function. +func Convert_core_LastOperation_To_v1beta1_LastOperation(in *core.LastOperation, out *LastOperation, s conversion.Scope) error { + return autoConvert_core_LastOperation_To_v1beta1_LastOperation(in, out, s) +} + +func autoConvert_v1beta1_Machine_To_core_Machine(in *Machine, out *core.Machine, s conversion.Scope) error { + out.Type = in.Type + out.Image = (*core.ShootMachineImage)(unsafe.Pointer(in.Image)) + return nil +} + +// Convert_v1beta1_Machine_To_core_Machine is an autogenerated conversion function. +func Convert_v1beta1_Machine_To_core_Machine(in *Machine, out *core.Machine, s conversion.Scope) error { + return autoConvert_v1beta1_Machine_To_core_Machine(in, out, s) +} + +func autoConvert_core_Machine_To_v1beta1_Machine(in *core.Machine, out *Machine, s conversion.Scope) error { + out.Type = in.Type + out.Image = (*ShootMachineImage)(unsafe.Pointer(in.Image)) + return nil +} + +// Convert_core_Machine_To_v1beta1_Machine is an autogenerated conversion function. +func Convert_core_Machine_To_v1beta1_Machine(in *core.Machine, out *Machine, s conversion.Scope) error { + return autoConvert_core_Machine_To_v1beta1_Machine(in, out, s) +} + +func autoConvert_v1beta1_MachineImage_To_core_MachineImage(in *MachineImage, out *core.MachineImage, s conversion.Scope) error { + out.Name = in.Name + out.Versions = *(*[]core.ExpirableVersion)(unsafe.Pointer(&in.Versions)) + return nil +} + +// Convert_v1beta1_MachineImage_To_core_MachineImage is an autogenerated conversion function. +func Convert_v1beta1_MachineImage_To_core_MachineImage(in *MachineImage, out *core.MachineImage, s conversion.Scope) error { + return autoConvert_v1beta1_MachineImage_To_core_MachineImage(in, out, s) +} + +func autoConvert_core_MachineImage_To_v1beta1_MachineImage(in *core.MachineImage, out *MachineImage, s conversion.Scope) error { + out.Name = in.Name + out.Versions = *(*[]ExpirableVersion)(unsafe.Pointer(&in.Versions)) + return nil +} + +// Convert_core_MachineImage_To_v1beta1_MachineImage is an autogenerated conversion function. +func Convert_core_MachineImage_To_v1beta1_MachineImage(in *core.MachineImage, out *MachineImage, s conversion.Scope) error { + return autoConvert_core_MachineImage_To_v1beta1_MachineImage(in, out, s) +} + +func autoConvert_v1beta1_MachineType_To_core_MachineType(in *MachineType, out *core.MachineType, s conversion.Scope) error { + out.CPU = in.CPU + out.GPU = in.GPU + out.Memory = in.Memory + out.Name = in.Name + out.Storage = (*core.MachineTypeStorage)(unsafe.Pointer(in.Storage)) + out.Usable = (*bool)(unsafe.Pointer(in.Usable)) + return nil +} + +// Convert_v1beta1_MachineType_To_core_MachineType is an autogenerated conversion function. 
+func Convert_v1beta1_MachineType_To_core_MachineType(in *MachineType, out *core.MachineType, s conversion.Scope) error { + return autoConvert_v1beta1_MachineType_To_core_MachineType(in, out, s) +} + +func autoConvert_core_MachineType_To_v1beta1_MachineType(in *core.MachineType, out *MachineType, s conversion.Scope) error { + out.CPU = in.CPU + out.GPU = in.GPU + out.Memory = in.Memory + out.Name = in.Name + out.Storage = (*MachineTypeStorage)(unsafe.Pointer(in.Storage)) + out.Usable = (*bool)(unsafe.Pointer(in.Usable)) + return nil +} + +// Convert_core_MachineType_To_v1beta1_MachineType is an autogenerated conversion function. +func Convert_core_MachineType_To_v1beta1_MachineType(in *core.MachineType, out *MachineType, s conversion.Scope) error { + return autoConvert_core_MachineType_To_v1beta1_MachineType(in, out, s) +} + +func autoConvert_v1beta1_MachineTypeStorage_To_core_MachineTypeStorage(in *MachineTypeStorage, out *core.MachineTypeStorage, s conversion.Scope) error { + out.Class = in.Class + out.Size = in.Size + out.Type = in.Type + return nil +} + +// Convert_v1beta1_MachineTypeStorage_To_core_MachineTypeStorage is an autogenerated conversion function. +func Convert_v1beta1_MachineTypeStorage_To_core_MachineTypeStorage(in *MachineTypeStorage, out *core.MachineTypeStorage, s conversion.Scope) error { + return autoConvert_v1beta1_MachineTypeStorage_To_core_MachineTypeStorage(in, out, s) +} + +func autoConvert_core_MachineTypeStorage_To_v1beta1_MachineTypeStorage(in *core.MachineTypeStorage, out *MachineTypeStorage, s conversion.Scope) error { + out.Class = in.Class + out.Size = in.Size + out.Type = in.Type + return nil +} + +// Convert_core_MachineTypeStorage_To_v1beta1_MachineTypeStorage is an autogenerated conversion function. +func Convert_core_MachineTypeStorage_To_v1beta1_MachineTypeStorage(in *core.MachineTypeStorage, out *MachineTypeStorage, s conversion.Scope) error { + return autoConvert_core_MachineTypeStorage_To_v1beta1_MachineTypeStorage(in, out, s) +} + +func autoConvert_v1beta1_Maintenance_To_core_Maintenance(in *Maintenance, out *core.Maintenance, s conversion.Scope) error { + out.AutoUpdate = (*core.MaintenanceAutoUpdate)(unsafe.Pointer(in.AutoUpdate)) + out.TimeWindow = (*core.MaintenanceTimeWindow)(unsafe.Pointer(in.TimeWindow)) + return nil +} + +// Convert_v1beta1_Maintenance_To_core_Maintenance is an autogenerated conversion function. +func Convert_v1beta1_Maintenance_To_core_Maintenance(in *Maintenance, out *core.Maintenance, s conversion.Scope) error { + return autoConvert_v1beta1_Maintenance_To_core_Maintenance(in, out, s) +} + +func autoConvert_core_Maintenance_To_v1beta1_Maintenance(in *core.Maintenance, out *Maintenance, s conversion.Scope) error { + out.AutoUpdate = (*MaintenanceAutoUpdate)(unsafe.Pointer(in.AutoUpdate)) + out.TimeWindow = (*MaintenanceTimeWindow)(unsafe.Pointer(in.TimeWindow)) + return nil +} + +// Convert_core_Maintenance_To_v1beta1_Maintenance is an autogenerated conversion function. 
+func Convert_core_Maintenance_To_v1beta1_Maintenance(in *core.Maintenance, out *Maintenance, s conversion.Scope) error { + return autoConvert_core_Maintenance_To_v1beta1_Maintenance(in, out, s) +} + +func autoConvert_v1beta1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(in *MaintenanceAutoUpdate, out *core.MaintenanceAutoUpdate, s conversion.Scope) error { + out.KubernetesVersion = in.KubernetesVersion + out.MachineImageVersion = in.MachineImageVersion + return nil +} + +// Convert_v1beta1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate is an autogenerated conversion function. +func Convert_v1beta1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(in *MaintenanceAutoUpdate, out *core.MaintenanceAutoUpdate, s conversion.Scope) error { + return autoConvert_v1beta1_MaintenanceAutoUpdate_To_core_MaintenanceAutoUpdate(in, out, s) +} + +func autoConvert_core_MaintenanceAutoUpdate_To_v1beta1_MaintenanceAutoUpdate(in *core.MaintenanceAutoUpdate, out *MaintenanceAutoUpdate, s conversion.Scope) error { + out.KubernetesVersion = in.KubernetesVersion + out.MachineImageVersion = in.MachineImageVersion + return nil +} + +// Convert_core_MaintenanceAutoUpdate_To_v1beta1_MaintenanceAutoUpdate is an autogenerated conversion function. +func Convert_core_MaintenanceAutoUpdate_To_v1beta1_MaintenanceAutoUpdate(in *core.MaintenanceAutoUpdate, out *MaintenanceAutoUpdate, s conversion.Scope) error { + return autoConvert_core_MaintenanceAutoUpdate_To_v1beta1_MaintenanceAutoUpdate(in, out, s) +} + +func autoConvert_v1beta1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(in *MaintenanceTimeWindow, out *core.MaintenanceTimeWindow, s conversion.Scope) error { + out.Begin = in.Begin + out.End = in.End + return nil +} + +// Convert_v1beta1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow is an autogenerated conversion function. +func Convert_v1beta1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(in *MaintenanceTimeWindow, out *core.MaintenanceTimeWindow, s conversion.Scope) error { + return autoConvert_v1beta1_MaintenanceTimeWindow_To_core_MaintenanceTimeWindow(in, out, s) +} + +func autoConvert_core_MaintenanceTimeWindow_To_v1beta1_MaintenanceTimeWindow(in *core.MaintenanceTimeWindow, out *MaintenanceTimeWindow, s conversion.Scope) error { + out.Begin = in.Begin + out.End = in.End + return nil +} + +// Convert_core_MaintenanceTimeWindow_To_v1beta1_MaintenanceTimeWindow is an autogenerated conversion function. +func Convert_core_MaintenanceTimeWindow_To_v1beta1_MaintenanceTimeWindow(in *core.MaintenanceTimeWindow, out *MaintenanceTimeWindow, s conversion.Scope) error { + return autoConvert_core_MaintenanceTimeWindow_To_v1beta1_MaintenanceTimeWindow(in, out, s) +} + +func autoConvert_v1beta1_Monitoring_To_core_Monitoring(in *Monitoring, out *core.Monitoring, s conversion.Scope) error { + out.Alerting = (*core.Alerting)(unsafe.Pointer(in.Alerting)) + return nil +} + +// Convert_v1beta1_Monitoring_To_core_Monitoring is an autogenerated conversion function. +func Convert_v1beta1_Monitoring_To_core_Monitoring(in *Monitoring, out *core.Monitoring, s conversion.Scope) error { + return autoConvert_v1beta1_Monitoring_To_core_Monitoring(in, out, s) +} + +func autoConvert_core_Monitoring_To_v1beta1_Monitoring(in *core.Monitoring, out *Monitoring, s conversion.Scope) error { + out.Alerting = (*Alerting)(unsafe.Pointer(in.Alerting)) + return nil +} + +// Convert_core_Monitoring_To_v1beta1_Monitoring is an autogenerated conversion function. 
+func Convert_core_Monitoring_To_v1beta1_Monitoring(in *core.Monitoring, out *Monitoring, s conversion.Scope) error { + return autoConvert_core_Monitoring_To_v1beta1_Monitoring(in, out, s) +} + +func autoConvert_v1beta1_Networking_To_core_Networking(in *Networking, out *core.Networking, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.Pods = (*string)(unsafe.Pointer(in.Pods)) + out.Nodes = (*string)(unsafe.Pointer(in.Nodes)) + out.Services = (*string)(unsafe.Pointer(in.Services)) + return nil +} + +// Convert_v1beta1_Networking_To_core_Networking is an autogenerated conversion function. +func Convert_v1beta1_Networking_To_core_Networking(in *Networking, out *core.Networking, s conversion.Scope) error { + return autoConvert_v1beta1_Networking_To_core_Networking(in, out, s) +} + +func autoConvert_core_Networking_To_v1beta1_Networking(in *core.Networking, out *Networking, s conversion.Scope) error { + out.Type = in.Type + out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.Pods = (*string)(unsafe.Pointer(in.Pods)) + out.Nodes = (*string)(unsafe.Pointer(in.Nodes)) + out.Services = (*string)(unsafe.Pointer(in.Services)) + return nil +} + +// Convert_core_Networking_To_v1beta1_Networking is an autogenerated conversion function. +func Convert_core_Networking_To_v1beta1_Networking(in *core.Networking, out *Networking, s conversion.Scope) error { + return autoConvert_core_Networking_To_v1beta1_Networking(in, out, s) +} + +func autoConvert_v1beta1_NginxIngress_To_core_NginxIngress(in *NginxIngress, out *core.NginxIngress, s conversion.Scope) error { + if err := Convert_v1beta1_Addon_To_core_Addon(&in.Addon, &out.Addon, s); err != nil { + return err + } + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config)) + out.ExternalTrafficPolicy = (*v1.ServiceExternalTrafficPolicyType)(unsafe.Pointer(in.ExternalTrafficPolicy)) + return nil +} + +// Convert_v1beta1_NginxIngress_To_core_NginxIngress is an autogenerated conversion function. +func Convert_v1beta1_NginxIngress_To_core_NginxIngress(in *NginxIngress, out *core.NginxIngress, s conversion.Scope) error { + return autoConvert_v1beta1_NginxIngress_To_core_NginxIngress(in, out, s) +} + +func autoConvert_core_NginxIngress_To_v1beta1_NginxIngress(in *core.NginxIngress, out *NginxIngress, s conversion.Scope) error { + if err := Convert_core_Addon_To_v1beta1_Addon(&in.Addon, &out.Addon, s); err != nil { + return err + } + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config)) + out.ExternalTrafficPolicy = (*v1.ServiceExternalTrafficPolicyType)(unsafe.Pointer(in.ExternalTrafficPolicy)) + return nil +} + +// Convert_core_NginxIngress_To_v1beta1_NginxIngress is an autogenerated conversion function. 
+func Convert_core_NginxIngress_To_v1beta1_NginxIngress(in *core.NginxIngress, out *NginxIngress, s conversion.Scope) error { + return autoConvert_core_NginxIngress_To_v1beta1_NginxIngress(in, out, s) +} + +func autoConvert_v1beta1_OIDCConfig_To_core_OIDCConfig(in *OIDCConfig, out *core.OIDCConfig, s conversion.Scope) error { + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + out.ClientAuthentication = (*core.OpenIDConnectClientAuthentication)(unsafe.Pointer(in.ClientAuthentication)) + out.ClientID = (*string)(unsafe.Pointer(in.ClientID)) + out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim)) + out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix)) + out.IssuerURL = (*string)(unsafe.Pointer(in.IssuerURL)) + out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims)) + out.SigningAlgs = *(*[]string)(unsafe.Pointer(&in.SigningAlgs)) + out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim)) + out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix)) + return nil +} + +// Convert_v1beta1_OIDCConfig_To_core_OIDCConfig is an autogenerated conversion function. +func Convert_v1beta1_OIDCConfig_To_core_OIDCConfig(in *OIDCConfig, out *core.OIDCConfig, s conversion.Scope) error { + return autoConvert_v1beta1_OIDCConfig_To_core_OIDCConfig(in, out, s) +} + +func autoConvert_core_OIDCConfig_To_v1beta1_OIDCConfig(in *core.OIDCConfig, out *OIDCConfig, s conversion.Scope) error { + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + out.ClientAuthentication = (*OpenIDConnectClientAuthentication)(unsafe.Pointer(in.ClientAuthentication)) + out.ClientID = (*string)(unsafe.Pointer(in.ClientID)) + out.GroupsClaim = (*string)(unsafe.Pointer(in.GroupsClaim)) + out.GroupsPrefix = (*string)(unsafe.Pointer(in.GroupsPrefix)) + out.IssuerURL = (*string)(unsafe.Pointer(in.IssuerURL)) + out.RequiredClaims = *(*map[string]string)(unsafe.Pointer(&in.RequiredClaims)) + out.SigningAlgs = *(*[]string)(unsafe.Pointer(&in.SigningAlgs)) + out.UsernameClaim = (*string)(unsafe.Pointer(in.UsernameClaim)) + out.UsernamePrefix = (*string)(unsafe.Pointer(in.UsernamePrefix)) + return nil +} + +// Convert_core_OIDCConfig_To_v1beta1_OIDCConfig is an autogenerated conversion function. +func Convert_core_OIDCConfig_To_v1beta1_OIDCConfig(in *core.OIDCConfig, out *OIDCConfig, s conversion.Scope) error { + return autoConvert_core_OIDCConfig_To_v1beta1_OIDCConfig(in, out, s) +} + +func autoConvert_v1beta1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(in *OpenIDConnectClientAuthentication, out *core.OpenIDConnectClientAuthentication, s conversion.Scope) error { + out.ExtraConfig = *(*map[string]string)(unsafe.Pointer(&in.ExtraConfig)) + out.Secret = (*string)(unsafe.Pointer(in.Secret)) + return nil +} + +// Convert_v1beta1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication is an autogenerated conversion function. 
+func Convert_v1beta1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(in *OpenIDConnectClientAuthentication, out *core.OpenIDConnectClientAuthentication, s conversion.Scope) error { + return autoConvert_v1beta1_OpenIDConnectClientAuthentication_To_core_OpenIDConnectClientAuthentication(in, out, s) +} + +func autoConvert_core_OpenIDConnectClientAuthentication_To_v1beta1_OpenIDConnectClientAuthentication(in *core.OpenIDConnectClientAuthentication, out *OpenIDConnectClientAuthentication, s conversion.Scope) error { + out.ExtraConfig = *(*map[string]string)(unsafe.Pointer(&in.ExtraConfig)) + out.Secret = (*string)(unsafe.Pointer(in.Secret)) + return nil +} + +// Convert_core_OpenIDConnectClientAuthentication_To_v1beta1_OpenIDConnectClientAuthentication is an autogenerated conversion function. +func Convert_core_OpenIDConnectClientAuthentication_To_v1beta1_OpenIDConnectClientAuthentication(in *core.OpenIDConnectClientAuthentication, out *OpenIDConnectClientAuthentication, s conversion.Scope) error { + return autoConvert_core_OpenIDConnectClientAuthentication_To_v1beta1_OpenIDConnectClientAuthentication(in, out, s) +} + +func autoConvert_v1beta1_Plant_To_core_Plant(in *Plant, out *core.Plant, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_PlantSpec_To_core_PlantSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_PlantStatus_To_core_PlantStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Plant_To_core_Plant is an autogenerated conversion function. +func Convert_v1beta1_Plant_To_core_Plant(in *Plant, out *core.Plant, s conversion.Scope) error { + return autoConvert_v1beta1_Plant_To_core_Plant(in, out, s) +} + +func autoConvert_core_Plant_To_v1beta1_Plant(in *core.Plant, out *Plant, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PlantSpec_To_v1beta1_PlantSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PlantStatus_To_v1beta1_PlantStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Plant_To_v1beta1_Plant is an autogenerated conversion function. +func Convert_core_Plant_To_v1beta1_Plant(in *core.Plant, out *Plant, s conversion.Scope) error { + return autoConvert_core_Plant_To_v1beta1_Plant(in, out, s) +} + +func autoConvert_v1beta1_PlantList_To_core_PlantList(in *PlantList, out *core.PlantList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Plant)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_PlantList_To_core_PlantList is an autogenerated conversion function. +func Convert_v1beta1_PlantList_To_core_PlantList(in *PlantList, out *core.PlantList, s conversion.Scope) error { + return autoConvert_v1beta1_PlantList_To_core_PlantList(in, out, s) +} + +func autoConvert_core_PlantList_To_v1beta1_PlantList(in *core.PlantList, out *PlantList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Plant)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_PlantList_To_v1beta1_PlantList is an autogenerated conversion function. 
+func Convert_core_PlantList_To_v1beta1_PlantList(in *core.PlantList, out *PlantList, s conversion.Scope) error { + return autoConvert_core_PlantList_To_v1beta1_PlantList(in, out, s) +} + +func autoConvert_v1beta1_PlantSpec_To_core_PlantSpec(in *PlantSpec, out *core.PlantSpec, s conversion.Scope) error { + out.SecretRef = in.SecretRef + out.Endpoints = *(*[]core.Endpoint)(unsafe.Pointer(&in.Endpoints)) + return nil +} + +// Convert_v1beta1_PlantSpec_To_core_PlantSpec is an autogenerated conversion function. +func Convert_v1beta1_PlantSpec_To_core_PlantSpec(in *PlantSpec, out *core.PlantSpec, s conversion.Scope) error { + return autoConvert_v1beta1_PlantSpec_To_core_PlantSpec(in, out, s) +} + +func autoConvert_core_PlantSpec_To_v1beta1_PlantSpec(in *core.PlantSpec, out *PlantSpec, s conversion.Scope) error { + out.SecretRef = in.SecretRef + out.Endpoints = *(*[]Endpoint)(unsafe.Pointer(&in.Endpoints)) + return nil +} + +// Convert_core_PlantSpec_To_v1beta1_PlantSpec is an autogenerated conversion function. +func Convert_core_PlantSpec_To_v1beta1_PlantSpec(in *core.PlantSpec, out *PlantSpec, s conversion.Scope) error { + return autoConvert_core_PlantSpec_To_v1beta1_PlantSpec(in, out, s) +} + +func autoConvert_v1beta1_PlantStatus_To_core_PlantStatus(in *PlantStatus, out *core.PlantStatus, s conversion.Scope) error { + out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions)) + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.ClusterInfo = (*core.ClusterInfo)(unsafe.Pointer(in.ClusterInfo)) + return nil +} + +// Convert_v1beta1_PlantStatus_To_core_PlantStatus is an autogenerated conversion function. +func Convert_v1beta1_PlantStatus_To_core_PlantStatus(in *PlantStatus, out *core.PlantStatus, s conversion.Scope) error { + return autoConvert_v1beta1_PlantStatus_To_core_PlantStatus(in, out, s) +} + +func autoConvert_core_PlantStatus_To_v1beta1_PlantStatus(in *core.PlantStatus, out *PlantStatus, s conversion.Scope) error { + out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions)) + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.ClusterInfo = (*ClusterInfo)(unsafe.Pointer(in.ClusterInfo)) + return nil +} + +// Convert_core_PlantStatus_To_v1beta1_PlantStatus is an autogenerated conversion function. +func Convert_core_PlantStatus_To_v1beta1_PlantStatus(in *core.PlantStatus, out *PlantStatus, s conversion.Scope) error { + return autoConvert_core_PlantStatus_To_v1beta1_PlantStatus(in, out, s) +} + +func autoConvert_v1beta1_Project_To_core_Project(in *Project, out *core.Project, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ProjectSpec_To_core_ProjectSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ProjectStatus_To_core_ProjectStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Project_To_core_Project is an autogenerated conversion function. 
+func Convert_v1beta1_Project_To_core_Project(in *Project, out *core.Project, s conversion.Scope) error { + return autoConvert_v1beta1_Project_To_core_Project(in, out, s) +} + +func autoConvert_core_Project_To_v1beta1_Project(in *core.Project, out *Project, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ProjectSpec_To_v1beta1_ProjectSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ProjectStatus_To_v1beta1_ProjectStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Project_To_v1beta1_Project is an autogenerated conversion function. +func Convert_core_Project_To_v1beta1_Project(in *core.Project, out *Project, s conversion.Scope) error { + return autoConvert_core_Project_To_v1beta1_Project(in, out, s) +} + +func autoConvert_v1beta1_ProjectList_To_core_ProjectList(in *ProjectList, out *core.ProjectList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Project, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Project_To_core_Project(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1beta1_ProjectList_To_core_ProjectList is an autogenerated conversion function. +func Convert_v1beta1_ProjectList_To_core_ProjectList(in *ProjectList, out *core.ProjectList, s conversion.Scope) error { + return autoConvert_v1beta1_ProjectList_To_core_ProjectList(in, out, s) +} + +func autoConvert_core_ProjectList_To_v1beta1_ProjectList(in *core.ProjectList, out *ProjectList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + if err := Convert_core_Project_To_v1beta1_Project(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_ProjectList_To_v1beta1_ProjectList is an autogenerated conversion function. 
+func Convert_core_ProjectList_To_v1beta1_ProjectList(in *core.ProjectList, out *ProjectList, s conversion.Scope) error { + return autoConvert_core_ProjectList_To_v1beta1_ProjectList(in, out, s) +} + +func autoConvert_v1beta1_ProjectMember_To_core_ProjectMember(in *ProjectMember, out *core.ProjectMember, s conversion.Scope) error { + out.Subject = in.Subject + // WARNING: in.Role requires manual conversion: does not exist in peer-type + out.Roles = *(*[]string)(unsafe.Pointer(&in.Roles)) + return nil +} + +func autoConvert_core_ProjectMember_To_v1beta1_ProjectMember(in *core.ProjectMember, out *ProjectMember, s conversion.Scope) error { + out.Subject = in.Subject + out.Roles = *(*[]string)(unsafe.Pointer(&in.Roles)) + return nil +} + +func autoConvert_v1beta1_ProjectSpec_To_core_ProjectSpec(in *ProjectSpec, out *core.ProjectSpec, s conversion.Scope) error { + out.CreatedBy = (*rbacv1.Subject)(unsafe.Pointer(in.CreatedBy)) + out.Description = (*string)(unsafe.Pointer(in.Description)) + out.Owner = (*rbacv1.Subject)(unsafe.Pointer(in.Owner)) + out.Purpose = (*string)(unsafe.Pointer(in.Purpose)) + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]core.ProjectMember, len(*in)) + for i := range *in { + if err := Convert_v1beta1_ProjectMember_To_core_ProjectMember(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Members = nil + } + out.Namespace = (*string)(unsafe.Pointer(in.Namespace)) + return nil +} + +func autoConvert_core_ProjectSpec_To_v1beta1_ProjectSpec(in *core.ProjectSpec, out *ProjectSpec, s conversion.Scope) error { + out.CreatedBy = (*rbacv1.Subject)(unsafe.Pointer(in.CreatedBy)) + out.Description = (*string)(unsafe.Pointer(in.Description)) + out.Owner = (*rbacv1.Subject)(unsafe.Pointer(in.Owner)) + out.Purpose = (*string)(unsafe.Pointer(in.Purpose)) + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]ProjectMember, len(*in)) + for i := range *in { + if err := Convert_core_ProjectMember_To_v1beta1_ProjectMember(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Members = nil + } + out.Namespace = (*string)(unsafe.Pointer(in.Namespace)) + return nil +} + +func autoConvert_v1beta1_ProjectStatus_To_core_ProjectStatus(in *ProjectStatus, out *core.ProjectStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.Phase = core.ProjectPhase(in.Phase) + return nil +} + +// Convert_v1beta1_ProjectStatus_To_core_ProjectStatus is an autogenerated conversion function. +func Convert_v1beta1_ProjectStatus_To_core_ProjectStatus(in *ProjectStatus, out *core.ProjectStatus, s conversion.Scope) error { + return autoConvert_v1beta1_ProjectStatus_To_core_ProjectStatus(in, out, s) +} + +func autoConvert_core_ProjectStatus_To_v1beta1_ProjectStatus(in *core.ProjectStatus, out *ProjectStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.Phase = ProjectPhase(in.Phase) + return nil +} + +// Convert_core_ProjectStatus_To_v1beta1_ProjectStatus is an autogenerated conversion function. 
+func Convert_core_ProjectStatus_To_v1beta1_ProjectStatus(in *core.ProjectStatus, out *ProjectStatus, s conversion.Scope) error { + return autoConvert_core_ProjectStatus_To_v1beta1_ProjectStatus(in, out, s) +} + +func autoConvert_v1beta1_Provider_To_core_Provider(in *Provider, out *core.Provider, s conversion.Scope) error { + out.Type = in.Type + out.ControlPlaneConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ControlPlaneConfig)) + out.InfrastructureConfig = (*core.ProviderConfig)(unsafe.Pointer(in.InfrastructureConfig)) + out.Workers = *(*[]core.Worker)(unsafe.Pointer(&in.Workers)) + return nil +} + +// Convert_v1beta1_Provider_To_core_Provider is an autogenerated conversion function. +func Convert_v1beta1_Provider_To_core_Provider(in *Provider, out *core.Provider, s conversion.Scope) error { + return autoConvert_v1beta1_Provider_To_core_Provider(in, out, s) +} + +func autoConvert_core_Provider_To_v1beta1_Provider(in *core.Provider, out *Provider, s conversion.Scope) error { + out.Type = in.Type + out.ControlPlaneConfig = (*ProviderConfig)(unsafe.Pointer(in.ControlPlaneConfig)) + out.InfrastructureConfig = (*ProviderConfig)(unsafe.Pointer(in.InfrastructureConfig)) + out.Workers = *(*[]Worker)(unsafe.Pointer(&in.Workers)) + return nil +} + +// Convert_core_Provider_To_v1beta1_Provider is an autogenerated conversion function. +func Convert_core_Provider_To_v1beta1_Provider(in *core.Provider, out *Provider, s conversion.Scope) error { + return autoConvert_core_Provider_To_v1beta1_Provider(in, out, s) +} + +func autoConvert_v1beta1_ProviderConfig_To_core_ProviderConfig(in *ProviderConfig, out *core.ProviderConfig, s conversion.Scope) error { + out.RawExtension = in.RawExtension + return nil +} + +// Convert_v1beta1_ProviderConfig_To_core_ProviderConfig is an autogenerated conversion function. +func Convert_v1beta1_ProviderConfig_To_core_ProviderConfig(in *ProviderConfig, out *core.ProviderConfig, s conversion.Scope) error { + return autoConvert_v1beta1_ProviderConfig_To_core_ProviderConfig(in, out, s) +} + +func autoConvert_core_ProviderConfig_To_v1beta1_ProviderConfig(in *core.ProviderConfig, out *ProviderConfig, s conversion.Scope) error { + out.RawExtension = in.RawExtension + return nil +} + +// Convert_core_ProviderConfig_To_v1beta1_ProviderConfig is an autogenerated conversion function. +func Convert_core_ProviderConfig_To_v1beta1_ProviderConfig(in *core.ProviderConfig, out *ProviderConfig, s conversion.Scope) error { + return autoConvert_core_ProviderConfig_To_v1beta1_ProviderConfig(in, out, s) +} + +func autoConvert_v1beta1_Quota_To_core_Quota(in *Quota, out *core.Quota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_QuotaSpec_To_core_QuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Quota_To_core_Quota is an autogenerated conversion function. +func Convert_v1beta1_Quota_To_core_Quota(in *Quota, out *core.Quota, s conversion.Scope) error { + return autoConvert_v1beta1_Quota_To_core_Quota(in, out, s) +} + +func autoConvert_core_Quota_To_v1beta1_Quota(in *core.Quota, out *Quota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_QuotaSpec_To_v1beta1_QuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_core_Quota_To_v1beta1_Quota is an autogenerated conversion function. 
+func Convert_core_Quota_To_v1beta1_Quota(in *core.Quota, out *Quota, s conversion.Scope) error { + return autoConvert_core_Quota_To_v1beta1_Quota(in, out, s) +} + +func autoConvert_v1beta1_QuotaList_To_core_QuotaList(in *QuotaList, out *core.QuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Quota)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_QuotaList_To_core_QuotaList is an autogenerated conversion function. +func Convert_v1beta1_QuotaList_To_core_QuotaList(in *QuotaList, out *core.QuotaList, s conversion.Scope) error { + return autoConvert_v1beta1_QuotaList_To_core_QuotaList(in, out, s) +} + +func autoConvert_core_QuotaList_To_v1beta1_QuotaList(in *core.QuotaList, out *QuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Quota)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_QuotaList_To_v1beta1_QuotaList is an autogenerated conversion function. +func Convert_core_QuotaList_To_v1beta1_QuotaList(in *core.QuotaList, out *QuotaList, s conversion.Scope) error { + return autoConvert_core_QuotaList_To_v1beta1_QuotaList(in, out, s) +} + +func autoConvert_v1beta1_QuotaSpec_To_core_QuotaSpec(in *QuotaSpec, out *core.QuotaSpec, s conversion.Scope) error { + out.ClusterLifetimeDays = (*int)(unsafe.Pointer(in.ClusterLifetimeDays)) + out.Metrics = *(*v1.ResourceList)(unsafe.Pointer(&in.Metrics)) + out.Scope = in.Scope + return nil +} + +// Convert_v1beta1_QuotaSpec_To_core_QuotaSpec is an autogenerated conversion function. +func Convert_v1beta1_QuotaSpec_To_core_QuotaSpec(in *QuotaSpec, out *core.QuotaSpec, s conversion.Scope) error { + return autoConvert_v1beta1_QuotaSpec_To_core_QuotaSpec(in, out, s) +} + +func autoConvert_core_QuotaSpec_To_v1beta1_QuotaSpec(in *core.QuotaSpec, out *QuotaSpec, s conversion.Scope) error { + out.ClusterLifetimeDays = (*int)(unsafe.Pointer(in.ClusterLifetimeDays)) + out.Metrics = *(*v1.ResourceList)(unsafe.Pointer(&in.Metrics)) + out.Scope = in.Scope + return nil +} + +// Convert_core_QuotaSpec_To_v1beta1_QuotaSpec is an autogenerated conversion function. +func Convert_core_QuotaSpec_To_v1beta1_QuotaSpec(in *core.QuotaSpec, out *QuotaSpec, s conversion.Scope) error { + return autoConvert_core_QuotaSpec_To_v1beta1_QuotaSpec(in, out, s) +} + +func autoConvert_v1beta1_Region_To_core_Region(in *Region, out *core.Region, s conversion.Scope) error { + out.Name = in.Name + out.Zones = *(*[]core.AvailabilityZone)(unsafe.Pointer(&in.Zones)) + return nil +} + +// Convert_v1beta1_Region_To_core_Region is an autogenerated conversion function. +func Convert_v1beta1_Region_To_core_Region(in *Region, out *core.Region, s conversion.Scope) error { + return autoConvert_v1beta1_Region_To_core_Region(in, out, s) +} + +func autoConvert_core_Region_To_v1beta1_Region(in *core.Region, out *Region, s conversion.Scope) error { + out.Name = in.Name + out.Zones = *(*[]AvailabilityZone)(unsafe.Pointer(&in.Zones)) + return nil +} + +// Convert_core_Region_To_v1beta1_Region is an autogenerated conversion function. 
+func Convert_core_Region_To_v1beta1_Region(in *core.Region, out *Region, s conversion.Scope) error { + return autoConvert_core_Region_To_v1beta1_Region(in, out, s) +} + +func autoConvert_v1beta1_SecretBinding_To_core_SecretBinding(in *SecretBinding, out *core.SecretBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.SecretRef = in.SecretRef + out.Quotas = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Quotas)) + return nil +} + +// Convert_v1beta1_SecretBinding_To_core_SecretBinding is an autogenerated conversion function. +func Convert_v1beta1_SecretBinding_To_core_SecretBinding(in *SecretBinding, out *core.SecretBinding, s conversion.Scope) error { + return autoConvert_v1beta1_SecretBinding_To_core_SecretBinding(in, out, s) +} + +func autoConvert_core_SecretBinding_To_v1beta1_SecretBinding(in *core.SecretBinding, out *SecretBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.SecretRef = in.SecretRef + out.Quotas = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Quotas)) + return nil +} + +// Convert_core_SecretBinding_To_v1beta1_SecretBinding is an autogenerated conversion function. +func Convert_core_SecretBinding_To_v1beta1_SecretBinding(in *core.SecretBinding, out *SecretBinding, s conversion.Scope) error { + return autoConvert_core_SecretBinding_To_v1beta1_SecretBinding(in, out, s) +} + +func autoConvert_v1beta1_SecretBindingList_To_core_SecretBindingList(in *SecretBindingList, out *core.SecretBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.SecretBinding)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_SecretBindingList_To_core_SecretBindingList is an autogenerated conversion function. +func Convert_v1beta1_SecretBindingList_To_core_SecretBindingList(in *SecretBindingList, out *core.SecretBindingList, s conversion.Scope) error { + return autoConvert_v1beta1_SecretBindingList_To_core_SecretBindingList(in, out, s) +} + +func autoConvert_core_SecretBindingList_To_v1beta1_SecretBindingList(in *core.SecretBindingList, out *SecretBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]SecretBinding)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_SecretBindingList_To_v1beta1_SecretBindingList is an autogenerated conversion function. +func Convert_core_SecretBindingList_To_v1beta1_SecretBindingList(in *core.SecretBindingList, out *SecretBindingList, s conversion.Scope) error { + return autoConvert_core_SecretBindingList_To_v1beta1_SecretBindingList(in, out, s) +} + +func autoConvert_v1beta1_Seed_To_core_Seed(in *Seed, out *core.Seed, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_SeedSpec_To_core_SeedSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_SeedStatus_To_core_SeedStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Seed_To_core_Seed is an autogenerated conversion function. 
+func Convert_v1beta1_Seed_To_core_Seed(in *Seed, out *core.Seed, s conversion.Scope) error { + return autoConvert_v1beta1_Seed_To_core_Seed(in, out, s) +} + +func autoConvert_core_Seed_To_v1beta1_Seed(in *core.Seed, out *Seed, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_SeedSpec_To_v1beta1_SeedSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_SeedStatus_To_v1beta1_SeedStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Seed_To_v1beta1_Seed is an autogenerated conversion function. +func Convert_core_Seed_To_v1beta1_Seed(in *core.Seed, out *Seed, s conversion.Scope) error { + return autoConvert_core_Seed_To_v1beta1_Seed(in, out, s) +} + +func autoConvert_v1beta1_SeedBackup_To_core_SeedBackup(in *SeedBackup, out *core.SeedBackup, s conversion.Scope) error { + out.Provider = in.Provider + out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.Region = (*string)(unsafe.Pointer(in.Region)) + out.SecretRef = in.SecretRef + return nil +} + +// Convert_v1beta1_SeedBackup_To_core_SeedBackup is an autogenerated conversion function. +func Convert_v1beta1_SeedBackup_To_core_SeedBackup(in *SeedBackup, out *core.SeedBackup, s conversion.Scope) error { + return autoConvert_v1beta1_SeedBackup_To_core_SeedBackup(in, out, s) +} + +func autoConvert_core_SeedBackup_To_v1beta1_SeedBackup(in *core.SeedBackup, out *SeedBackup, s conversion.Scope) error { + out.Provider = in.Provider + out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.Region = (*string)(unsafe.Pointer(in.Region)) + out.SecretRef = in.SecretRef + return nil +} + +// Convert_core_SeedBackup_To_v1beta1_SeedBackup is an autogenerated conversion function. +func Convert_core_SeedBackup_To_v1beta1_SeedBackup(in *core.SeedBackup, out *SeedBackup, s conversion.Scope) error { + return autoConvert_core_SeedBackup_To_v1beta1_SeedBackup(in, out, s) +} + +func autoConvert_v1beta1_SeedDNS_To_core_SeedDNS(in *SeedDNS, out *core.SeedDNS, s conversion.Scope) error { + out.IngressDomain = in.IngressDomain + return nil +} + +// Convert_v1beta1_SeedDNS_To_core_SeedDNS is an autogenerated conversion function. +func Convert_v1beta1_SeedDNS_To_core_SeedDNS(in *SeedDNS, out *core.SeedDNS, s conversion.Scope) error { + return autoConvert_v1beta1_SeedDNS_To_core_SeedDNS(in, out, s) +} + +func autoConvert_core_SeedDNS_To_v1beta1_SeedDNS(in *core.SeedDNS, out *SeedDNS, s conversion.Scope) error { + out.IngressDomain = in.IngressDomain + return nil +} + +// Convert_core_SeedDNS_To_v1beta1_SeedDNS is an autogenerated conversion function. +func Convert_core_SeedDNS_To_v1beta1_SeedDNS(in *core.SeedDNS, out *SeedDNS, s conversion.Scope) error { + return autoConvert_core_SeedDNS_To_v1beta1_SeedDNS(in, out, s) +} + +func autoConvert_v1beta1_SeedList_To_core_SeedList(in *SeedList, out *core.SeedList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Seed)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_SeedList_To_core_SeedList is an autogenerated conversion function. 
+func Convert_v1beta1_SeedList_To_core_SeedList(in *SeedList, out *core.SeedList, s conversion.Scope) error { + return autoConvert_v1beta1_SeedList_To_core_SeedList(in, out, s) +} + +func autoConvert_core_SeedList_To_v1beta1_SeedList(in *core.SeedList, out *SeedList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Seed)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_SeedList_To_v1beta1_SeedList is an autogenerated conversion function. +func Convert_core_SeedList_To_v1beta1_SeedList(in *core.SeedList, out *SeedList, s conversion.Scope) error { + return autoConvert_core_SeedList_To_v1beta1_SeedList(in, out, s) +} + +func autoConvert_v1beta1_SeedNetworks_To_core_SeedNetworks(in *SeedNetworks, out *core.SeedNetworks, s conversion.Scope) error { + out.Nodes = (*string)(unsafe.Pointer(in.Nodes)) + out.Pods = in.Pods + out.Services = in.Services + out.ShootDefaults = (*core.ShootNetworks)(unsafe.Pointer(in.ShootDefaults)) + out.BlockCIDRs = *(*[]string)(unsafe.Pointer(&in.BlockCIDRs)) + return nil +} + +// Convert_v1beta1_SeedNetworks_To_core_SeedNetworks is an autogenerated conversion function. +func Convert_v1beta1_SeedNetworks_To_core_SeedNetworks(in *SeedNetworks, out *core.SeedNetworks, s conversion.Scope) error { + return autoConvert_v1beta1_SeedNetworks_To_core_SeedNetworks(in, out, s) +} + +func autoConvert_core_SeedNetworks_To_v1beta1_SeedNetworks(in *core.SeedNetworks, out *SeedNetworks, s conversion.Scope) error { + out.Nodes = (*string)(unsafe.Pointer(in.Nodes)) + out.Pods = in.Pods + out.Services = in.Services + out.ShootDefaults = (*ShootNetworks)(unsafe.Pointer(in.ShootDefaults)) + out.BlockCIDRs = *(*[]string)(unsafe.Pointer(&in.BlockCIDRs)) + return nil +} + +// Convert_core_SeedNetworks_To_v1beta1_SeedNetworks is an autogenerated conversion function. +func Convert_core_SeedNetworks_To_v1beta1_SeedNetworks(in *core.SeedNetworks, out *SeedNetworks, s conversion.Scope) error { + return autoConvert_core_SeedNetworks_To_v1beta1_SeedNetworks(in, out, s) +} + +func autoConvert_v1beta1_SeedProvider_To_core_SeedProvider(in *SeedProvider, out *core.SeedProvider, s conversion.Scope) error { + out.Type = in.Type + out.Region = in.Region + return nil +} + +// Convert_v1beta1_SeedProvider_To_core_SeedProvider is an autogenerated conversion function. +func Convert_v1beta1_SeedProvider_To_core_SeedProvider(in *SeedProvider, out *core.SeedProvider, s conversion.Scope) error { + return autoConvert_v1beta1_SeedProvider_To_core_SeedProvider(in, out, s) +} + +func autoConvert_core_SeedProvider_To_v1beta1_SeedProvider(in *core.SeedProvider, out *SeedProvider, s conversion.Scope) error { + out.Type = in.Type + out.Region = in.Region + return nil +} + +// Convert_core_SeedProvider_To_v1beta1_SeedProvider is an autogenerated conversion function. 
+func Convert_core_SeedProvider_To_v1beta1_SeedProvider(in *core.SeedProvider, out *SeedProvider, s conversion.Scope) error { + return autoConvert_core_SeedProvider_To_v1beta1_SeedProvider(in, out, s) +} + +func autoConvert_v1beta1_SeedSpec_To_core_SeedSpec(in *SeedSpec, out *core.SeedSpec, s conversion.Scope) error { + out.Backup = (*core.SeedBackup)(unsafe.Pointer(in.Backup)) + if err := Convert_v1beta1_SeedDNS_To_core_SeedDNS(&in.DNS, &out.DNS, s); err != nil { + return err + } + if err := Convert_v1beta1_SeedNetworks_To_core_SeedNetworks(&in.Networks, &out.Networks, s); err != nil { + return err + } + if err := Convert_v1beta1_SeedProvider_To_core_SeedProvider(&in.Provider, &out.Provider, s); err != nil { + return err + } + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.Taints = *(*[]core.SeedTaint)(unsafe.Pointer(&in.Taints)) + out.Volume = (*core.SeedVolume)(unsafe.Pointer(in.Volume)) + return nil +} + +// Convert_v1beta1_SeedSpec_To_core_SeedSpec is an autogenerated conversion function. +func Convert_v1beta1_SeedSpec_To_core_SeedSpec(in *SeedSpec, out *core.SeedSpec, s conversion.Scope) error { + return autoConvert_v1beta1_SeedSpec_To_core_SeedSpec(in, out, s) +} + +func autoConvert_core_SeedSpec_To_v1beta1_SeedSpec(in *core.SeedSpec, out *SeedSpec, s conversion.Scope) error { + out.Backup = (*SeedBackup)(unsafe.Pointer(in.Backup)) + if err := Convert_core_SeedDNS_To_v1beta1_SeedDNS(&in.DNS, &out.DNS, s); err != nil { + return err + } + if err := Convert_core_SeedNetworks_To_v1beta1_SeedNetworks(&in.Networks, &out.Networks, s); err != nil { + return err + } + if err := Convert_core_SeedProvider_To_v1beta1_SeedProvider(&in.Provider, &out.Provider, s); err != nil { + return err + } + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.Taints = *(*[]SeedTaint)(unsafe.Pointer(&in.Taints)) + out.Volume = (*SeedVolume)(unsafe.Pointer(in.Volume)) + return nil +} + +// Convert_core_SeedSpec_To_v1beta1_SeedSpec is an autogenerated conversion function. +func Convert_core_SeedSpec_To_v1beta1_SeedSpec(in *core.SeedSpec, out *SeedSpec, s conversion.Scope) error { + return autoConvert_core_SeedSpec_To_v1beta1_SeedSpec(in, out, s) +} + +func autoConvert_v1beta1_SeedStatus_To_core_SeedStatus(in *SeedStatus, out *core.SeedStatus, s conversion.Scope) error { + out.Gardener = (*core.Gardener)(unsafe.Pointer(in.Gardener)) + out.KubernetesVersion = (*string)(unsafe.Pointer(in.KubernetesVersion)) + out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions)) + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +// Convert_v1beta1_SeedStatus_To_core_SeedStatus is an autogenerated conversion function. +func Convert_v1beta1_SeedStatus_To_core_SeedStatus(in *SeedStatus, out *core.SeedStatus, s conversion.Scope) error { + return autoConvert_v1beta1_SeedStatus_To_core_SeedStatus(in, out, s) +} + +func autoConvert_core_SeedStatus_To_v1beta1_SeedStatus(in *core.SeedStatus, out *SeedStatus, s conversion.Scope) error { + out.Gardener = (*Gardener)(unsafe.Pointer(in.Gardener)) + out.KubernetesVersion = (*string)(unsafe.Pointer(in.KubernetesVersion)) + out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions)) + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +// Convert_core_SeedStatus_To_v1beta1_SeedStatus is an autogenerated conversion function. 
+func Convert_core_SeedStatus_To_v1beta1_SeedStatus(in *core.SeedStatus, out *SeedStatus, s conversion.Scope) error { + return autoConvert_core_SeedStatus_To_v1beta1_SeedStatus(in, out, s) +} + +func autoConvert_v1beta1_SeedTaint_To_core_SeedTaint(in *SeedTaint, out *core.SeedTaint, s conversion.Scope) error { + out.Key = in.Key + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_v1beta1_SeedTaint_To_core_SeedTaint is an autogenerated conversion function. +func Convert_v1beta1_SeedTaint_To_core_SeedTaint(in *SeedTaint, out *core.SeedTaint, s conversion.Scope) error { + return autoConvert_v1beta1_SeedTaint_To_core_SeedTaint(in, out, s) +} + +func autoConvert_core_SeedTaint_To_v1beta1_SeedTaint(in *core.SeedTaint, out *SeedTaint, s conversion.Scope) error { + out.Key = in.Key + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_core_SeedTaint_To_v1beta1_SeedTaint is an autogenerated conversion function. +func Convert_core_SeedTaint_To_v1beta1_SeedTaint(in *core.SeedTaint, out *SeedTaint, s conversion.Scope) error { + return autoConvert_core_SeedTaint_To_v1beta1_SeedTaint(in, out, s) +} + +func autoConvert_v1beta1_SeedVolume_To_core_SeedVolume(in *SeedVolume, out *core.SeedVolume, s conversion.Scope) error { + out.MinimumSize = (*resource.Quantity)(unsafe.Pointer(in.MinimumSize)) + out.Providers = *(*[]core.SeedVolumeProvider)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_v1beta1_SeedVolume_To_core_SeedVolume is an autogenerated conversion function. +func Convert_v1beta1_SeedVolume_To_core_SeedVolume(in *SeedVolume, out *core.SeedVolume, s conversion.Scope) error { + return autoConvert_v1beta1_SeedVolume_To_core_SeedVolume(in, out, s) +} + +func autoConvert_core_SeedVolume_To_v1beta1_SeedVolume(in *core.SeedVolume, out *SeedVolume, s conversion.Scope) error { + out.MinimumSize = (*resource.Quantity)(unsafe.Pointer(in.MinimumSize)) + out.Providers = *(*[]SeedVolumeProvider)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_core_SeedVolume_To_v1beta1_SeedVolume is an autogenerated conversion function. +func Convert_core_SeedVolume_To_v1beta1_SeedVolume(in *core.SeedVolume, out *SeedVolume, s conversion.Scope) error { + return autoConvert_core_SeedVolume_To_v1beta1_SeedVolume(in, out, s) +} + +func autoConvert_v1beta1_SeedVolumeProvider_To_core_SeedVolumeProvider(in *SeedVolumeProvider, out *core.SeedVolumeProvider, s conversion.Scope) error { + out.Purpose = in.Purpose + out.Name = in.Name + return nil +} + +// Convert_v1beta1_SeedVolumeProvider_To_core_SeedVolumeProvider is an autogenerated conversion function. +func Convert_v1beta1_SeedVolumeProvider_To_core_SeedVolumeProvider(in *SeedVolumeProvider, out *core.SeedVolumeProvider, s conversion.Scope) error { + return autoConvert_v1beta1_SeedVolumeProvider_To_core_SeedVolumeProvider(in, out, s) +} + +func autoConvert_core_SeedVolumeProvider_To_v1beta1_SeedVolumeProvider(in *core.SeedVolumeProvider, out *SeedVolumeProvider, s conversion.Scope) error { + out.Purpose = in.Purpose + out.Name = in.Name + return nil +} + +// Convert_core_SeedVolumeProvider_To_v1beta1_SeedVolumeProvider is an autogenerated conversion function. 
+func Convert_core_SeedVolumeProvider_To_v1beta1_SeedVolumeProvider(in *core.SeedVolumeProvider, out *SeedVolumeProvider, s conversion.Scope) error { + return autoConvert_core_SeedVolumeProvider_To_v1beta1_SeedVolumeProvider(in, out, s) +} + +func autoConvert_v1beta1_ServiceAccountConfig_To_core_ServiceAccountConfig(in *ServiceAccountConfig, out *core.ServiceAccountConfig, s conversion.Scope) error { + out.Issuer = (*string)(unsafe.Pointer(in.Issuer)) + out.SigningKeySecret = (*v1.LocalObjectReference)(unsafe.Pointer(in.SigningKeySecret)) + return nil +} + +// Convert_v1beta1_ServiceAccountConfig_To_core_ServiceAccountConfig is an autogenerated conversion function. +func Convert_v1beta1_ServiceAccountConfig_To_core_ServiceAccountConfig(in *ServiceAccountConfig, out *core.ServiceAccountConfig, s conversion.Scope) error { + return autoConvert_v1beta1_ServiceAccountConfig_To_core_ServiceAccountConfig(in, out, s) +} + +func autoConvert_core_ServiceAccountConfig_To_v1beta1_ServiceAccountConfig(in *core.ServiceAccountConfig, out *ServiceAccountConfig, s conversion.Scope) error { + out.Issuer = (*string)(unsafe.Pointer(in.Issuer)) + out.SigningKeySecret = (*v1.LocalObjectReference)(unsafe.Pointer(in.SigningKeySecret)) + return nil +} + +// Convert_core_ServiceAccountConfig_To_v1beta1_ServiceAccountConfig is an autogenerated conversion function. +func Convert_core_ServiceAccountConfig_To_v1beta1_ServiceAccountConfig(in *core.ServiceAccountConfig, out *ServiceAccountConfig, s conversion.Scope) error { + return autoConvert_core_ServiceAccountConfig_To_v1beta1_ServiceAccountConfig(in, out, s) +} + +func autoConvert_v1beta1_Shoot_To_core_Shoot(in *Shoot, out *core.Shoot, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ShootSpec_To_core_ShootSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ShootStatus_To_core_ShootStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Shoot_To_core_Shoot is an autogenerated conversion function. +func Convert_v1beta1_Shoot_To_core_Shoot(in *Shoot, out *core.Shoot, s conversion.Scope) error { + return autoConvert_v1beta1_Shoot_To_core_Shoot(in, out, s) +} + +func autoConvert_core_Shoot_To_v1beta1_Shoot(in *core.Shoot, out *Shoot, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ShootSpec_To_v1beta1_ShootSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ShootStatus_To_v1beta1_ShootStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Shoot_To_v1beta1_Shoot is an autogenerated conversion function. +func Convert_core_Shoot_To_v1beta1_Shoot(in *core.Shoot, out *Shoot, s conversion.Scope) error { + return autoConvert_core_Shoot_To_v1beta1_Shoot(in, out, s) +} + +func autoConvert_v1beta1_ShootList_To_core_ShootList(in *ShootList, out *core.ShootList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Shoot)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_ShootList_To_core_ShootList is an autogenerated conversion function. 
+func Convert_v1beta1_ShootList_To_core_ShootList(in *ShootList, out *core.ShootList, s conversion.Scope) error { + return autoConvert_v1beta1_ShootList_To_core_ShootList(in, out, s) +} + +func autoConvert_core_ShootList_To_v1beta1_ShootList(in *core.ShootList, out *ShootList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Shoot)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ShootList_To_v1beta1_ShootList is an autogenerated conversion function. +func Convert_core_ShootList_To_v1beta1_ShootList(in *core.ShootList, out *ShootList, s conversion.Scope) error { + return autoConvert_core_ShootList_To_v1beta1_ShootList(in, out, s) +} + +func autoConvert_v1beta1_ShootMachineImage_To_core_ShootMachineImage(in *ShootMachineImage, out *core.ShootMachineImage, s conversion.Scope) error { + out.Name = in.Name + out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.Version = in.Version + return nil +} + +// Convert_v1beta1_ShootMachineImage_To_core_ShootMachineImage is an autogenerated conversion function. +func Convert_v1beta1_ShootMachineImage_To_core_ShootMachineImage(in *ShootMachineImage, out *core.ShootMachineImage, s conversion.Scope) error { + return autoConvert_v1beta1_ShootMachineImage_To_core_ShootMachineImage(in, out, s) +} + +func autoConvert_core_ShootMachineImage_To_v1beta1_ShootMachineImage(in *core.ShootMachineImage, out *ShootMachineImage, s conversion.Scope) error { + out.Name = in.Name + out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.Version = in.Version + return nil +} + +// Convert_core_ShootMachineImage_To_v1beta1_ShootMachineImage is an autogenerated conversion function. +func Convert_core_ShootMachineImage_To_v1beta1_ShootMachineImage(in *core.ShootMachineImage, out *ShootMachineImage, s conversion.Scope) error { + return autoConvert_core_ShootMachineImage_To_v1beta1_ShootMachineImage(in, out, s) +} + +func autoConvert_v1beta1_ShootNetworks_To_core_ShootNetworks(in *ShootNetworks, out *core.ShootNetworks, s conversion.Scope) error { + out.Pods = (*string)(unsafe.Pointer(in.Pods)) + out.Services = (*string)(unsafe.Pointer(in.Services)) + return nil +} + +// Convert_v1beta1_ShootNetworks_To_core_ShootNetworks is an autogenerated conversion function. +func Convert_v1beta1_ShootNetworks_To_core_ShootNetworks(in *ShootNetworks, out *core.ShootNetworks, s conversion.Scope) error { + return autoConvert_v1beta1_ShootNetworks_To_core_ShootNetworks(in, out, s) +} + +func autoConvert_core_ShootNetworks_To_v1beta1_ShootNetworks(in *core.ShootNetworks, out *ShootNetworks, s conversion.Scope) error { + out.Pods = (*string)(unsafe.Pointer(in.Pods)) + out.Services = (*string)(unsafe.Pointer(in.Services)) + return nil +} + +// Convert_core_ShootNetworks_To_v1beta1_ShootNetworks is an autogenerated conversion function. 
+func Convert_core_ShootNetworks_To_v1beta1_ShootNetworks(in *core.ShootNetworks, out *ShootNetworks, s conversion.Scope) error { + return autoConvert_core_ShootNetworks_To_v1beta1_ShootNetworks(in, out, s) +} + +func autoConvert_v1beta1_ShootSpec_To_core_ShootSpec(in *ShootSpec, out *core.ShootSpec, s conversion.Scope) error { + out.Addons = (*core.Addons)(unsafe.Pointer(in.Addons)) + out.CloudProfileName = in.CloudProfileName + out.DNS = (*core.DNS)(unsafe.Pointer(in.DNS)) + out.Extensions = *(*[]core.Extension)(unsafe.Pointer(&in.Extensions)) + out.Hibernation = (*core.Hibernation)(unsafe.Pointer(in.Hibernation)) + if err := Convert_v1beta1_Kubernetes_To_core_Kubernetes(&in.Kubernetes, &out.Kubernetes, s); err != nil { + return err + } + if err := Convert_v1beta1_Networking_To_core_Networking(&in.Networking, &out.Networking, s); err != nil { + return err + } + out.Maintenance = (*core.Maintenance)(unsafe.Pointer(in.Maintenance)) + out.Monitoring = (*core.Monitoring)(unsafe.Pointer(in.Monitoring)) + if err := Convert_v1beta1_Provider_To_core_Provider(&in.Provider, &out.Provider, s); err != nil { + return err + } + out.Purpose = (*core.ShootPurpose)(unsafe.Pointer(in.Purpose)) + out.Region = in.Region + out.SecretBindingName = in.SecretBindingName + out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) + return nil +} + +// Convert_v1beta1_ShootSpec_To_core_ShootSpec is an autogenerated conversion function. +func Convert_v1beta1_ShootSpec_To_core_ShootSpec(in *ShootSpec, out *core.ShootSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ShootSpec_To_core_ShootSpec(in, out, s) +} + +func autoConvert_core_ShootSpec_To_v1beta1_ShootSpec(in *core.ShootSpec, out *ShootSpec, s conversion.Scope) error { + out.Addons = (*Addons)(unsafe.Pointer(in.Addons)) + out.CloudProfileName = in.CloudProfileName + out.DNS = (*DNS)(unsafe.Pointer(in.DNS)) + out.Extensions = *(*[]Extension)(unsafe.Pointer(&in.Extensions)) + out.Hibernation = (*Hibernation)(unsafe.Pointer(in.Hibernation)) + if err := Convert_core_Kubernetes_To_v1beta1_Kubernetes(&in.Kubernetes, &out.Kubernetes, s); err != nil { + return err + } + if err := Convert_core_Networking_To_v1beta1_Networking(&in.Networking, &out.Networking, s); err != nil { + return err + } + out.Maintenance = (*Maintenance)(unsafe.Pointer(in.Maintenance)) + out.Monitoring = (*Monitoring)(unsafe.Pointer(in.Monitoring)) + if err := Convert_core_Provider_To_v1beta1_Provider(&in.Provider, &out.Provider, s); err != nil { + return err + } + out.Purpose = (*ShootPurpose)(unsafe.Pointer(in.Purpose)) + out.Region = in.Region + out.SecretBindingName = in.SecretBindingName + out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) + return nil +} + +// Convert_core_ShootSpec_To_v1beta1_ShootSpec is an autogenerated conversion function. 
+func Convert_core_ShootSpec_To_v1beta1_ShootSpec(in *core.ShootSpec, out *ShootSpec, s conversion.Scope) error { + return autoConvert_core_ShootSpec_To_v1beta1_ShootSpec(in, out, s) +} + +func autoConvert_v1beta1_ShootStatus_To_core_ShootStatus(in *ShootStatus, out *core.ShootStatus, s conversion.Scope) error { + out.Conditions = *(*[]core.Condition)(unsafe.Pointer(&in.Conditions)) + out.Constraints = *(*[]core.Condition)(unsafe.Pointer(&in.Constraints)) + if err := Convert_v1beta1_Gardener_To_core_Gardener(&in.Gardener, &out.Gardener, s); err != nil { + return err + } + out.IsHibernated = in.IsHibernated + out.LastOperation = (*core.LastOperation)(unsafe.Pointer(in.LastOperation)) + out.LastErrors = *(*[]core.LastError)(unsafe.Pointer(&in.LastErrors)) + out.ObservedGeneration = in.ObservedGeneration + out.RetryCycleStartTime = (*metav1.Time)(unsafe.Pointer(in.RetryCycleStartTime)) + out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) + out.TechnicalID = in.TechnicalID + out.UID = types.UID(in.UID) + return nil +} + +// Convert_v1beta1_ShootStatus_To_core_ShootStatus is an autogenerated conversion function. +func Convert_v1beta1_ShootStatus_To_core_ShootStatus(in *ShootStatus, out *core.ShootStatus, s conversion.Scope) error { + return autoConvert_v1beta1_ShootStatus_To_core_ShootStatus(in, out, s) +} + +func autoConvert_core_ShootStatus_To_v1beta1_ShootStatus(in *core.ShootStatus, out *ShootStatus, s conversion.Scope) error { + out.Conditions = *(*[]Condition)(unsafe.Pointer(&in.Conditions)) + out.Constraints = *(*[]Condition)(unsafe.Pointer(&in.Constraints)) + if err := Convert_core_Gardener_To_v1beta1_Gardener(&in.Gardener, &out.Gardener, s); err != nil { + return err + } + out.IsHibernated = in.IsHibernated + out.LastOperation = (*LastOperation)(unsafe.Pointer(in.LastOperation)) + out.LastErrors = *(*[]LastError)(unsafe.Pointer(&in.LastErrors)) + out.ObservedGeneration = in.ObservedGeneration + out.RetryCycleStartTime = (*metav1.Time)(unsafe.Pointer(in.RetryCycleStartTime)) + out.SeedName = (*string)(unsafe.Pointer(in.SeedName)) + out.TechnicalID = in.TechnicalID + out.UID = types.UID(in.UID) + return nil +} + +// Convert_core_ShootStatus_To_v1beta1_ShootStatus is an autogenerated conversion function. +func Convert_core_ShootStatus_To_v1beta1_ShootStatus(in *core.ShootStatus, out *ShootStatus, s conversion.Scope) error { + return autoConvert_core_ShootStatus_To_v1beta1_ShootStatus(in, out, s) +} + +func autoConvert_v1beta1_Volume_To_core_Volume(in *Volume, out *core.Volume, s conversion.Scope) error { + out.Name = (*string)(unsafe.Pointer(in.Name)) + out.Type = (*string)(unsafe.Pointer(in.Type)) + out.Size = in.Size + out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted)) + return nil +} + +// Convert_v1beta1_Volume_To_core_Volume is an autogenerated conversion function. +func Convert_v1beta1_Volume_To_core_Volume(in *Volume, out *core.Volume, s conversion.Scope) error { + return autoConvert_v1beta1_Volume_To_core_Volume(in, out, s) +} + +func autoConvert_core_Volume_To_v1beta1_Volume(in *core.Volume, out *Volume, s conversion.Scope) error { + out.Name = (*string)(unsafe.Pointer(in.Name)) + out.Type = (*string)(unsafe.Pointer(in.Type)) + out.Size = in.Size + out.Encrypted = (*bool)(unsafe.Pointer(in.Encrypted)) + return nil +} + +// Convert_core_Volume_To_v1beta1_Volume is an autogenerated conversion function. 
+func Convert_core_Volume_To_v1beta1_Volume(in *core.Volume, out *Volume, s conversion.Scope) error { + return autoConvert_core_Volume_To_v1beta1_Volume(in, out, s) +} + +func autoConvert_v1beta1_VolumeType_To_core_VolumeType(in *VolumeType, out *core.VolumeType, s conversion.Scope) error { + out.Class = in.Class + out.Name = in.Name + out.Usable = (*bool)(unsafe.Pointer(in.Usable)) + return nil +} + +// Convert_v1beta1_VolumeType_To_core_VolumeType is an autogenerated conversion function. +func Convert_v1beta1_VolumeType_To_core_VolumeType(in *VolumeType, out *core.VolumeType, s conversion.Scope) error { + return autoConvert_v1beta1_VolumeType_To_core_VolumeType(in, out, s) +} + +func autoConvert_core_VolumeType_To_v1beta1_VolumeType(in *core.VolumeType, out *VolumeType, s conversion.Scope) error { + out.Class = in.Class + out.Name = in.Name + out.Usable = (*bool)(unsafe.Pointer(in.Usable)) + return nil +} + +// Convert_core_VolumeType_To_v1beta1_VolumeType is an autogenerated conversion function. +func Convert_core_VolumeType_To_v1beta1_VolumeType(in *core.VolumeType, out *VolumeType, s conversion.Scope) error { + return autoConvert_core_VolumeType_To_v1beta1_VolumeType(in, out, s) +} + +func autoConvert_v1beta1_Worker_To_core_Worker(in *Worker, out *core.Worker, s conversion.Scope) error { + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + out.Kubernetes = (*core.WorkerKubernetes)(unsafe.Pointer(in.Kubernetes)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Name = in.Name + if err := Convert_v1beta1_Machine_To_core_Machine(&in.Machine, &out.Machine, s); err != nil { + return err + } + out.Maximum = in.Maximum + out.Minimum = in.Minimum + out.MaxSurge = (*intstr.IntOrString)(unsafe.Pointer(in.MaxSurge)) + out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable)) + out.ProviderConfig = (*core.ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) + out.Volume = (*core.Volume)(unsafe.Pointer(in.Volume)) + out.DataVolumes = *(*[]core.Volume)(unsafe.Pointer(&in.DataVolumes)) + out.KubeletDataVolumeName = (*string)(unsafe.Pointer(in.KubeletDataVolumeName)) + out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones)) + return nil +} + +// Convert_v1beta1_Worker_To_core_Worker is an autogenerated conversion function. 
+func Convert_v1beta1_Worker_To_core_Worker(in *Worker, out *core.Worker, s conversion.Scope) error { + return autoConvert_v1beta1_Worker_To_core_Worker(in, out, s) +} + +func autoConvert_core_Worker_To_v1beta1_Worker(in *core.Worker, out *Worker, s conversion.Scope) error { + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.CABundle = (*string)(unsafe.Pointer(in.CABundle)) + out.Kubernetes = (*WorkerKubernetes)(unsafe.Pointer(in.Kubernetes)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Name = in.Name + if err := Convert_core_Machine_To_v1beta1_Machine(&in.Machine, &out.Machine, s); err != nil { + return err + } + out.Maximum = in.Maximum + out.Minimum = in.Minimum + out.MaxSurge = (*intstr.IntOrString)(unsafe.Pointer(in.MaxSurge)) + out.MaxUnavailable = (*intstr.IntOrString)(unsafe.Pointer(in.MaxUnavailable)) + out.ProviderConfig = (*ProviderConfig)(unsafe.Pointer(in.ProviderConfig)) + out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) + out.Volume = (*Volume)(unsafe.Pointer(in.Volume)) + out.DataVolumes = *(*[]Volume)(unsafe.Pointer(&in.DataVolumes)) + out.KubeletDataVolumeName = (*string)(unsafe.Pointer(in.KubeletDataVolumeName)) + out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones)) + return nil +} + +// Convert_core_Worker_To_v1beta1_Worker is an autogenerated conversion function. +func Convert_core_Worker_To_v1beta1_Worker(in *core.Worker, out *Worker, s conversion.Scope) error { + return autoConvert_core_Worker_To_v1beta1_Worker(in, out, s) +} + +func autoConvert_v1beta1_WorkerKubernetes_To_core_WorkerKubernetes(in *WorkerKubernetes, out *core.WorkerKubernetes, s conversion.Scope) error { + out.Kubelet = (*core.KubeletConfig)(unsafe.Pointer(in.Kubelet)) + return nil +} + +// Convert_v1beta1_WorkerKubernetes_To_core_WorkerKubernetes is an autogenerated conversion function. +func Convert_v1beta1_WorkerKubernetes_To_core_WorkerKubernetes(in *WorkerKubernetes, out *core.WorkerKubernetes, s conversion.Scope) error { + return autoConvert_v1beta1_WorkerKubernetes_To_core_WorkerKubernetes(in, out, s) +} + +func autoConvert_core_WorkerKubernetes_To_v1beta1_WorkerKubernetes(in *core.WorkerKubernetes, out *WorkerKubernetes, s conversion.Scope) error { + out.Kubelet = (*KubeletConfig)(unsafe.Pointer(in.Kubelet)) + return nil +} + +// Convert_core_WorkerKubernetes_To_v1beta1_WorkerKubernetes is an autogenerated conversion function. +func Convert_core_WorkerKubernetes_To_v1beta1_WorkerKubernetes(in *core.WorkerKubernetes, out *WorkerKubernetes, s conversion.Scope) error { + return autoConvert_core_WorkerKubernetes_To_v1beta1_WorkerKubernetes(in, out, s) +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..162d9a15d --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,3180 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Addon) DeepCopyInto(out *Addon) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addon. +func (in *Addon) DeepCopy() *Addon { + if in == nil { + return nil + } + out := new(Addon) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Addons) DeepCopyInto(out *Addons) { + *out = *in + if in.KubernetesDashboard != nil { + in, out := &in.KubernetesDashboard, &out.KubernetesDashboard + *out = new(KubernetesDashboard) + (*in).DeepCopyInto(*out) + } + if in.NginxIngress != nil { + in, out := &in.NginxIngress, &out.NginxIngress + *out = new(NginxIngress) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addons. +func (in *Addons) DeepCopy() *Addons { + if in == nil { + return nil + } + out := new(Addons) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPlugin) DeepCopyInto(out *AdmissionPlugin) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPlugin. +func (in *AdmissionPlugin) DeepCopy() *AdmissionPlugin { + if in == nil { + return nil + } + out := new(AdmissionPlugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Alerting) DeepCopyInto(out *Alerting) { + *out = *in + if in.EmailReceivers != nil { + in, out := &in.EmailReceivers, &out.EmailReceivers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alerting. +func (in *Alerting) DeepCopy() *Alerting { + if in == nil { + return nil + } + out := new(Alerting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditConfig) DeepCopyInto(out *AuditConfig) { + *out = *in + if in.AuditPolicy != nil { + in, out := &in.AuditPolicy, &out.AuditPolicy + *out = new(AuditPolicy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig. 
+func (in *AuditConfig) DeepCopy() *AuditConfig { + if in == nil { + return nil + } + out := new(AuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditPolicy) DeepCopyInto(out *AuditPolicy) { + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(v1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditPolicy. +func (in *AuditPolicy) DeepCopy() *AuditPolicy { + if in == nil { + return nil + } + out := new(AuditPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvailabilityZone) DeepCopyInto(out *AvailabilityZone) { + *out = *in + if in.UnavailableMachineTypes != nil { + in, out := &in.UnavailableMachineTypes, &out.UnavailableMachineTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UnavailableVolumeTypes != nil { + in, out := &in.UnavailableVolumeTypes, &out.UnavailableVolumeTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailabilityZone. +func (in *AvailabilityZone) DeepCopy() *AvailabilityZone { + if in == nil { + return nil + } + out := new(AvailabilityZone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucket) DeepCopyInto(out *BackupBucket) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucket. +func (in *BackupBucket) DeepCopy() *BackupBucket { + if in == nil { + return nil + } + out := new(BackupBucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupBucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketList) DeepCopyInto(out *BackupBucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupBucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketList. +func (in *BackupBucketList) DeepCopy() *BackupBucketList { + if in == nil { + return nil + } + out := new(BackupBucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupBucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupBucketProvider) DeepCopyInto(out *BackupBucketProvider) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketProvider. +func (in *BackupBucketProvider) DeepCopy() *BackupBucketProvider { + if in == nil { + return nil + } + out := new(BackupBucketProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketSpec) DeepCopyInto(out *BackupBucketSpec) { + *out = *in + out.Provider = in.Provider + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + out.SecretRef = in.SecretRef + if in.SeedName != nil { + in, out := &in.SeedName, &out.SeedName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketSpec. +func (in *BackupBucketSpec) DeepCopy() *BackupBucketSpec { + if in == nil { + return nil + } + out := new(BackupBucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketStatus) DeepCopyInto(out *BackupBucketStatus) { + *out = *in + if in.ProviderStatus != nil { + in, out := &in.ProviderStatus, &out.ProviderStatus + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.LastOperation != nil { + in, out := &in.LastOperation, &out.LastOperation + *out = new(LastOperation) + (*in).DeepCopyInto(*out) + } + if in.LastError != nil { + in, out := &in.LastError, &out.LastError + *out = new(LastError) + (*in).DeepCopyInto(*out) + } + if in.GeneratedSecretRef != nil { + in, out := &in.GeneratedSecretRef, &out.GeneratedSecretRef + *out = new(v1.SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketStatus. +func (in *BackupBucketStatus) DeepCopy() *BackupBucketStatus { + if in == nil { + return nil + } + out := new(BackupBucketStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntry) DeepCopyInto(out *BackupEntry) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntry. +func (in *BackupEntry) DeepCopy() *BackupEntry { + if in == nil { + return nil + } + out := new(BackupEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupEntry) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupEntryList) DeepCopyInto(out *BackupEntryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryList. +func (in *BackupEntryList) DeepCopy() *BackupEntryList { + if in == nil { + return nil + } + out := new(BackupEntryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupEntryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntrySpec) DeepCopyInto(out *BackupEntrySpec) { + *out = *in + if in.SeedName != nil { + in, out := &in.SeedName, &out.SeedName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntrySpec. +func (in *BackupEntrySpec) DeepCopy() *BackupEntrySpec { + if in == nil { + return nil + } + out := new(BackupEntrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntryStatus) DeepCopyInto(out *BackupEntryStatus) { + *out = *in + if in.LastOperation != nil { + in, out := &in.LastOperation, &out.LastOperation + *out = new(LastOperation) + (*in).DeepCopyInto(*out) + } + if in.LastError != nil { + in, out := &in.LastError, &out.LastError + *out = new(LastError) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryStatus. +func (in *BackupEntryStatus) DeepCopy() *BackupEntryStatus { + if in == nil { + return nil + } + out := new(BackupEntryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudInfo) DeepCopyInto(out *CloudInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInfo. +func (in *CloudInfo) DeepCopy() *CloudInfo { + if in == nil { + return nil + } + out := new(CloudInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudProfile) DeepCopyInto(out *CloudProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfile. +func (in *CloudProfile) DeepCopy() *CloudProfile { + if in == nil { + return nil + } + out := new(CloudProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudProfileList) DeepCopyInto(out *CloudProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfileList. +func (in *CloudProfileList) DeepCopy() *CloudProfileList { + if in == nil { + return nil + } + out := new(CloudProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudProfileSpec) DeepCopyInto(out *CloudProfileSpec) { + *out = *in + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = new(string) + **out = **in + } + in.Kubernetes.DeepCopyInto(&out.Kubernetes) + if in.MachineImages != nil { + in, out := &in.MachineImages, &out.MachineImages + *out = make([]MachineImage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MachineTypes != nil { + in, out := &in.MachineTypes, &out.MachineTypes + *out = make([]MachineType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]Region, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SeedSelector != nil { + in, out := &in.SeedSelector, &out.SeedSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.VolumeTypes != nil { + in, out := &in.VolumeTypes, &out.VolumeTypes + *out = make([]VolumeType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfileSpec. +func (in *CloudProfileSpec) DeepCopy() *CloudProfileSpec { + if in == nil { + return nil + } + out := new(CloudProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterAutoscaler) DeepCopyInto(out *ClusterAutoscaler) { + *out = *in + if in.ScaleDownDelayAfterAdd != nil { + in, out := &in.ScaleDownDelayAfterAdd, &out.ScaleDownDelayAfterAdd + *out = new(metav1.Duration) + **out = **in + } + if in.ScaleDownDelayAfterDelete != nil { + in, out := &in.ScaleDownDelayAfterDelete, &out.ScaleDownDelayAfterDelete + *out = new(metav1.Duration) + **out = **in + } + if in.ScaleDownDelayAfterFailure != nil { + in, out := &in.ScaleDownDelayAfterFailure, &out.ScaleDownDelayAfterFailure + *out = new(metav1.Duration) + **out = **in + } + if in.ScaleDownUnneededTime != nil { + in, out := &in.ScaleDownUnneededTime, &out.ScaleDownUnneededTime + *out = new(metav1.Duration) + **out = **in + } + if in.ScaleDownUtilizationThreshold != nil { + in, out := &in.ScaleDownUtilizationThreshold, &out.ScaleDownUtilizationThreshold + *out = new(float64) + **out = **in + } + if in.ScanInterval != nil { + in, out := &in.ScanInterval, &out.ScanInterval + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscaler. +func (in *ClusterAutoscaler) DeepCopy() *ClusterAutoscaler { + if in == nil { + return nil + } + out := new(ClusterAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterInfo) DeepCopyInto(out *ClusterInfo) { + *out = *in + out.Cloud = in.Cloud + out.Kubernetes = in.Kubernetes + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInfo. +func (in *ClusterInfo) DeepCopy() *ClusterInfo { + if in == nil { + return nil + } + out := new(ClusterInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerDeployment) DeepCopyInto(out *ControllerDeployment) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerDeployment. +func (in *ControllerDeployment) DeepCopy() *ControllerDeployment { + if in == nil { + return nil + } + out := new(ControllerDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallation) DeepCopyInto(out *ControllerInstallation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallation. 
+func (in *ControllerInstallation) DeepCopy() *ControllerInstallation { + if in == nil { + return nil + } + out := new(ControllerInstallation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerInstallation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallationList) DeepCopyInto(out *ControllerInstallationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerInstallation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationList. +func (in *ControllerInstallationList) DeepCopy() *ControllerInstallationList { + if in == nil { + return nil + } + out := new(ControllerInstallationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerInstallationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallationSpec) DeepCopyInto(out *ControllerInstallationSpec) { + *out = *in + out.RegistrationRef = in.RegistrationRef + out.SeedRef = in.SeedRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationSpec. +func (in *ControllerInstallationSpec) DeepCopy() *ControllerInstallationSpec { + if in == nil { + return nil + } + out := new(ControllerInstallationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallationStatus) DeepCopyInto(out *ControllerInstallationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProviderStatus != nil { + in, out := &in.ProviderStatus, &out.ProviderStatus + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationStatus. +func (in *ControllerInstallationStatus) DeepCopy() *ControllerInstallationStatus { + if in == nil { + return nil + } + out := new(ControllerInstallationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRegistration) DeepCopyInto(out *ControllerRegistration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistration. 
+func (in *ControllerRegistration) DeepCopy() *ControllerRegistration { + if in == nil { + return nil + } + out := new(ControllerRegistration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRegistration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRegistrationList) DeepCopyInto(out *ControllerRegistrationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerRegistration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistrationList. +func (in *ControllerRegistrationList) DeepCopy() *ControllerRegistrationList { + if in == nil { + return nil + } + out := new(ControllerRegistrationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRegistrationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRegistrationSpec) DeepCopyInto(out *ControllerRegistrationSpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ControllerResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Deployment != nil { + in, out := &in.Deployment, &out.Deployment + *out = new(ControllerDeployment) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistrationSpec. +func (in *ControllerRegistrationSpec) DeepCopy() *ControllerRegistrationSpec { + if in == nil { + return nil + } + out := new(ControllerRegistrationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerResource) DeepCopyInto(out *ControllerResource) { + *out = *in + if in.GloballyEnabled != nil { + in, out := &in.GloballyEnabled, &out.GloballyEnabled + *out = new(bool) + **out = **in + } + if in.ReconcileTimeout != nil { + in, out := &in.ReconcileTimeout, &out.ReconcileTimeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerResource. +func (in *ControllerResource) DeepCopy() *ControllerResource { + if in == nil { + return nil + } + out := new(ControllerResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNS) DeepCopyInto(out *DNS) { + *out = *in + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]DNSProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { + if in == nil { + return nil + } + out := new(DNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSIncludeExclude) DeepCopyInto(out *DNSIncludeExclude) { + *out = *in + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Exclude != nil { + in, out := &in.Exclude, &out.Exclude + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSIncludeExclude. +func (in *DNSIncludeExclude) DeepCopy() *DNSIncludeExclude { + if in == nil { + return nil + } + out := new(DNSIncludeExclude) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSProvider) DeepCopyInto(out *DNSProvider) { + *out = *in + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = new(DNSIncludeExclude) + (*in).DeepCopyInto(*out) + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = new(DNSIncludeExclude) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProvider. +func (in *DNSProvider) DeepCopy() *DNSProvider { + if in == nil { + return nil + } + out := new(DNSProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpirableVersion) DeepCopyInto(out *ExpirableVersion) { + *out = *in + if in.ExpirationDate != nil { + in, out := &in.ExpirationDate, &out.ExpirationDate + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpirableVersion. +func (in *ExpirableVersion) DeepCopy() *ExpirableVersion { + if in == nil { + return nil + } + out := new(ExpirableVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Extension) DeepCopyInto(out *Extension) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extension. +func (in *Extension) DeepCopy() *Extension { + if in == nil { + return nil + } + out := new(Extension) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gardener) DeepCopyInto(out *Gardener) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gardener. +func (in *Gardener) DeepCopy() *Gardener { + if in == nil { + return nil + } + out := new(Gardener) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Hibernation) DeepCopyInto(out *Hibernation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Schedules != nil { + in, out := &in.Schedules, &out.Schedules + *out = make([]HibernationSchedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hibernation. +func (in *Hibernation) DeepCopy() *Hibernation { + if in == nil { + return nil + } + out := new(Hibernation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HibernationSchedule) DeepCopyInto(out *HibernationSchedule) { + *out = *in + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HibernationSchedule. +func (in *HibernationSchedule) DeepCopy() *HibernationSchedule { + if in == nil { + return nil + } + out := new(HibernationSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HorizontalPodAutoscalerConfig) DeepCopyInto(out *HorizontalPodAutoscalerConfig) { + *out = *in + if in.CPUInitializationPeriod != nil { + in, out := &in.CPUInitializationPeriod, &out.CPUInitializationPeriod + *out = new(metav1.Duration) + **out = **in + } + if in.DownscaleDelay != nil { + in, out := &in.DownscaleDelay, &out.DownscaleDelay + *out = new(metav1.Duration) + **out = **in + } + if in.DownscaleStabilization != nil { + in, out := &in.DownscaleStabilization, &out.DownscaleStabilization + *out = new(metav1.Duration) + **out = **in + } + if in.InitialReadinessDelay != nil { + in, out := &in.InitialReadinessDelay, &out.InitialReadinessDelay + *out = new(metav1.Duration) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(metav1.Duration) + **out = **in + } + if in.Tolerance != nil { + in, out := &in.Tolerance, &out.Tolerance + *out = new(float64) + **out = **in + } + if in.UpscaleDelay != nil { + in, out := &in.UpscaleDelay, &out.UpscaleDelay + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerConfig. +func (in *HorizontalPodAutoscalerConfig) DeepCopy() *HorizontalPodAutoscalerConfig { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.AdmissionPlugins != nil { + in, out := &in.AdmissionPlugins, &out.AdmissionPlugins + *out = make([]AdmissionPlugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.APIAudiences != nil { + in, out := &in.APIAudiences, &out.APIAudiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AuditConfig != nil { + in, out := &in.AuditConfig, &out.AuditConfig + *out = new(AuditConfig) + (*in).DeepCopyInto(*out) + } + if in.EnableBasicAuthentication != nil { + in, out := &in.EnableBasicAuthentication, &out.EnableBasicAuthentication + *out = new(bool) + **out = **in + } + if in.OIDCConfig != nil { + in, out := &in.OIDCConfig, &out.OIDCConfig + *out = new(OIDCConfig) + (*in).DeepCopyInto(*out) + } + if in.RuntimeConfig != nil { + in, out := &in.RuntimeConfig, &out.RuntimeConfig + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServiceAccountConfig != nil { + in, out := &in.ServiceAccountConfig, &out.ServiceAccountConfig + *out = new(ServiceAccountConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerConfig. +func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.HorizontalPodAutoscalerConfig != nil { + in, out := &in.HorizontalPodAutoscalerConfig, &out.HorizontalPodAutoscalerConfig + *out = new(HorizontalPodAutoscalerConfig) + (*in).DeepCopyInto(*out) + } + if in.NodeCIDRMaskSize != nil { + in, out := &in.NodeCIDRMaskSize, &out.NodeCIDRMaskSize + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerConfig. +func (in *KubeControllerManagerConfig) DeepCopy() *KubeControllerManagerConfig { + if in == nil { + return nil + } + out := new(KubeControllerManagerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxyConfig) DeepCopyInto(out *KubeProxyConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(ProxyMode) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfig. +func (in *KubeProxyConfig) DeepCopy() *KubeProxyConfig { + if in == nil { + return nil + } + out := new(KubeProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfig. +func (in *KubeSchedulerConfig) DeepCopy() *KubeSchedulerConfig { + if in == nil { + return nil + } + out := new(KubeSchedulerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeletConfig) DeepCopyInto(out *KubeletConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.CPUCFSQuota != nil { + in, out := &in.CPUCFSQuota, &out.CPUCFSQuota + *out = new(bool) + **out = **in + } + if in.CPUManagerPolicy != nil { + in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy + *out = new(string) + **out = **in + } + if in.EvictionHard != nil { + in, out := &in.EvictionHard, &out.EvictionHard + *out = new(KubeletConfigEviction) + (*in).DeepCopyInto(*out) + } + if in.EvictionMaxPodGracePeriod != nil { + in, out := &in.EvictionMaxPodGracePeriod, &out.EvictionMaxPodGracePeriod + *out = new(int32) + **out = **in + } + if in.EvictionMinimumReclaim != nil { + in, out := &in.EvictionMinimumReclaim, &out.EvictionMinimumReclaim + *out = new(KubeletConfigEvictionMinimumReclaim) + (*in).DeepCopyInto(*out) + } + if in.EvictionPressureTransitionPeriod != nil { + in, out := &in.EvictionPressureTransitionPeriod, &out.EvictionPressureTransitionPeriod + *out = new(metav1.Duration) + **out = **in + } + if in.EvictionSoft != nil { + in, out := &in.EvictionSoft, &out.EvictionSoft + *out = new(KubeletConfigEviction) + (*in).DeepCopyInto(*out) + } + if in.EvictionSoftGracePeriod != nil { + in, out := &in.EvictionSoftGracePeriod, &out.EvictionSoftGracePeriod + *out = new(KubeletConfigEvictionSoftGracePeriod) + (*in).DeepCopyInto(*out) + } + if in.MaxPods != nil { + in, out := &in.MaxPods, &out.MaxPods + *out = new(int32) + **out = **in + } + if in.PodPIDsLimit != nil { + in, out := &in.PodPIDsLimit, &out.PodPIDsLimit + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfig. +func (in *KubeletConfig) DeepCopy() *KubeletConfig { + if in == nil { + return nil + } + out := new(KubeletConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfigEviction) DeepCopyInto(out *KubeletConfigEviction) { + *out = *in + if in.MemoryAvailable != nil { + in, out := &in.MemoryAvailable, &out.MemoryAvailable + *out = new(string) + **out = **in + } + if in.ImageFSAvailable != nil { + in, out := &in.ImageFSAvailable, &out.ImageFSAvailable + *out = new(string) + **out = **in + } + if in.ImageFSInodesFree != nil { + in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree + *out = new(string) + **out = **in + } + if in.NodeFSAvailable != nil { + in, out := &in.NodeFSAvailable, &out.NodeFSAvailable + *out = new(string) + **out = **in + } + if in.NodeFSInodesFree != nil { + in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEviction. +func (in *KubeletConfigEviction) DeepCopy() *KubeletConfigEviction { + if in == nil { + return nil + } + out := new(KubeletConfigEviction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeletConfigEvictionMinimumReclaim) DeepCopyInto(out *KubeletConfigEvictionMinimumReclaim) { + *out = *in + if in.MemoryAvailable != nil { + in, out := &in.MemoryAvailable, &out.MemoryAvailable + x := (*in).DeepCopy() + *out = &x + } + if in.ImageFSAvailable != nil { + in, out := &in.ImageFSAvailable, &out.ImageFSAvailable + x := (*in).DeepCopy() + *out = &x + } + if in.ImageFSInodesFree != nil { + in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree + x := (*in).DeepCopy() + *out = &x + } + if in.NodeFSAvailable != nil { + in, out := &in.NodeFSAvailable, &out.NodeFSAvailable + x := (*in).DeepCopy() + *out = &x + } + if in.NodeFSInodesFree != nil { + in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEvictionMinimumReclaim. +func (in *KubeletConfigEvictionMinimumReclaim) DeepCopy() *KubeletConfigEvictionMinimumReclaim { + if in == nil { + return nil + } + out := new(KubeletConfigEvictionMinimumReclaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopyInto(out *KubeletConfigEvictionSoftGracePeriod) { + *out = *in + if in.MemoryAvailable != nil { + in, out := &in.MemoryAvailable, &out.MemoryAvailable + *out = new(metav1.Duration) + **out = **in + } + if in.ImageFSAvailable != nil { + in, out := &in.ImageFSAvailable, &out.ImageFSAvailable + *out = new(metav1.Duration) + **out = **in + } + if in.ImageFSInodesFree != nil { + in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree + *out = new(metav1.Duration) + **out = **in + } + if in.NodeFSAvailable != nil { + in, out := &in.NodeFSAvailable, &out.NodeFSAvailable + *out = new(metav1.Duration) + **out = **in + } + if in.NodeFSInodesFree != nil { + in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEvictionSoftGracePeriod. +func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopy() *KubeletConfigEvictionSoftGracePeriod { + if in == nil { + return nil + } + out := new(KubeletConfigEvictionSoftGracePeriod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Kubernetes) DeepCopyInto(out *Kubernetes) { + *out = *in + if in.AllowPrivilegedContainers != nil { + in, out := &in.AllowPrivilegedContainers, &out.AllowPrivilegedContainers + *out = new(bool) + **out = **in + } + if in.ClusterAutoscaler != nil { + in, out := &in.ClusterAutoscaler, &out.ClusterAutoscaler + *out = new(ClusterAutoscaler) + (*in).DeepCopyInto(*out) + } + if in.KubeAPIServer != nil { + in, out := &in.KubeAPIServer, &out.KubeAPIServer + *out = new(KubeAPIServerConfig) + (*in).DeepCopyInto(*out) + } + if in.KubeControllerManager != nil { + in, out := &in.KubeControllerManager, &out.KubeControllerManager + *out = new(KubeControllerManagerConfig) + (*in).DeepCopyInto(*out) + } + if in.KubeScheduler != nil { + in, out := &in.KubeScheduler, &out.KubeScheduler + *out = new(KubeSchedulerConfig) + (*in).DeepCopyInto(*out) + } + if in.KubeProxy != nil { + in, out := &in.KubeProxy, &out.KubeProxy + *out = new(KubeProxyConfig) + (*in).DeepCopyInto(*out) + } + if in.Kubelet != nil { + in, out := &in.Kubelet, &out.Kubelet + *out = new(KubeletConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kubernetes. +func (in *Kubernetes) DeepCopy() *Kubernetes { + if in == nil { + return nil + } + out := new(Kubernetes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesConfig) DeepCopyInto(out *KubernetesConfig) { + *out = *in + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesConfig. +func (in *KubernetesConfig) DeepCopy() *KubernetesConfig { + if in == nil { + return nil + } + out := new(KubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesDashboard) DeepCopyInto(out *KubernetesDashboard) { + *out = *in + out.Addon = in.Addon + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesDashboard. +func (in *KubernetesDashboard) DeepCopy() *KubernetesDashboard { + if in == nil { + return nil + } + out := new(KubernetesDashboard) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesInfo) DeepCopyInto(out *KubernetesInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesInfo. +func (in *KubernetesInfo) DeepCopy() *KubernetesInfo { + if in == nil { + return nil + } + out := new(KubernetesInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesSettings) DeepCopyInto(out *KubernetesSettings) { + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]ExpirableVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSettings. +func (in *KubernetesSettings) DeepCopy() *KubernetesSettings { + if in == nil { + return nil + } + out := new(KubernetesSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastError) DeepCopyInto(out *LastError) { + *out = *in + if in.TaskID != nil { + in, out := &in.TaskID, &out.TaskID + *out = new(string) + **out = **in + } + if in.Codes != nil { + in, out := &in.Codes, &out.Codes + *out = make([]ErrorCode, len(*in)) + copy(*out, *in) + } + if in.LastUpdateTime != nil { + in, out := &in.LastUpdateTime, &out.LastUpdateTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastError. +func (in *LastError) DeepCopy() *LastError { + if in == nil { + return nil + } + out := new(LastError) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastOperation) DeepCopyInto(out *LastOperation) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation. +func (in *LastOperation) DeepCopy() *LastOperation { + if in == nil { + return nil + } + out := new(LastOperation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Machine) DeepCopyInto(out *Machine) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(ShootMachineImage) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. +func (in *Machine) DeepCopy() *Machine { + if in == nil { + return nil + } + out := new(Machine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineImage) DeepCopyInto(out *MachineImage) { + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]ExpirableVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImage. +func (in *MachineImage) DeepCopy() *MachineImage { + if in == nil { + return nil + } + out := new(MachineImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineType) DeepCopyInto(out *MachineType) { + *out = *in + out.CPU = in.CPU.DeepCopy() + out.GPU = in.GPU.DeepCopy() + out.Memory = in.Memory.DeepCopy() + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(MachineTypeStorage) + (*in).DeepCopyInto(*out) + } + if in.Usable != nil { + in, out := &in.Usable, &out.Usable + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineType. +func (in *MachineType) DeepCopy() *MachineType { + if in == nil { + return nil + } + out := new(MachineType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineTypeStorage) DeepCopyInto(out *MachineTypeStorage) { + *out = *in + out.Size = in.Size.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTypeStorage. +func (in *MachineTypeStorage) DeepCopy() *MachineTypeStorage { + if in == nil { + return nil + } + out := new(MachineTypeStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Maintenance) DeepCopyInto(out *Maintenance) { + *out = *in + if in.AutoUpdate != nil { + in, out := &in.AutoUpdate, &out.AutoUpdate + *out = new(MaintenanceAutoUpdate) + **out = **in + } + if in.TimeWindow != nil { + in, out := &in.TimeWindow, &out.TimeWindow + *out = new(MaintenanceTimeWindow) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Maintenance. +func (in *Maintenance) DeepCopy() *Maintenance { + if in == nil { + return nil + } + out := new(Maintenance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceAutoUpdate) DeepCopyInto(out *MaintenanceAutoUpdate) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceAutoUpdate. +func (in *MaintenanceAutoUpdate) DeepCopy() *MaintenanceAutoUpdate { + if in == nil { + return nil + } + out := new(MaintenanceAutoUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceTimeWindow) DeepCopyInto(out *MaintenanceTimeWindow) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceTimeWindow. +func (in *MaintenanceTimeWindow) DeepCopy() *MaintenanceTimeWindow { + if in == nil { + return nil + } + out := new(MaintenanceTimeWindow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Monitoring) DeepCopyInto(out *Monitoring) { + *out = *in + if in.Alerting != nil { + in, out := &in.Alerting, &out.Alerting + *out = new(Alerting) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Monitoring. 
+func (in *Monitoring) DeepCopy() *Monitoring { + if in == nil { + return nil + } + out := new(Monitoring) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(string) + **out = **in + } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = new(string) + **out = **in + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. +func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxIngress) DeepCopyInto(out *NginxIngress) { + *out = *in + out.Addon = in.Addon + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalTrafficPolicy != nil { + in, out := &in.ExternalTrafficPolicy, &out.ExternalTrafficPolicy + *out = new(v1.ServiceExternalTrafficPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxIngress. +func (in *NginxIngress) DeepCopy() *NginxIngress { + if in == nil { + return nil + } + out := new(NginxIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OIDCConfig) DeepCopyInto(out *OIDCConfig) { + *out = *in + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = new(string) + **out = **in + } + if in.ClientAuthentication != nil { + in, out := &in.ClientAuthentication, &out.ClientAuthentication + *out = new(OpenIDConnectClientAuthentication) + (*in).DeepCopyInto(*out) + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.GroupsClaim != nil { + in, out := &in.GroupsClaim, &out.GroupsClaim + *out = new(string) + **out = **in + } + if in.GroupsPrefix != nil { + in, out := &in.GroupsPrefix, &out.GroupsPrefix + *out = new(string) + **out = **in + } + if in.IssuerURL != nil { + in, out := &in.IssuerURL, &out.IssuerURL + *out = new(string) + **out = **in + } + if in.RequiredClaims != nil { + in, out := &in.RequiredClaims, &out.RequiredClaims + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SigningAlgs != nil { + in, out := &in.SigningAlgs, &out.SigningAlgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UsernameClaim != nil { + in, out := &in.UsernameClaim, &out.UsernameClaim + *out = new(string) + **out = **in + } + if in.UsernamePrefix != nil { + in, out := &in.UsernamePrefix, &out.UsernamePrefix + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCConfig. +func (in *OIDCConfig) DeepCopy() *OIDCConfig { + if in == nil { + return nil + } + out := new(OIDCConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDConnectClientAuthentication) DeepCopyInto(out *OpenIDConnectClientAuthentication) { + *out = *in + if in.ExtraConfig != nil { + in, out := &in.ExtraConfig, &out.ExtraConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectClientAuthentication. +func (in *OpenIDConnectClientAuthentication) DeepCopy() *OpenIDConnectClientAuthentication { + if in == nil { + return nil + } + out := new(OpenIDConnectClientAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Plant) DeepCopyInto(out *Plant) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plant. +func (in *Plant) DeepCopy() *Plant { + if in == nil { + return nil + } + out := new(Plant) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Plant) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlantList) DeepCopyInto(out *PlantList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Plant, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantList. +func (in *PlantList) DeepCopy() *PlantList { + if in == nil { + return nil + } + out := new(PlantList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlantList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlantSpec) DeepCopyInto(out *PlantSpec) { + *out = *in + out.SecretRef = in.SecretRef + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantSpec. +func (in *PlantSpec) DeepCopy() *PlantSpec { + if in == nil { + return nil + } + out := new(PlantSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlantStatus) DeepCopyInto(out *PlantStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.ClusterInfo != nil { + in, out := &in.ClusterInfo, &out.ClusterInfo + *out = new(ClusterInfo) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantStatus. +func (in *PlantStatus) DeepCopy() *PlantStatus { + if in == nil { + return nil + } + out := new(PlantStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. +func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectMember) DeepCopyInto(out *ProjectMember) { + *out = *in + out.Subject = in.Subject + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectMember. +func (in *ProjectMember) DeepCopy() *ProjectMember { + if in == nil { + return nil + } + out := new(ProjectMember) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + if in.CreatedBy != nil { + in, out := &in.CreatedBy, &out.CreatedBy + *out = new(rbacv1.Subject) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(rbacv1.Subject) + **out = **in + } + if in.Purpose != nil { + in, out := &in.Purpose, &out.Purpose + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]ProjectMember, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. +func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Provider) DeepCopyInto(out *Provider) { + *out = *in + if in.ControlPlaneConfig != nil { + in, out := &in.ControlPlaneConfig, &out.ControlPlaneConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureConfig != nil { + in, out := &in.InfrastructureConfig, &out.InfrastructureConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.Workers != nil { + in, out := &in.Workers, &out.Workers + *out = make([]Worker, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Provider. +func (in *Provider) DeepCopy() *Provider { + if in == nil { + return nil + } + out := new(Provider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfig) DeepCopyInto(out *ProviderConfig) { + *out = *in + in.RawExtension.DeepCopyInto(&out.RawExtension) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfig. +func (in *ProviderConfig) DeepCopy() *ProviderConfig { + if in == nil { + return nil + } + out := new(ProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Quota) DeepCopyInto(out *Quota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Quota. +func (in *Quota) DeepCopy() *Quota { + if in == nil { + return nil + } + out := new(Quota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Quota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuotaList) DeepCopyInto(out *QuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Quota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaList. +func (in *QuotaList) DeepCopy() *QuotaList { + if in == nil { + return nil + } + out := new(QuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QuotaSpec) DeepCopyInto(out *QuotaSpec) { + *out = *in + if in.ClusterLifetimeDays != nil { + in, out := &in.ClusterLifetimeDays, &out.ClusterLifetimeDays + *out = new(int) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + out.Scope = in.Scope + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaSpec. +func (in *QuotaSpec) DeepCopy() *QuotaSpec { + if in == nil { + return nil + } + out := new(QuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Region) DeepCopyInto(out *Region) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]AvailabilityZone, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Region. +func (in *Region) DeepCopy() *Region { + if in == nil { + return nil + } + out := new(Region) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretBinding) DeepCopyInto(out *SecretBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.SecretRef = in.SecretRef + if in.Quotas != nil { + in, out := &in.Quotas, &out.Quotas + *out = make([]v1.ObjectReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBinding. +func (in *SecretBinding) DeepCopy() *SecretBinding { + if in == nil { + return nil + } + out := new(SecretBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretBindingList) DeepCopyInto(out *SecretBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SecretBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBindingList. +func (in *SecretBindingList) DeepCopy() *SecretBindingList { + if in == nil { + return nil + } + out := new(SecretBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Seed) DeepCopyInto(out *Seed) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Seed. +func (in *Seed) DeepCopy() *Seed { + if in == nil { + return nil + } + out := new(Seed) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Seed) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedBackup) DeepCopyInto(out *SeedBackup) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + out.SecretRef = in.SecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedBackup. +func (in *SeedBackup) DeepCopy() *SeedBackup { + if in == nil { + return nil + } + out := new(SeedBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedDNS) DeepCopyInto(out *SeedDNS) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedDNS. +func (in *SeedDNS) DeepCopy() *SeedDNS { + if in == nil { + return nil + } + out := new(SeedDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedList) DeepCopyInto(out *SeedList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Seed, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedList. +func (in *SeedList) DeepCopy() *SeedList { + if in == nil { + return nil + } + out := new(SeedList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SeedList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedNetworks) DeepCopyInto(out *SeedNetworks) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = new(string) + **out = **in + } + if in.ShootDefaults != nil { + in, out := &in.ShootDefaults, &out.ShootDefaults + *out = new(ShootNetworks) + (*in).DeepCopyInto(*out) + } + if in.BlockCIDRs != nil { + in, out := &in.BlockCIDRs, &out.BlockCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedNetworks. 
+func (in *SeedNetworks) DeepCopy() *SeedNetworks { + if in == nil { + return nil + } + out := new(SeedNetworks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedProvider) DeepCopyInto(out *SeedProvider) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedProvider. +func (in *SeedProvider) DeepCopy() *SeedProvider { + if in == nil { + return nil + } + out := new(SeedProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSpec) DeepCopyInto(out *SeedSpec) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(SeedBackup) + (*in).DeepCopyInto(*out) + } + out.DNS = in.DNS + in.Networks.DeepCopyInto(&out.Networks) + out.Provider = in.Provider + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(v1.SecretReference) + **out = **in + } + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]SeedTaint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = new(SeedVolume) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSpec. +func (in *SeedSpec) DeepCopy() *SeedSpec { + if in == nil { + return nil + } + out := new(SeedSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedStatus) DeepCopyInto(out *SeedStatus) { + *out = *in + if in.Gardener != nil { + in, out := &in.Gardener, &out.Gardener + *out = new(Gardener) + **out = **in + } + if in.KubernetesVersion != nil { + in, out := &in.KubernetesVersion, &out.KubernetesVersion + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedStatus. +func (in *SeedStatus) DeepCopy() *SeedStatus { + if in == nil { + return nil + } + out := new(SeedStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedTaint) DeepCopyInto(out *SeedTaint) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedTaint. +func (in *SeedTaint) DeepCopy() *SeedTaint { + if in == nil { + return nil + } + out := new(SeedTaint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeedVolume) DeepCopyInto(out *SeedVolume) { + *out = *in + if in.MinimumSize != nil { + in, out := &in.MinimumSize, &out.MinimumSize + x := (*in).DeepCopy() + *out = &x + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]SeedVolumeProvider, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedVolume. +func (in *SeedVolume) DeepCopy() *SeedVolume { + if in == nil { + return nil + } + out := new(SeedVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedVolumeProvider) DeepCopyInto(out *SeedVolumeProvider) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedVolumeProvider. +func (in *SeedVolumeProvider) DeepCopy() *SeedVolumeProvider { + if in == nil { + return nil + } + out := new(SeedVolumeProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountConfig) DeepCopyInto(out *ServiceAccountConfig) { + *out = *in + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.SigningKeySecret != nil { + in, out := &in.SigningKeySecret, &out.SigningKeySecret + *out = new(v1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountConfig. +func (in *ServiceAccountConfig) DeepCopy() *ServiceAccountConfig { + if in == nil { + return nil + } + out := new(ServiceAccountConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Shoot) DeepCopyInto(out *Shoot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Shoot. +func (in *Shoot) DeepCopy() *Shoot { + if in == nil { + return nil + } + out := new(Shoot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Shoot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootList) DeepCopyInto(out *ShootList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Shoot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootList. +func (in *ShootList) DeepCopy() *ShootList { + if in == nil { + return nil + } + out := new(ShootList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ShootList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootMachineImage) DeepCopyInto(out *ShootMachineImage) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootMachineImage. +func (in *ShootMachineImage) DeepCopy() *ShootMachineImage { + if in == nil { + return nil + } + out := new(ShootMachineImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootNetworks) DeepCopyInto(out *ShootNetworks) { + *out = *in + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(string) + **out = **in + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootNetworks. +func (in *ShootNetworks) DeepCopy() *ShootNetworks { + if in == nil { + return nil + } + out := new(ShootNetworks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootSpec) DeepCopyInto(out *ShootSpec) { + *out = *in + if in.Addons != nil { + in, out := &in.Addons, &out.Addons + *out = new(Addons) + (*in).DeepCopyInto(*out) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNS) + (*in).DeepCopyInto(*out) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]Extension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Hibernation != nil { + in, out := &in.Hibernation, &out.Hibernation + *out = new(Hibernation) + (*in).DeepCopyInto(*out) + } + in.Kubernetes.DeepCopyInto(&out.Kubernetes) + in.Networking.DeepCopyInto(&out.Networking) + if in.Maintenance != nil { + in, out := &in.Maintenance, &out.Maintenance + *out = new(Maintenance) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(Monitoring) + (*in).DeepCopyInto(*out) + } + in.Provider.DeepCopyInto(&out.Provider) + if in.Purpose != nil { + in, out := &in.Purpose, &out.Purpose + *out = new(ShootPurpose) + **out = **in + } + if in.SeedName != nil { + in, out := &in.SeedName, &out.SeedName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootSpec. +func (in *ShootSpec) DeepCopy() *ShootSpec { + if in == nil { + return nil + } + out := new(ShootSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShootStatus) DeepCopyInto(out *ShootStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Constraints != nil { + in, out := &in.Constraints, &out.Constraints + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Gardener = in.Gardener + if in.LastOperation != nil { + in, out := &in.LastOperation, &out.LastOperation + *out = new(LastOperation) + (*in).DeepCopyInto(*out) + } + if in.LastErrors != nil { + in, out := &in.LastErrors, &out.LastErrors + *out = make([]LastError, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RetryCycleStartTime != nil { + in, out := &in.RetryCycleStartTime, &out.RetryCycleStartTime + *out = (*in).DeepCopy() + } + if in.SeedName != nil { + in, out := &in.SeedName, &out.SeedName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStatus. +func (in *ShootStatus) DeepCopy() *ShootStatus { + if in == nil { + return nil + } + out := new(ShootStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeType) DeepCopyInto(out *VolumeType) { + *out = *in + if in.Usable != nil { + in, out := &in.Usable, &out.Usable + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeType. +func (in *VolumeType) DeepCopy() *VolumeType { + if in == nil { + return nil + } + out := new(VolumeType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Worker) DeepCopyInto(out *Worker) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = new(string) + **out = **in + } + if in.Kubernetes != nil { + in, out := &in.Kubernetes, &out.Kubernetes + *out = new(WorkerKubernetes) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Machine.DeepCopyInto(&out.Machine) + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]v1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = new(Volume) + (*in).DeepCopyInto(*out) + } + if in.DataVolumes != nil { + in, out := &in.DataVolumes, &out.DataVolumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KubeletDataVolumeName != nil { + in, out := &in.KubeletDataVolumeName, &out.KubeletDataVolumeName + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Worker. +func (in *Worker) DeepCopy() *Worker { + if in == nil { + return nil + } + out := new(Worker) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerKubernetes) DeepCopyInto(out *WorkerKubernetes) { + *out = *in + if in.Kubelet != nil { + in, out := &in.Kubelet, &out.Kubelet + *out = new(KubeletConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerKubernetes. +func (in *WorkerKubernetes) DeepCopy() *WorkerKubernetes { + if in == nil { + return nil + } + out := new(WorkerKubernetes) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go new file mode 100644 index 000000000..214d9743d --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go @@ -0,0 +1,103 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&CloudProfile{}, func(obj interface{}) { SetObjectDefaults_CloudProfile(obj.(*CloudProfile)) }) + scheme.AddTypeDefaultingFunc(&CloudProfileList{}, func(obj interface{}) { SetObjectDefaults_CloudProfileList(obj.(*CloudProfileList)) }) + scheme.AddTypeDefaultingFunc(&Project{}, func(obj interface{}) { SetObjectDefaults_Project(obj.(*Project)) }) + scheme.AddTypeDefaultingFunc(&ProjectList{}, func(obj interface{}) { SetObjectDefaults_ProjectList(obj.(*ProjectList)) }) + scheme.AddTypeDefaultingFunc(&SecretBinding{}, func(obj interface{}) { SetObjectDefaults_SecretBinding(obj.(*SecretBinding)) }) + scheme.AddTypeDefaultingFunc(&SecretBindingList{}, func(obj interface{}) { SetObjectDefaults_SecretBindingList(obj.(*SecretBindingList)) }) + scheme.AddTypeDefaultingFunc(&Shoot{}, func(obj interface{}) { SetObjectDefaults_Shoot(obj.(*Shoot)) }) + scheme.AddTypeDefaultingFunc(&ShootList{}, func(obj interface{}) { SetObjectDefaults_ShootList(obj.(*ShootList)) }) + return nil +} + +func SetObjectDefaults_CloudProfile(in *CloudProfile) { + for i := range in.Spec.MachineTypes { + a := &in.Spec.MachineTypes[i] + SetDefaults_MachineType(a) + } + for i := range in.Spec.VolumeTypes { + a := &in.Spec.VolumeTypes[i] + SetDefaults_VolumeType(a) + } +} + +func SetObjectDefaults_CloudProfileList(in *CloudProfileList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_CloudProfile(a) + } +} + +func SetObjectDefaults_Project(in *Project) { + SetDefaults_Project(in) +} + +func SetObjectDefaults_ProjectList(in *ProjectList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Project(a) + } +} + +func SetObjectDefaults_SecretBinding(in *SecretBinding) { + SetDefaults_SecretBinding(in) +} + +func SetObjectDefaults_SecretBindingList(in *SecretBindingList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_SecretBinding(a) + } +} + +func SetObjectDefaults_Shoot(in *Shoot) { + SetDefaults_Shoot(in) + if in.Spec.Addons != nil { + if in.Spec.Addons.NginxIngress != nil { + SetDefaults_NginxIngress(in.Spec.Addons.NginxIngress) + } + } + if in.Spec.Maintenance != nil { + SetDefaults_Maintenance(in.Spec.Maintenance) + } + for i := range in.Spec.Provider.Workers { + a := &in.Spec.Provider.Workers[i] + SetDefaults_Worker(a) + } +} + +func SetObjectDefaults_ShootList(in *ShootList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Shoot(a) + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go new file mode 100644 index 000000000..1d17a6038 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go @@ 
-0,0 +1,3320 @@ +// +build !ignore_autogenerated + +/* +Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package core + +import ( + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Addon) DeepCopyInto(out *Addon) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addon. +func (in *Addon) DeepCopy() *Addon { + if in == nil { + return nil + } + out := new(Addon) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Addons) DeepCopyInto(out *Addons) { + *out = *in + if in.KubernetesDashboard != nil { + in, out := &in.KubernetesDashboard, &out.KubernetesDashboard + *out = new(KubernetesDashboard) + (*in).DeepCopyInto(*out) + } + if in.NginxIngress != nil { + in, out := &in.NginxIngress, &out.NginxIngress + *out = new(NginxIngress) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addons. +func (in *Addons) DeepCopy() *Addons { + if in == nil { + return nil + } + out := new(Addons) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPlugin) DeepCopyInto(out *AdmissionPlugin) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPlugin. +func (in *AdmissionPlugin) DeepCopy() *AdmissionPlugin { + if in == nil { + return nil + } + out := new(AdmissionPlugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Alerting) DeepCopyInto(out *Alerting) { + *out = *in + if in.EmailReceivers != nil { + in, out := &in.EmailReceivers, &out.EmailReceivers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alerting. +func (in *Alerting) DeepCopy() *Alerting { + if in == nil { + return nil + } + out := new(Alerting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuditConfig) DeepCopyInto(out *AuditConfig) { + *out = *in + if in.AuditPolicy != nil { + in, out := &in.AuditPolicy, &out.AuditPolicy + *out = new(AuditPolicy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig. +func (in *AuditConfig) DeepCopy() *AuditConfig { + if in == nil { + return nil + } + out := new(AuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditPolicy) DeepCopyInto(out *AuditPolicy) { + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(v1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditPolicy. +func (in *AuditPolicy) DeepCopy() *AuditPolicy { + if in == nil { + return nil + } + out := new(AuditPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvailabilityZone) DeepCopyInto(out *AvailabilityZone) { + *out = *in + if in.UnavailableMachineTypes != nil { + in, out := &in.UnavailableMachineTypes, &out.UnavailableMachineTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UnavailableVolumeTypes != nil { + in, out := &in.UnavailableVolumeTypes, &out.UnavailableVolumeTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailabilityZone. +func (in *AvailabilityZone) DeepCopy() *AvailabilityZone { + if in == nil { + return nil + } + out := new(AvailabilityZone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucket) DeepCopyInto(out *BackupBucket) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucket. +func (in *BackupBucket) DeepCopy() *BackupBucket { + if in == nil { + return nil + } + out := new(BackupBucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupBucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketList) DeepCopyInto(out *BackupBucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupBucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketList. +func (in *BackupBucketList) DeepCopy() *BackupBucketList { + if in == nil { + return nil + } + out := new(BackupBucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BackupBucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketProvider) DeepCopyInto(out *BackupBucketProvider) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketProvider. +func (in *BackupBucketProvider) DeepCopy() *BackupBucketProvider { + if in == nil { + return nil + } + out := new(BackupBucketProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketSpec) DeepCopyInto(out *BackupBucketSpec) { + *out = *in + out.Provider = in.Provider + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + out.SecretRef = in.SecretRef + if in.SeedName != nil { + in, out := &in.SeedName, &out.SeedName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketSpec. +func (in *BackupBucketSpec) DeepCopy() *BackupBucketSpec { + if in == nil { + return nil + } + out := new(BackupBucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketStatus) DeepCopyInto(out *BackupBucketStatus) { + *out = *in + if in.ProviderStatus != nil { + in, out := &in.ProviderStatus, &out.ProviderStatus + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.LastOperation != nil { + in, out := &in.LastOperation, &out.LastOperation + *out = new(LastOperation) + (*in).DeepCopyInto(*out) + } + if in.LastError != nil { + in, out := &in.LastError, &out.LastError + *out = new(LastError) + (*in).DeepCopyInto(*out) + } + if in.GeneratedSecretRef != nil { + in, out := &in.GeneratedSecretRef, &out.GeneratedSecretRef + *out = new(v1.SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketStatus. +func (in *BackupBucketStatus) DeepCopy() *BackupBucketStatus { + if in == nil { + return nil + } + out := new(BackupBucketStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntry) DeepCopyInto(out *BackupEntry) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntry. +func (in *BackupEntry) DeepCopy() *BackupEntry { + if in == nil { + return nil + } + out := new(BackupEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupEntry) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupEntryList) DeepCopyInto(out *BackupEntryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryList. +func (in *BackupEntryList) DeepCopy() *BackupEntryList { + if in == nil { + return nil + } + out := new(BackupEntryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupEntryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntrySpec) DeepCopyInto(out *BackupEntrySpec) { + *out = *in + if in.SeedName != nil { + in, out := &in.SeedName, &out.SeedName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntrySpec. +func (in *BackupEntrySpec) DeepCopy() *BackupEntrySpec { + if in == nil { + return nil + } + out := new(BackupEntrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntryStatus) DeepCopyInto(out *BackupEntryStatus) { + *out = *in + if in.LastOperation != nil { + in, out := &in.LastOperation, &out.LastOperation + *out = new(LastOperation) + (*in).DeepCopyInto(*out) + } + if in.LastError != nil { + in, out := &in.LastError, &out.LastError + *out = new(LastError) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryStatus. +func (in *BackupEntryStatus) DeepCopy() *BackupEntryStatus { + if in == nil { + return nil + } + out := new(BackupEntryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudInfo) DeepCopyInto(out *CloudInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInfo. +func (in *CloudInfo) DeepCopy() *CloudInfo { + if in == nil { + return nil + } + out := new(CloudInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudProfile) DeepCopyInto(out *CloudProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfile. +func (in *CloudProfile) DeepCopy() *CloudProfile { + if in == nil { + return nil + } + out := new(CloudProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudProfileList) DeepCopyInto(out *CloudProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfileList. +func (in *CloudProfileList) DeepCopy() *CloudProfileList { + if in == nil { + return nil + } + out := new(CloudProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudProfileSpec) DeepCopyInto(out *CloudProfileSpec) { + *out = *in + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = new(string) + **out = **in + } + in.Kubernetes.DeepCopyInto(&out.Kubernetes) + if in.MachineImages != nil { + in, out := &in.MachineImages, &out.MachineImages + *out = make([]MachineImage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MachineTypes != nil { + in, out := &in.MachineTypes, &out.MachineTypes + *out = make([]MachineType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.Regions != nil { + in, out := &in.Regions, &out.Regions + *out = make([]Region, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SeedSelector != nil { + in, out := &in.SeedSelector, &out.SeedSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.VolumeTypes != nil { + in, out := &in.VolumeTypes, &out.VolumeTypes + *out = make([]VolumeType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProfileSpec. +func (in *CloudProfileSpec) DeepCopy() *CloudProfileSpec { + if in == nil { + return nil + } + out := new(CloudProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterAutoscaler) DeepCopyInto(out *ClusterAutoscaler) { + *out = *in + if in.ScaleDownDelayAfterAdd != nil { + in, out := &in.ScaleDownDelayAfterAdd, &out.ScaleDownDelayAfterAdd + *out = new(metav1.Duration) + **out = **in + } + if in.ScaleDownDelayAfterDelete != nil { + in, out := &in.ScaleDownDelayAfterDelete, &out.ScaleDownDelayAfterDelete + *out = new(metav1.Duration) + **out = **in + } + if in.ScaleDownDelayAfterFailure != nil { + in, out := &in.ScaleDownDelayAfterFailure, &out.ScaleDownDelayAfterFailure + *out = new(metav1.Duration) + **out = **in + } + if in.ScaleDownUnneededTime != nil { + in, out := &in.ScaleDownUnneededTime, &out.ScaleDownUnneededTime + *out = new(metav1.Duration) + **out = **in + } + if in.ScaleDownUtilizationThreshold != nil { + in, out := &in.ScaleDownUtilizationThreshold, &out.ScaleDownUtilizationThreshold + *out = new(float64) + **out = **in + } + if in.ScanInterval != nil { + in, out := &in.ScanInterval, &out.ScanInterval + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscaler. +func (in *ClusterAutoscaler) DeepCopy() *ClusterAutoscaler { + if in == nil { + return nil + } + out := new(ClusterAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterInfo) DeepCopyInto(out *ClusterInfo) { + *out = *in + out.Cloud = in.Cloud + out.Kubernetes = in.Kubernetes + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInfo. +func (in *ClusterInfo) DeepCopy() *ClusterInfo { + if in == nil { + return nil + } + out := new(ClusterInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerDeployment) DeepCopyInto(out *ControllerDeployment) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerDeployment. +func (in *ControllerDeployment) DeepCopy() *ControllerDeployment { + if in == nil { + return nil + } + out := new(ControllerDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallation) DeepCopyInto(out *ControllerInstallation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallation. 
+func (in *ControllerInstallation) DeepCopy() *ControllerInstallation { + if in == nil { + return nil + } + out := new(ControllerInstallation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerInstallation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallationList) DeepCopyInto(out *ControllerInstallationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerInstallation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationList. +func (in *ControllerInstallationList) DeepCopy() *ControllerInstallationList { + if in == nil { + return nil + } + out := new(ControllerInstallationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerInstallationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallationSpec) DeepCopyInto(out *ControllerInstallationSpec) { + *out = *in + out.RegistrationRef = in.RegistrationRef + out.SeedRef = in.SeedRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationSpec. +func (in *ControllerInstallationSpec) DeepCopy() *ControllerInstallationSpec { + if in == nil { + return nil + } + out := new(ControllerInstallationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerInstallationStatus) DeepCopyInto(out *ControllerInstallationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProviderStatus != nil { + in, out := &in.ProviderStatus, &out.ProviderStatus + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerInstallationStatus. +func (in *ControllerInstallationStatus) DeepCopy() *ControllerInstallationStatus { + if in == nil { + return nil + } + out := new(ControllerInstallationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRegistration) DeepCopyInto(out *ControllerRegistration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistration. 
+func (in *ControllerRegistration) DeepCopy() *ControllerRegistration { + if in == nil { + return nil + } + out := new(ControllerRegistration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRegistration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRegistrationList) DeepCopyInto(out *ControllerRegistrationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerRegistration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistrationList. +func (in *ControllerRegistrationList) DeepCopy() *ControllerRegistrationList { + if in == nil { + return nil + } + out := new(ControllerRegistrationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRegistrationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRegistrationSpec) DeepCopyInto(out *ControllerRegistrationSpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ControllerResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Deployment != nil { + in, out := &in.Deployment, &out.Deployment + *out = new(ControllerDeployment) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRegistrationSpec. +func (in *ControllerRegistrationSpec) DeepCopy() *ControllerRegistrationSpec { + if in == nil { + return nil + } + out := new(ControllerRegistrationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerResource) DeepCopyInto(out *ControllerResource) { + *out = *in + if in.GloballyEnabled != nil { + in, out := &in.GloballyEnabled, &out.GloballyEnabled + *out = new(bool) + **out = **in + } + if in.ReconcileTimeout != nil { + in, out := &in.ReconcileTimeout, &out.ReconcileTimeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerResource. +func (in *ControllerResource) DeepCopy() *ControllerResource { + if in == nil { + return nil + } + out := new(ControllerResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNS) DeepCopyInto(out *DNS) { + *out = *in + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(string) + **out = **in + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]DNSProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { + if in == nil { + return nil + } + out := new(DNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSIncludeExclude) DeepCopyInto(out *DNSIncludeExclude) { + *out = *in + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Exclude != nil { + in, out := &in.Exclude, &out.Exclude + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSIncludeExclude. +func (in *DNSIncludeExclude) DeepCopy() *DNSIncludeExclude { + if in == nil { + return nil + } + out := new(DNSIncludeExclude) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSProvider) DeepCopyInto(out *DNSProvider) { + *out = *in + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = new(DNSIncludeExclude) + (*in).DeepCopyInto(*out) + } + if in.Primary != nil { + in, out := &in.Primary, &out.Primary + *out = new(bool) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = new(DNSIncludeExclude) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSProvider. +func (in *DNSProvider) DeepCopy() *DNSProvider { + if in == nil { + return nil + } + out := new(DNSProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpirableVersion) DeepCopyInto(out *ExpirableVersion) { + *out = *in + if in.ExpirationDate != nil { + in, out := &in.ExpirationDate, &out.ExpirationDate + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpirableVersion. +func (in *ExpirableVersion) DeepCopy() *ExpirableVersion { + if in == nil { + return nil + } + out := new(ExpirableVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Extension) DeepCopyInto(out *Extension) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extension. +func (in *Extension) DeepCopy() *Extension { + if in == nil { + return nil + } + out := new(Extension) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionResourceState) DeepCopyInto(out *ExtensionResourceState) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Purpose != nil { + in, out := &in.Purpose, &out.Purpose + *out = new(string) + **out = **in + } + in.State.DeepCopyInto(&out.State) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionResourceState. +func (in *ExtensionResourceState) DeepCopy() *ExtensionResourceState { + if in == nil { + return nil + } + out := new(ExtensionResourceState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gardener) DeepCopyInto(out *Gardener) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gardener. +func (in *Gardener) DeepCopy() *Gardener { + if in == nil { + return nil + } + out := new(Gardener) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GardenerResourceData) DeepCopyInto(out *GardenerResourceData) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GardenerResourceData. +func (in *GardenerResourceData) DeepCopy() *GardenerResourceData { + if in == nil { + return nil + } + out := new(GardenerResourceData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Hibernation) DeepCopyInto(out *Hibernation) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Schedules != nil { + in, out := &in.Schedules, &out.Schedules + *out = make([]HibernationSchedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hibernation. +func (in *Hibernation) DeepCopy() *Hibernation { + if in == nil { + return nil + } + out := new(Hibernation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HibernationSchedule) DeepCopyInto(out *HibernationSchedule) { + *out = *in + if in.Start != nil { + in, out := &in.Start, &out.Start + *out = new(string) + **out = **in + } + if in.End != nil { + in, out := &in.End, &out.End + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HibernationSchedule. +func (in *HibernationSchedule) DeepCopy() *HibernationSchedule { + if in == nil { + return nil + } + out := new(HibernationSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerConfig) DeepCopyInto(out *HorizontalPodAutoscalerConfig) { + *out = *in + if in.CPUInitializationPeriod != nil { + in, out := &in.CPUInitializationPeriod, &out.CPUInitializationPeriod + *out = new(metav1.Duration) + **out = **in + } + if in.DownscaleDelay != nil { + in, out := &in.DownscaleDelay, &out.DownscaleDelay + *out = new(metav1.Duration) + **out = **in + } + if in.DownscaleStabilization != nil { + in, out := &in.DownscaleStabilization, &out.DownscaleStabilization + *out = new(metav1.Duration) + **out = **in + } + if in.InitialReadinessDelay != nil { + in, out := &in.InitialReadinessDelay, &out.InitialReadinessDelay + *out = new(metav1.Duration) + **out = **in + } + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(metav1.Duration) + **out = **in + } + if in.Tolerance != nil { + in, out := &in.Tolerance, &out.Tolerance + *out = new(float64) + **out = **in + } + if in.UpscaleDelay != nil { + in, out := &in.UpscaleDelay, &out.UpscaleDelay + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerConfig. +func (in *HorizontalPodAutoscalerConfig) DeepCopy() *HorizontalPodAutoscalerConfig { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.AdmissionPlugins != nil { + in, out := &in.AdmissionPlugins, &out.AdmissionPlugins + *out = make([]AdmissionPlugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.APIAudiences != nil { + in, out := &in.APIAudiences, &out.APIAudiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AuditConfig != nil { + in, out := &in.AuditConfig, &out.AuditConfig + *out = new(AuditConfig) + (*in).DeepCopyInto(*out) + } + if in.EnableBasicAuthentication != nil { + in, out := &in.EnableBasicAuthentication, &out.EnableBasicAuthentication + *out = new(bool) + **out = **in + } + if in.OIDCConfig != nil { + in, out := &in.OIDCConfig, &out.OIDCConfig + *out = new(OIDCConfig) + (*in).DeepCopyInto(*out) + } + if in.RuntimeConfig != nil { + in, out := &in.RuntimeConfig, &out.RuntimeConfig + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServiceAccountConfig != nil { + in, out := &in.ServiceAccountConfig, &out.ServiceAccountConfig + *out = new(ServiceAccountConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerConfig. +func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.HorizontalPodAutoscalerConfig != nil { + in, out := &in.HorizontalPodAutoscalerConfig, &out.HorizontalPodAutoscalerConfig + *out = new(HorizontalPodAutoscalerConfig) + (*in).DeepCopyInto(*out) + } + if in.NodeCIDRMaskSize != nil { + in, out := &in.NodeCIDRMaskSize, &out.NodeCIDRMaskSize + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerConfig. +func (in *KubeControllerManagerConfig) DeepCopy() *KubeControllerManagerConfig { + if in == nil { + return nil + } + out := new(KubeControllerManagerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxyConfig) DeepCopyInto(out *KubeProxyConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(ProxyMode) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfig. +func (in *KubeProxyConfig) DeepCopy() *KubeProxyConfig { + if in == nil { + return nil + } + out := new(KubeProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfig. 
+func (in *KubeSchedulerConfig) DeepCopy() *KubeSchedulerConfig { + if in == nil { + return nil + } + out := new(KubeSchedulerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfig) DeepCopyInto(out *KubeletConfig) { + *out = *in + in.KubernetesConfig.DeepCopyInto(&out.KubernetesConfig) + if in.CPUCFSQuota != nil { + in, out := &in.CPUCFSQuota, &out.CPUCFSQuota + *out = new(bool) + **out = **in + } + if in.CPUManagerPolicy != nil { + in, out := &in.CPUManagerPolicy, &out.CPUManagerPolicy + *out = new(string) + **out = **in + } + if in.EvictionHard != nil { + in, out := &in.EvictionHard, &out.EvictionHard + *out = new(KubeletConfigEviction) + (*in).DeepCopyInto(*out) + } + if in.EvictionMaxPodGracePeriod != nil { + in, out := &in.EvictionMaxPodGracePeriod, &out.EvictionMaxPodGracePeriod + *out = new(int32) + **out = **in + } + if in.EvictionMinimumReclaim != nil { + in, out := &in.EvictionMinimumReclaim, &out.EvictionMinimumReclaim + *out = new(KubeletConfigEvictionMinimumReclaim) + (*in).DeepCopyInto(*out) + } + if in.EvictionPressureTransitionPeriod != nil { + in, out := &in.EvictionPressureTransitionPeriod, &out.EvictionPressureTransitionPeriod + *out = new(metav1.Duration) + **out = **in + } + if in.EvictionSoft != nil { + in, out := &in.EvictionSoft, &out.EvictionSoft + *out = new(KubeletConfigEviction) + (*in).DeepCopyInto(*out) + } + if in.EvictionSoftGracePeriod != nil { + in, out := &in.EvictionSoftGracePeriod, &out.EvictionSoftGracePeriod + *out = new(KubeletConfigEvictionSoftGracePeriod) + (*in).DeepCopyInto(*out) + } + if in.MaxPods != nil { + in, out := &in.MaxPods, &out.MaxPods + *out = new(int32) + **out = **in + } + if in.PodPIDsLimit != nil { + in, out := &in.PodPIDsLimit, &out.PodPIDsLimit + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfig. +func (in *KubeletConfig) DeepCopy() *KubeletConfig { + if in == nil { + return nil + } + out := new(KubeletConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfigEviction) DeepCopyInto(out *KubeletConfigEviction) { + *out = *in + if in.MemoryAvailable != nil { + in, out := &in.MemoryAvailable, &out.MemoryAvailable + *out = new(string) + **out = **in + } + if in.ImageFSAvailable != nil { + in, out := &in.ImageFSAvailable, &out.ImageFSAvailable + *out = new(string) + **out = **in + } + if in.ImageFSInodesFree != nil { + in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree + *out = new(string) + **out = **in + } + if in.NodeFSAvailable != nil { + in, out := &in.NodeFSAvailable, &out.NodeFSAvailable + *out = new(string) + **out = **in + } + if in.NodeFSInodesFree != nil { + in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEviction. +func (in *KubeletConfigEviction) DeepCopy() *KubeletConfigEviction { + if in == nil { + return nil + } + out := new(KubeletConfigEviction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeletConfigEvictionMinimumReclaim) DeepCopyInto(out *KubeletConfigEvictionMinimumReclaim) { + *out = *in + if in.MemoryAvailable != nil { + in, out := &in.MemoryAvailable, &out.MemoryAvailable + x := (*in).DeepCopy() + *out = &x + } + if in.ImageFSAvailable != nil { + in, out := &in.ImageFSAvailable, &out.ImageFSAvailable + x := (*in).DeepCopy() + *out = &x + } + if in.ImageFSInodesFree != nil { + in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree + x := (*in).DeepCopy() + *out = &x + } + if in.NodeFSAvailable != nil { + in, out := &in.NodeFSAvailable, &out.NodeFSAvailable + x := (*in).DeepCopy() + *out = &x + } + if in.NodeFSInodesFree != nil { + in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEvictionMinimumReclaim. +func (in *KubeletConfigEvictionMinimumReclaim) DeepCopy() *KubeletConfigEvictionMinimumReclaim { + if in == nil { + return nil + } + out := new(KubeletConfigEvictionMinimumReclaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopyInto(out *KubeletConfigEvictionSoftGracePeriod) { + *out = *in + if in.MemoryAvailable != nil { + in, out := &in.MemoryAvailable, &out.MemoryAvailable + *out = new(metav1.Duration) + **out = **in + } + if in.ImageFSAvailable != nil { + in, out := &in.ImageFSAvailable, &out.ImageFSAvailable + *out = new(metav1.Duration) + **out = **in + } + if in.ImageFSInodesFree != nil { + in, out := &in.ImageFSInodesFree, &out.ImageFSInodesFree + *out = new(metav1.Duration) + **out = **in + } + if in.NodeFSAvailable != nil { + in, out := &in.NodeFSAvailable, &out.NodeFSAvailable + *out = new(metav1.Duration) + **out = **in + } + if in.NodeFSInodesFree != nil { + in, out := &in.NodeFSInodesFree, &out.NodeFSInodesFree + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfigEvictionSoftGracePeriod. +func (in *KubeletConfigEvictionSoftGracePeriod) DeepCopy() *KubeletConfigEvictionSoftGracePeriod { + if in == nil { + return nil + } + out := new(KubeletConfigEvictionSoftGracePeriod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Kubernetes) DeepCopyInto(out *Kubernetes) { + *out = *in + if in.AllowPrivilegedContainers != nil { + in, out := &in.AllowPrivilegedContainers, &out.AllowPrivilegedContainers + *out = new(bool) + **out = **in + } + if in.ClusterAutoscaler != nil { + in, out := &in.ClusterAutoscaler, &out.ClusterAutoscaler + *out = new(ClusterAutoscaler) + (*in).DeepCopyInto(*out) + } + if in.KubeAPIServer != nil { + in, out := &in.KubeAPIServer, &out.KubeAPIServer + *out = new(KubeAPIServerConfig) + (*in).DeepCopyInto(*out) + } + if in.KubeControllerManager != nil { + in, out := &in.KubeControllerManager, &out.KubeControllerManager + *out = new(KubeControllerManagerConfig) + (*in).DeepCopyInto(*out) + } + if in.KubeScheduler != nil { + in, out := &in.KubeScheduler, &out.KubeScheduler + *out = new(KubeSchedulerConfig) + (*in).DeepCopyInto(*out) + } + if in.KubeProxy != nil { + in, out := &in.KubeProxy, &out.KubeProxy + *out = new(KubeProxyConfig) + (*in).DeepCopyInto(*out) + } + if in.Kubelet != nil { + in, out := &in.Kubelet, &out.Kubelet + *out = new(KubeletConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kubernetes. +func (in *Kubernetes) DeepCopy() *Kubernetes { + if in == nil { + return nil + } + out := new(Kubernetes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesConfig) DeepCopyInto(out *KubernetesConfig) { + *out = *in + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesConfig. +func (in *KubernetesConfig) DeepCopy() *KubernetesConfig { + if in == nil { + return nil + } + out := new(KubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesDashboard) DeepCopyInto(out *KubernetesDashboard) { + *out = *in + out.Addon = in.Addon + if in.AuthenticationMode != nil { + in, out := &in.AuthenticationMode, &out.AuthenticationMode + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesDashboard. +func (in *KubernetesDashboard) DeepCopy() *KubernetesDashboard { + if in == nil { + return nil + } + out := new(KubernetesDashboard) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesInfo) DeepCopyInto(out *KubernetesInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesInfo. +func (in *KubernetesInfo) DeepCopy() *KubernetesInfo { + if in == nil { + return nil + } + out := new(KubernetesInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesSettings) DeepCopyInto(out *KubernetesSettings) { + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]ExpirableVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSettings. +func (in *KubernetesSettings) DeepCopy() *KubernetesSettings { + if in == nil { + return nil + } + out := new(KubernetesSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastError) DeepCopyInto(out *LastError) { + *out = *in + if in.TaskID != nil { + in, out := &in.TaskID, &out.TaskID + *out = new(string) + **out = **in + } + if in.Codes != nil { + in, out := &in.Codes, &out.Codes + *out = make([]ErrorCode, len(*in)) + copy(*out, *in) + } + if in.LastUpdateTime != nil { + in, out := &in.LastUpdateTime, &out.LastUpdateTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastError. +func (in *LastError) DeepCopy() *LastError { + if in == nil { + return nil + } + out := new(LastError) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastOperation) DeepCopyInto(out *LastOperation) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation. +func (in *LastOperation) DeepCopy() *LastOperation { + if in == nil { + return nil + } + out := new(LastOperation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Machine) DeepCopyInto(out *Machine) { + *out = *in + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(ShootMachineImage) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. +func (in *Machine) DeepCopy() *Machine { + if in == nil { + return nil + } + out := new(Machine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineImage) DeepCopyInto(out *MachineImage) { + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]ExpirableVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImage. +func (in *MachineImage) DeepCopy() *MachineImage { + if in == nil { + return nil + } + out := new(MachineImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineType) DeepCopyInto(out *MachineType) { + *out = *in + out.CPU = in.CPU.DeepCopy() + out.GPU = in.GPU.DeepCopy() + out.Memory = in.Memory.DeepCopy() + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(MachineTypeStorage) + (*in).DeepCopyInto(*out) + } + if in.Usable != nil { + in, out := &in.Usable, &out.Usable + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineType. +func (in *MachineType) DeepCopy() *MachineType { + if in == nil { + return nil + } + out := new(MachineType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineTypeStorage) DeepCopyInto(out *MachineTypeStorage) { + *out = *in + out.Size = in.Size.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTypeStorage. +func (in *MachineTypeStorage) DeepCopy() *MachineTypeStorage { + if in == nil { + return nil + } + out := new(MachineTypeStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Maintenance) DeepCopyInto(out *Maintenance) { + *out = *in + if in.AutoUpdate != nil { + in, out := &in.AutoUpdate, &out.AutoUpdate + *out = new(MaintenanceAutoUpdate) + **out = **in + } + if in.TimeWindow != nil { + in, out := &in.TimeWindow, &out.TimeWindow + *out = new(MaintenanceTimeWindow) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Maintenance. +func (in *Maintenance) DeepCopy() *Maintenance { + if in == nil { + return nil + } + out := new(Maintenance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceAutoUpdate) DeepCopyInto(out *MaintenanceAutoUpdate) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceAutoUpdate. +func (in *MaintenanceAutoUpdate) DeepCopy() *MaintenanceAutoUpdate { + if in == nil { + return nil + } + out := new(MaintenanceAutoUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceTimeWindow) DeepCopyInto(out *MaintenanceTimeWindow) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceTimeWindow. +func (in *MaintenanceTimeWindow) DeepCopy() *MaintenanceTimeWindow { + if in == nil { + return nil + } + out := new(MaintenanceTimeWindow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Monitoring) DeepCopyInto(out *Monitoring) { + *out = *in + if in.Alerting != nil { + in, out := &in.Alerting, &out.Alerting + *out = new(Alerting) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Monitoring. 
+func (in *Monitoring) DeepCopy() *Monitoring { + if in == nil { + return nil + } + out := new(Monitoring) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(string) + **out = **in + } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = new(string) + **out = **in + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. +func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxIngress) DeepCopyInto(out *NginxIngress) { + *out = *in + out.Addon = in.Addon + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalTrafficPolicy != nil { + in, out := &in.ExternalTrafficPolicy, &out.ExternalTrafficPolicy + *out = new(v1.ServiceExternalTrafficPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxIngress. +func (in *NginxIngress) DeepCopy() *NginxIngress { + if in == nil { + return nil + } + out := new(NginxIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OIDCConfig) DeepCopyInto(out *OIDCConfig) { + *out = *in + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = new(string) + **out = **in + } + if in.ClientAuthentication != nil { + in, out := &in.ClientAuthentication, &out.ClientAuthentication + *out = new(OpenIDConnectClientAuthentication) + (*in).DeepCopyInto(*out) + } + if in.ClientID != nil { + in, out := &in.ClientID, &out.ClientID + *out = new(string) + **out = **in + } + if in.GroupsClaim != nil { + in, out := &in.GroupsClaim, &out.GroupsClaim + *out = new(string) + **out = **in + } + if in.GroupsPrefix != nil { + in, out := &in.GroupsPrefix, &out.GroupsPrefix + *out = new(string) + **out = **in + } + if in.IssuerURL != nil { + in, out := &in.IssuerURL, &out.IssuerURL + *out = new(string) + **out = **in + } + if in.RequiredClaims != nil { + in, out := &in.RequiredClaims, &out.RequiredClaims + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SigningAlgs != nil { + in, out := &in.SigningAlgs, &out.SigningAlgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UsernameClaim != nil { + in, out := &in.UsernameClaim, &out.UsernameClaim + *out = new(string) + **out = **in + } + if in.UsernamePrefix != nil { + in, out := &in.UsernamePrefix, &out.UsernamePrefix + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCConfig. +func (in *OIDCConfig) DeepCopy() *OIDCConfig { + if in == nil { + return nil + } + out := new(OIDCConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDConnectClientAuthentication) DeepCopyInto(out *OpenIDConnectClientAuthentication) { + *out = *in + if in.ExtraConfig != nil { + in, out := &in.ExtraConfig, &out.ExtraConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectClientAuthentication. +func (in *OpenIDConnectClientAuthentication) DeepCopy() *OpenIDConnectClientAuthentication { + if in == nil { + return nil + } + out := new(OpenIDConnectClientAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Plant) DeepCopyInto(out *Plant) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plant. +func (in *Plant) DeepCopy() *Plant { + if in == nil { + return nil + } + out := new(Plant) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Plant) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlantList) DeepCopyInto(out *PlantList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Plant, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantList. +func (in *PlantList) DeepCopy() *PlantList { + if in == nil { + return nil + } + out := new(PlantList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlantList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlantSpec) DeepCopyInto(out *PlantSpec) { + *out = *in + out.SecretRef = in.SecretRef + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]Endpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantSpec. +func (in *PlantSpec) DeepCopy() *PlantSpec { + if in == nil { + return nil + } + out := new(PlantSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlantStatus) DeepCopyInto(out *PlantStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.ClusterInfo != nil { + in, out := &in.ClusterInfo, &out.ClusterInfo + *out = new(ClusterInfo) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlantStatus. +func (in *PlantStatus) DeepCopy() *PlantStatus { + if in == nil { + return nil + } + out := new(PlantStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. +func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectMember) DeepCopyInto(out *ProjectMember) { + *out = *in + out.Subject = in.Subject + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectMember. +func (in *ProjectMember) DeepCopy() *ProjectMember { + if in == nil { + return nil + } + out := new(ProjectMember) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + if in.CreatedBy != nil { + in, out := &in.CreatedBy, &out.CreatedBy + *out = new(rbacv1.Subject) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Owner != nil { + in, out := &in.Owner, &out.Owner + *out = new(rbacv1.Subject) + **out = **in + } + if in.Purpose != nil { + in, out := &in.Purpose, &out.Purpose + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]ProjectMember, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. +func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Provider) DeepCopyInto(out *Provider) { + *out = *in + if in.ControlPlaneConfig != nil { + in, out := &in.ControlPlaneConfig, &out.ControlPlaneConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureConfig != nil { + in, out := &in.InfrastructureConfig, &out.InfrastructureConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.Workers != nil { + in, out := &in.Workers, &out.Workers + *out = make([]Worker, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Provider. +func (in *Provider) DeepCopy() *Provider { + if in == nil { + return nil + } + out := new(Provider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfig) DeepCopyInto(out *ProviderConfig) { + *out = *in + in.RawExtension.DeepCopyInto(&out.RawExtension) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfig. +func (in *ProviderConfig) DeepCopy() *ProviderConfig { + if in == nil { + return nil + } + out := new(ProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Quota) DeepCopyInto(out *Quota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Quota. +func (in *Quota) DeepCopy() *Quota { + if in == nil { + return nil + } + out := new(Quota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Quota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuotaList) DeepCopyInto(out *QuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Quota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaList. +func (in *QuotaList) DeepCopy() *QuotaList { + if in == nil { + return nil + } + out := new(QuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QuotaSpec) DeepCopyInto(out *QuotaSpec) { + *out = *in + if in.ClusterLifetimeDays != nil { + in, out := &in.ClusterLifetimeDays, &out.ClusterLifetimeDays + *out = new(int) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + out.Scope = in.Scope + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaSpec. +func (in *QuotaSpec) DeepCopy() *QuotaSpec { + if in == nil { + return nil + } + out := new(QuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Region) DeepCopyInto(out *Region) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]AvailabilityZone, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Region. +func (in *Region) DeepCopy() *Region { + if in == nil { + return nil + } + out := new(Region) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretBinding) DeepCopyInto(out *SecretBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.SecretRef = in.SecretRef + if in.Quotas != nil { + in, out := &in.Quotas, &out.Quotas + *out = make([]v1.ObjectReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBinding. +func (in *SecretBinding) DeepCopy() *SecretBinding { + if in == nil { + return nil + } + out := new(SecretBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretBindingList) DeepCopyInto(out *SecretBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SecretBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBindingList. +func (in *SecretBindingList) DeepCopy() *SecretBindingList { + if in == nil { + return nil + } + out := new(SecretBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Seed) DeepCopyInto(out *Seed) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Seed. +func (in *Seed) DeepCopy() *Seed { + if in == nil { + return nil + } + out := new(Seed) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Seed) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedBackup) DeepCopyInto(out *SeedBackup) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + out.SecretRef = in.SecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedBackup. +func (in *SeedBackup) DeepCopy() *SeedBackup { + if in == nil { + return nil + } + out := new(SeedBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedDNS) DeepCopyInto(out *SeedDNS) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedDNS. +func (in *SeedDNS) DeepCopy() *SeedDNS { + if in == nil { + return nil + } + out := new(SeedDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedList) DeepCopyInto(out *SeedList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Seed, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedList. +func (in *SeedList) DeepCopy() *SeedList { + if in == nil { + return nil + } + out := new(SeedList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SeedList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedNetworks) DeepCopyInto(out *SeedNetworks) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = new(string) + **out = **in + } + if in.ShootDefaults != nil { + in, out := &in.ShootDefaults, &out.ShootDefaults + *out = new(ShootNetworks) + (*in).DeepCopyInto(*out) + } + if in.BlockCIDRs != nil { + in, out := &in.BlockCIDRs, &out.BlockCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedNetworks. 
+func (in *SeedNetworks) DeepCopy() *SeedNetworks { + if in == nil { + return nil + } + out := new(SeedNetworks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedProvider) DeepCopyInto(out *SeedProvider) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedProvider. +func (in *SeedProvider) DeepCopy() *SeedProvider { + if in == nil { + return nil + } + out := new(SeedProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedSpec) DeepCopyInto(out *SeedSpec) { + *out = *in + if in.Backup != nil { + in, out := &in.Backup, &out.Backup + *out = new(SeedBackup) + (*in).DeepCopyInto(*out) + } + out.DNS = in.DNS + in.Networks.DeepCopyInto(&out.Networks) + out.Provider = in.Provider + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(v1.SecretReference) + **out = **in + } + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]SeedTaint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = new(SeedVolume) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSpec. +func (in *SeedSpec) DeepCopy() *SeedSpec { + if in == nil { + return nil + } + out := new(SeedSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedStatus) DeepCopyInto(out *SeedStatus) { + *out = *in + if in.Gardener != nil { + in, out := &in.Gardener, &out.Gardener + *out = new(Gardener) + **out = **in + } + if in.KubernetesVersion != nil { + in, out := &in.KubernetesVersion, &out.KubernetesVersion + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedStatus. +func (in *SeedStatus) DeepCopy() *SeedStatus { + if in == nil { + return nil + } + out := new(SeedStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedTaint) DeepCopyInto(out *SeedTaint) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedTaint. +func (in *SeedTaint) DeepCopy() *SeedTaint { + if in == nil { + return nil + } + out := new(SeedTaint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SeedVolume) DeepCopyInto(out *SeedVolume) { + *out = *in + if in.MinimumSize != nil { + in, out := &in.MinimumSize, &out.MinimumSize + x := (*in).DeepCopy() + *out = &x + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]SeedVolumeProvider, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedVolume. +func (in *SeedVolume) DeepCopy() *SeedVolume { + if in == nil { + return nil + } + out := new(SeedVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeedVolumeProvider) DeepCopyInto(out *SeedVolumeProvider) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedVolumeProvider. +func (in *SeedVolumeProvider) DeepCopy() *SeedVolumeProvider { + if in == nil { + return nil + } + out := new(SeedVolumeProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountConfig) DeepCopyInto(out *ServiceAccountConfig) { + *out = *in + if in.Issuer != nil { + in, out := &in.Issuer, &out.Issuer + *out = new(string) + **out = **in + } + if in.SigningKeySecret != nil { + in, out := &in.SigningKeySecret, &out.SigningKeySecret + *out = new(v1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountConfig. +func (in *ServiceAccountConfig) DeepCopy() *ServiceAccountConfig { + if in == nil { + return nil + } + out := new(ServiceAccountConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Shoot) DeepCopyInto(out *Shoot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Shoot. +func (in *Shoot) DeepCopy() *Shoot { + if in == nil { + return nil + } + out := new(Shoot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Shoot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootList) DeepCopyInto(out *ShootList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Shoot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootList. +func (in *ShootList) DeepCopy() *ShootList { + if in == nil { + return nil + } + out := new(ShootList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ShootList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootMachineImage) DeepCopyInto(out *ShootMachineImage) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootMachineImage. +func (in *ShootMachineImage) DeepCopy() *ShootMachineImage { + if in == nil { + return nil + } + out := new(ShootMachineImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootNetworks) DeepCopyInto(out *ShootNetworks) { + *out = *in + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(string) + **out = **in + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootNetworks. +func (in *ShootNetworks) DeepCopy() *ShootNetworks { + if in == nil { + return nil + } + out := new(ShootNetworks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootSpec) DeepCopyInto(out *ShootSpec) { + *out = *in + if in.Addons != nil { + in, out := &in.Addons, &out.Addons + *out = new(Addons) + (*in).DeepCopyInto(*out) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(DNS) + (*in).DeepCopyInto(*out) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]Extension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Hibernation != nil { + in, out := &in.Hibernation, &out.Hibernation + *out = new(Hibernation) + (*in).DeepCopyInto(*out) + } + in.Kubernetes.DeepCopyInto(&out.Kubernetes) + in.Networking.DeepCopyInto(&out.Networking) + if in.Maintenance != nil { + in, out := &in.Maintenance, &out.Maintenance + *out = new(Maintenance) + (*in).DeepCopyInto(*out) + } + if in.Monitoring != nil { + in, out := &in.Monitoring, &out.Monitoring + *out = new(Monitoring) + (*in).DeepCopyInto(*out) + } + in.Provider.DeepCopyInto(&out.Provider) + if in.Purpose != nil { + in, out := &in.Purpose, &out.Purpose + *out = new(ShootPurpose) + **out = **in + } + if in.SeedName != nil { + in, out := &in.SeedName, &out.SeedName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootSpec. +func (in *ShootSpec) DeepCopy() *ShootSpec { + if in == nil { + return nil + } + out := new(ShootSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootState) DeepCopyInto(out *ShootState) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootState. 
+func (in *ShootState) DeepCopy() *ShootState { + if in == nil { + return nil + } + out := new(ShootState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ShootState) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootStateList) DeepCopyInto(out *ShootStateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ShootState, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStateList. +func (in *ShootStateList) DeepCopy() *ShootStateList { + if in == nil { + return nil + } + out := new(ShootStateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ShootStateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootStateSpec) DeepCopyInto(out *ShootStateSpec) { + *out = *in + if in.Gardener != nil { + in, out := &in.Gardener, &out.Gardener + *out = make([]GardenerResourceData, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]ExtensionResourceState, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStateSpec. +func (in *ShootStateSpec) DeepCopy() *ShootStateSpec { + if in == nil { + return nil + } + out := new(ShootStateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShootStatus) DeepCopyInto(out *ShootStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Constraints != nil { + in, out := &in.Constraints, &out.Constraints + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Gardener = in.Gardener + if in.LastOperation != nil { + in, out := &in.LastOperation, &out.LastOperation + *out = new(LastOperation) + (*in).DeepCopyInto(*out) + } + if in.LastErrors != nil { + in, out := &in.LastErrors, &out.LastErrors + *out = make([]LastError, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RetryCycleStartTime != nil { + in, out := &in.RetryCycleStartTime, &out.RetryCycleStartTime + *out = (*in).DeepCopy() + } + if in.SeedName != nil { + in, out := &in.SeedName, &out.SeedName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShootStatus. 
+func (in *ShootStatus) DeepCopy() *ShootStatus { + if in == nil { + return nil + } + out := new(ShootStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeType) DeepCopyInto(out *VolumeType) { + *out = *in + if in.Usable != nil { + in, out := &in.Usable, &out.Usable + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeType. +func (in *VolumeType) DeepCopy() *VolumeType { + if in == nil { + return nil + } + out := new(VolumeType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Worker) DeepCopyInto(out *Worker) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = new(string) + **out = **in + } + if in.Kubernetes != nil { + in, out := &in.Kubernetes, &out.Kubernetes + *out = new(WorkerKubernetes) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Machine.DeepCopyInto(&out.Machine) + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(ProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]v1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = new(Volume) + (*in).DeepCopyInto(*out) + } + if in.DataVolumes != nil { + in, out := &in.DataVolumes, &out.DataVolumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KubeletDataVolumeName != nil { + in, out := &in.KubeletDataVolumeName, &out.KubeletDataVolumeName + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Worker. 
+func (in *Worker) DeepCopy() *Worker { + if in == nil { + return nil + } + out := new(Worker) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerKubernetes) DeepCopyInto(out *WorkerKubernetes) { + *out = *in + if in.Kubelet != nil { + in, out := &in.Kubelet, &out.Kubelet + *out = new(KubeletConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerKubernetes. +func (in *WorkerKubernetes) DeepCopy() *WorkerKubernetes { + if in == nil { + return nil + } + out := new(WorkerKubernetes) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/register.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/register.go new file mode 100644 index 000000000..c074510b8 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/register.go @@ -0,0 +1,19 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package extensions + +const ( + GroupName = "extensions.gardener.cloud" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/doc.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/doc.go new file mode 100644 index 000000000..61cc19c84 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/doc.go @@ -0,0 +1,20 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package +//go:generate gen-crd-api-reference-docs -api-dir . -config ../../../../hack/api-reference/extensions-config.json -template-dir ../../../../hack/api-reference/template -out-file ../../../../hack/api-reference/extensions.md + +// Package v1alpha1 is the v1alpha1 version of the API. 
+// +groupName=extensions.gardener.cloud +package v1alpha1 diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/register.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/register.go new file mode 100644 index 000000000..b3ba007e3 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/register.go @@ -0,0 +1,67 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + "github.com/gardener/gardener/pkg/apis/extensions" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: extensions.GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &BackupBucket{}, + &BackupBucketList{}, + &BackupEntry{}, + &BackupEntryList{}, + &Cluster{}, + &ClusterList{}, + &ControlPlane{}, + &ControlPlaneList{}, + &Extension{}, + &ExtensionList{}, + &Infrastructure{}, + &InfrastructureList{}, + &Network{}, + &NetworkList{}, + &OperatingSystemConfig{}, + &OperatingSystemConfigList{}, + &Worker{}, + &WorkerList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types.go new file mode 100644 index 000000000..554734f96 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types.go @@ -0,0 +1,87 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Status is the status of an Object. +type Status interface { + // GetProviderStatus retrieves the provider status. + GetProviderStatus() *runtime.RawExtension + // GetConditions retrieves the Conditions of a status. + // Conditions may be nil. + GetConditions() []gardencorev1beta1.Condition + // SetConditions sets the Conditions of a status. + SetConditions([]gardencorev1beta1.Condition) + // GetLastOperation retrieves the LastOperation of a status. + // LastOperation may be nil. + GetLastOperation() LastOperation + // GetObservedGeneration retrieves the last generation observed by the extension controller. + GetObservedGeneration() int64 + // GetLastError retrieves the LastError of a status. + // LastError may be nil. + GetLastError() LastError + // GetState retrieves the State of the extension + GetState() *runtime.RawExtension +} + +// LastOperation is the last operation on an object. +type LastOperation interface { + // GetDescription returns the description of the last operation. + GetDescription() string + // GetLastUpdateTime returns the last update time of the last operation. + GetLastUpdateTime() metav1.Time + // GetProgress returns progress of the last operation. + GetProgress() int + // GetState returns the LastOperationState of the last operation. + GetState() gardencorev1beta1.LastOperationState + // GetType returns the LastOperationType of the last operation. + GetType() gardencorev1beta1.LastOperationType +} + +// LastError is the last error on an object. +type LastError interface { + // GetDescription gets the description of the last occurred error. + GetDescription() string + // GetTaskID gets the task ID of the last error. + GetTaskID() *string + // GetCodes gets the error codes of the last error. + GetCodes() []gardencorev1beta1.ErrorCode + // GetLastUpdateTime retrieves the last time the error was updated. + GetLastUpdateTime() *metav1.Time +} + +// Spec is the spec section of an Object. +type Spec interface { + // GetExtensionType retrieves the extension type. + GetExtensionType() string + // GetExtensionPurpose retrieves the extension purpose. + GetExtensionPurpose() *string + // GetProviderConfig retrieves the provider config. + GetProviderConfig() *runtime.RawExtension +} + +// Object is an extension object resource. +type Object interface { + metav1.Object + // GetExtensionStatus retrieves the object's status. + GetExtensionStatus() Status + // GetExtensionSpec retrieves the object's spec. + GetExtensionSpec() Spec +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupbucket.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupbucket.go new file mode 100644 index 000000000..d5ddb39fe --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupbucket.go @@ -0,0 +1,81 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. 
This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ Object = (*BackupBucket)(nil) + +// BackupBucketResource is a constant for the name of the BackupBucket resource. +const BackupBucketResource = "BackupBucket" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupBucket is a specification for backup bucket. +type BackupBucket struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BackupBucketSpec `json:"spec"` + Status BackupBucketStatus `json:"status"` +} + +// GetExtensionSpec implements Object. +func (i *BackupBucket) GetExtensionSpec() Spec { + return &i.Spec +} + +// GetExtensionStatus implements Object. +func (i *BackupBucket) GetExtensionStatus() Status { + return &i.Status +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupBucketList is a list of BackupBucket resources. +type BackupBucketList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is the list of BackupBucket. + Items []BackupBucket `json:"items"` +} + +// BackupBucketSpec is the spec for an BackupBucket resource. +type BackupBucketSpec struct { + // DefaultSpec is a structure containing common fields used by all extension resources. + DefaultSpec `json:",inline"` + // Region is the region of this bucket. + Region string `json:"region"` + // SecretRef is a reference to a secret that contains the credentials to access object store. + SecretRef corev1.SecretReference `json:"secretRef"` +} + +// BackupBucketStatus is the status for an BackupBucket resource. +type BackupBucketStatus struct { + // DefaultStatus is a structure containing common fields used by all extension resources. + DefaultStatus `json:",inline"` + // GeneratedSecretRef is reference to the secret generated by backup bucket, which + // will have object store specific credentials. + // +optional + GeneratedSecretRef *corev1.SecretReference `json:"generatedSecretRef,omitempty"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupentry.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupentry.go new file mode 100644 index 000000000..146ed6086 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_backupentry.go @@ -0,0 +1,84 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
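The Status, Spec, and Object interfaces above, together with concrete implementers such as BackupBucket, are what generic extension tooling can program against. The following is a minimal sketch of that usage; the extensionReady helper, the example package name, and the readiness criteria are illustrative assumptions, not part of this change or of the vendored API.

package example

import (
	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
)

// extensionReady reports whether an extension object's status has caught up with
// its spec and the last recorded operation succeeded. It relies only on the
// generic Object and Status interfaces, so the same check works for BackupBucket,
// Infrastructure, Worker, and the other resources in this package.
func extensionReady(obj extensionsv1alpha1.Object) bool {
	status := obj.GetExtensionStatus()
	if status.GetObservedGeneration() != obj.GetGeneration() {
		// The responsible controller has not yet observed the latest spec.
		return false
	}
	// GetLastOperation may return nil when no operation has been recorded yet.
	lastOp := status.GetLastOperation()
	return lastOp != nil && lastOp.GetState() == gardencorev1beta1.LastOperationStateSucceeded
}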
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Object = (*BackupEntry)(nil) + +// BackupEntryResource is a constant for the name of the BackupEntry resource. +const BackupEntryResource = "BackupEntry" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupEntry is a specification for backup Entry. +type BackupEntry struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BackupEntrySpec `json:"spec"` + Status BackupEntryStatus `json:"status"` +} + +// GetExtensionSpec implements Object. +func (i *BackupEntry) GetExtensionSpec() Spec { + return &i.Spec +} + +// GetExtensionStatus implements Object. +func (i *BackupEntry) GetExtensionStatus() Status { + return &i.Status +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupEntryList is a list of BackupEntry resources. +type BackupEntryList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is the list of BackupEntry. + Items []BackupEntry `json:"items"` +} + +// BackupEntrySpec is the spec for an BackupEntry resource. +type BackupEntrySpec struct { + // DefaultSpec is a structure containing common fields used by all extension resources. + DefaultSpec `json:",inline"` + // BackupBucketProviderStatus contains the provider status that has + // been generated by the controller responsible for the `BackupBucket` resource. + // +optional + BackupBucketProviderStatus *runtime.RawExtension `json:"backupBucketProviderStatus,omitempty"` + // Region is the region of this Entry. + Region string `json:"region"` + // BucketName is the name of backup bucket for this Backup Entry. + BucketName string `json:"bucketName"` + // SecretRef is a reference to a secret that contains the credentials to access object store. + SecretRef corev1.SecretReference `json:"secretRef"` +} + +// BackupEntryStatus is the status for an BackupEntry resource. +type BackupEntryStatus struct { + // DefaultStatus is a structure containing common fields used by all extension resources. + DefaultStatus `json:",inline"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_cluster.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_cluster.go new file mode 100644 index 000000000..ccfda96fa --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_cluster.go @@ -0,0 +1,58 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// ClusterResource is a constant for the name of the Cluster resource. +const ClusterResource = "Cluster" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Cluster is a specification for a Cluster resource. +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterSpec `json:"spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterList is a list of Cluster resources. +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items is the list of Cluster. + Items []Cluster `json:"items"` +} + +// ClusterSpec is the spec for a Cluster resource. +type ClusterSpec struct { + // CloudProfile is a raw extension field that contains the cloudprofile resource referenced + // by the shoot that has to be reconciled. + CloudProfile runtime.RawExtension `json:"cloudProfile"` + // Seed is a raw extension field that contains the seed resource referenced by the shoot that + // has to be reconciled. + Seed runtime.RawExtension `json:"seed"` + // Shoot is a raw extension field that contains the shoot resource that has to be reconciled. + Shoot runtime.RawExtension `json:"shoot"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_controlplane.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_controlplane.go new file mode 100644 index 000000000..563edbaf9 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_controlplane.go @@ -0,0 +1,97 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Object = (*ControlPlane)(nil) + +// ControlPlaneResource is a constant for the name of the ControlPlane resource. +const ControlPlaneResource = "ControlPlane" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControlPlane is a specification for a ControlPlane resource. 
+type ControlPlane struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ControlPlaneSpec `json:"spec"` + Status ControlPlaneStatus `json:"status"` +} + +// GetExtensionSpec implements Object. +func (i *ControlPlane) GetExtensionSpec() Spec { + return &i.Spec +} + +// GetExtensionStatus implements Object. +func (i *ControlPlane) GetExtensionStatus() Status { + return &i.Status +} + +// GetExtensionPurpose implements Object. +func (i *ControlPlaneSpec) GetExtensionPurpose() *string { + return (*string)(i.Purpose) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControlPlaneList is a list of ControlPlane resources. +type ControlPlaneList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items is the list of ControlPlanes. + Items []ControlPlane `json:"items"` +} + +// ControlPlaneSpec is the spec of a ControlPlane resource. +type ControlPlaneSpec struct { + // DefaultSpec is a structure containing common fields used by all extension resources. + DefaultSpec `json:",inline"` + // Purpose contains the data if a cloud provider needs additional components in order to expose the control plane. + // +optional + Purpose *Purpose `json:"purpose,omitempty"` + // InfrastructureProviderStatus contains the provider status that has + // been generated by the controller responsible for the `Infrastructure` resource. + // +optional + InfrastructureProviderStatus *runtime.RawExtension `json:"infrastructureProviderStatus,omitempty"` + // Region is the region of this control plane. + Region string `json:"region"` + // SecretRef is a reference to a secret that contains the cloud provider specific credentials. + SecretRef corev1.SecretReference `json:"secretRef"` +} + +// ControlPlaneStatus is the status of a ControlPlane resource. +type ControlPlaneStatus struct { + // DefaultStatus is a structure containing common fields used by all extension resources. + DefaultStatus `json:",inline"` +} + +// Purpose is a string alias. +type Purpose string + +const ( + // Normal triggers the ControlPlane controllers for the shoot provider. + Normal Purpose = "normal" + // Exposure triggers the ControlPlane controllers for the exposure settings. + Exposure Purpose = "exposure" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_defaults.go new file mode 100644 index 000000000..9affd7831 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_defaults.go @@ -0,0 +1,106 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
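Extension controllers that reconcile the resources above usually read the shoot, seed, and cloud profile payloads out of the Cluster resource's raw extension fields. A minimal sketch of that step follows, assuming the embedded objects are JSON-encoded (as they are when written by the Gardener API server); the decodeShoot helper and the example package name are illustrative, and real controllers typically use a scheme-aware runtime.Decoder instead of plain json.Unmarshal.

package example

import (
	"encoding/json"

	gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
	extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
)

// decodeShoot unmarshals the Shoot carried inside a Cluster resource.
func decodeShoot(cluster *extensionsv1alpha1.Cluster) (*gardencorev1beta1.Shoot, error) {
	shoot := &gardencorev1beta1.Shoot{}
	// Cluster.Spec.Shoot is a runtime.RawExtension; Raw holds the serialized object.
	if err := json.Unmarshal(cluster.Spec.Shoot.Raw, shoot); err != nil {
		return nil, err
	}
	return shoot, nil
}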
+ +package v1alpha1 + +import ( + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DefaultSpec contains common status fields for every extension resource. +type DefaultSpec struct { + // Type contains the instance of the resource's kind. + Type string `json:"type"` + // ProviderConfig is the provider specific configuration. + // +optional + ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty"` +} + +// GetExtensionType implements Spec. +func (d *DefaultSpec) GetExtensionType() string { + return d.Type +} + +// GetExtensionPurpose implements Spec. +func (d *DefaultSpec) GetExtensionPurpose() *string { + return nil +} + +// GetProviderConfig implements Spec. +func (d *DefaultSpec) GetProviderConfig() *runtime.RawExtension { + return d.ProviderConfig +} + +// DefaultStatus contains common status fields for every extension resource. +type DefaultStatus struct { + // ProviderStatus contains provider-specific status. + // +optional + ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"` + // Conditions represents the latest available observations of a Seed's current state. + // +optional + Conditions []gardencorev1beta1.Condition `json:"conditions,omitempty"` + // LastError holds information about the last occurred error during an operation. + // +optional + LastError *gardencorev1beta1.LastError `json:"lastError,omitempty"` + // LastOperation holds information about the last operation on the resource. + // +optional + LastOperation *gardencorev1beta1.LastOperation `json:"lastOperation,omitempty"` + // ObservedGeneration is the most recent generation observed for this resource. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // State can be filled by the operating controller with what ever data it needs. + // +optional + State *runtime.RawExtension `json:"state,omitempty"` +} + +// GetProviderStatus implements Status. +func (d *DefaultStatus) GetProviderStatus() *runtime.RawExtension { + return d.ProviderStatus +} + +// GetConditions implements Status. +func (d *DefaultStatus) GetConditions() []gardencorev1beta1.Condition { + return d.Conditions +} + +// SetConditions implements Status. +func (d *DefaultStatus) SetConditions(c []gardencorev1beta1.Condition) { + d.Conditions = c +} + +// GetLastOperation implements Status. +func (d *DefaultStatus) GetLastOperation() LastOperation { + if d.LastOperation == nil { + return nil + } + return d.LastOperation +} + +// GetLastError implements Status. +func (d *DefaultStatus) GetLastError() LastError { + if d.LastError == nil { + return nil + } + return d.LastError +} + +// GetObservedGeneration implements Status. +func (d *DefaultStatus) GetObservedGeneration() int64 { + return d.ObservedGeneration +} + +// GetState implements Status. +func (d *DefaultStatus) GetState() *runtime.RawExtension { + return d.State +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_extension.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_extension.go new file mode 100644 index 000000000..94953e470 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_extension.go @@ -0,0 +1,70 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ Object = (*Extension)(nil) + +// ExtensionResource is a constant for the name of the Extension resource. +const ExtensionResource = "Extension" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Extension is a specification for a Extension resource. +type Extension struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExtensionSpec `json:"spec"` + Status ExtensionStatus `json:"status"` +} + +// GetExtensionSpec implements Object. +func (i *Extension) GetExtensionSpec() Spec { + return &i.Spec +} + +// GetExtensionStatus implements Object. +func (i *Extension) GetExtensionStatus() Status { + return &i.Status +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExtensionList is a list of Extension resources. +type ExtensionList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []Extension `json:"items"` +} + +// ExtensionSpec is the spec for a Extension resource. +type ExtensionSpec struct { + // DefaultSpec is a structure containing common fields used by all extension resources. + DefaultSpec `json:",inline"` +} + +// ExtensionStatus is the status for a Extension resource. +type ExtensionStatus struct { + // DefaultStatus is a structure containing common fields used by all extension resources. + DefaultStatus `json:",inline"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_infrastructure.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_infrastructure.go new file mode 100644 index 000000000..7e514a3ff --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_infrastructure.go @@ -0,0 +1,84 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ Object = (*Infrastructure)(nil) + +// InfrastructureResource is a constant for the name of the Infrastructure resource. 
+const InfrastructureResource = "Infrastructure" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Infrastructure is a specification for cloud provider infrastructure. +type Infrastructure struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec InfrastructureSpec `json:"spec"` + Status InfrastructureStatus `json:"status"` +} + +// GetExtensionSpec implements Object. +func (i *Infrastructure) GetExtensionSpec() Spec { + return &i.Spec +} + +// GetExtensionStatus implements Object. +func (i *Infrastructure) GetExtensionStatus() Status { + return &i.Status +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InfrastructureList is a list of Infrastructure resources. +type InfrastructureList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is the list of Infrastructures. + Items []Infrastructure `json:"items"` +} + +// InfrastructureSpec is the spec for an Infrastructure resource. +type InfrastructureSpec struct { + // DefaultSpec is a structure containing common fields used by all extension resources. + DefaultSpec `json:",inline"` + // Region is the region of this infrastructure. + Region string `json:"region"` + // SecretRef is a reference to a secret that contains the actual result of the generated cloud config. + SecretRef corev1.SecretReference `json:"secretRef"` + // SSHPublicKey is the public SSH key that should be used with this infrastructure. + // +optional + SSHPublicKey []byte `json:"sshPublicKey,omitempty"` +} + +// InfrastructureStatus is the status for an Infrastructure resource. +type InfrastructureStatus struct { + // DefaultStatus is a structure containing common fields used by all extension resources. + DefaultStatus `json:",inline"` + // NodesCIDR is the CIDR of the node network that was optionally created by the acting extension controller. + // This might be needed in environments in which the CIDR for the network for the shoot worker node cannot + // be statically defined in the Shoot resource but must be computed dynamically. + // +optional + NodesCIDR *string `json:"nodesCIDR,omitempty"` +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_network.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_network.go new file mode 100644 index 000000000..0888e9417 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_network.go @@ -0,0 +1,80 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ Object = (*Network)(nil) + +// NetworkResource is a constant for the name of the Network resource. 
+const NetworkResource = "Network" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Network is the specification for cluster networking. +type Network struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NetworkSpec `json:"spec"` + Status NetworkStatus `json:"status"` +} + +// GetExtensionSpec implements Object. +func (i *Network) GetExtensionSpec() Spec { + return &i.Spec +} + +// GetExtensionStatus implements Object. +func (i *Network) GetExtensionStatus() Status { + return &i.Status +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetworkList is a list of Network resources. +type NetworkList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is the list of Networks. + Items []Network `json:"items"` +} + +// NetworkSpec is the spec for an Network resource. +type NetworkSpec struct { + // DefaultSpec is a structure containing common fields used by all extension resources. + DefaultSpec `json:",inline"` + // PodCIDR defines the CIDR that will be used for pods. + PodCIDR string `json:"podCIDR"` + // ServiceCIDR defines the CIDR that will be used for services. + ServiceCIDR string `json:"serviceCIDR"` +} + +// NetworkStatus is the status for an Network resource. +type NetworkStatus struct { + // DefaultStatus is a structure containing common fields used by all extension resources. + DefaultStatus `json:",inline"` +} + +// GetExtensionType returns the type of this Network resource. +func (n *Network) GetExtensionType() string { + return n.Spec.Type +} diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go new file mode 100644 index 000000000..d514a3c17 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go @@ -0,0 +1,204 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ Object = (*OperatingSystemConfig)(nil) + +// OperatingSystemConfigResource is a constant for the name of the OperatingSystemConfig resource. 
+const OperatingSystemConfigResource = "OperatingSystemConfig" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OperatingSystemConfig is a specification for a OperatingSystemConfig resource +type OperatingSystemConfig struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OperatingSystemConfigSpec `json:"spec"` + Status OperatingSystemConfigStatus `json:"status"` +} + +// GetExtensionSpec implements Object. +func (o *OperatingSystemConfig) GetExtensionSpec() Spec { + return &o.Spec +} + +// GetExtensionPurpose implements Object. +func (o *OperatingSystemConfigSpec) GetExtensionPurpose() *string { + return (*string)(&o.Purpose) + +} + +// GetExtensionStatus implements Object. +func (o *OperatingSystemConfig) GetExtensionStatus() Status { + return &o.Status +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OperatingSystemConfigList is a list of OperatingSystemConfig resources. +type OperatingSystemConfigList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is the list of OperatingSystemConfigs. + Items []OperatingSystemConfig `json:"items"` +} + +// OperatingSystemConfigSpec is the spec for a OperatingSystemConfig resource. +type OperatingSystemConfigSpec struct { + // DefaultSpec is a structure containing common fields used by all extension resources. + DefaultSpec `json:",inline"` + + // Purpose describes how the result of this OperatingSystemConfig is used by Gardener. Either it + // gets sent to the `Worker` extension controller to bootstrap a VM, or it is downloaded by the + // cloud-config-downloader script already running on a bootstrapped VM. + Purpose OperatingSystemConfigPurpose `json:"purpose"` + // ReloadConfigFilePath is the path to the generated operating system configuration. If set, controllers + // are asked to use it when determining the .status.command of this resource. For example, if for CoreOS + // the reload-path might be "/var/lib/config"; then the controller shall set .status.command to + // "/usr/bin/coreos-cloudinit --from-file=/var/lib/config". + // +optional + ReloadConfigFilePath *string `json:"reloadConfigFilePath,omitempty"` + // Units is a list of unit for the operating system configuration (usually, a systemd unit). + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + Units []Unit `json:"units,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + // Files is a list of files that should get written to the host's file system. + // +patchMergeKey=path + // +patchStrategy=merge + // +optional + Files []File `json:"files,omitempty" patchStrategy:"merge" patchMergeKey:"path"` +} + +// Unit is a unit for the operating system configuration (usually, a systemd unit). +type Unit struct { + // Name is the name of a unit. + Name string `json:"name"` + // Command is the unit's command. + // +optional + Command *string `json:"command,omitempty"` + // Enable describes whether the unit is enabled or not. + // +optional + Enable *bool `json:"enable,omitempty"` + // Content is the unit's content. + // +optional + Content *string `json:"content,omitempty"` + // DropIns is a list of drop-ins for this unit. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + DropIns []DropIn `json:"dropIns,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// DropIn is a drop-in configuration for a systemd unit. 
+type DropIn struct { + // Name is the name of the drop-in. + Name string `json:"name"` + // Content is the content of the drop-in. + Content string `json:"content"` +} + +// File is a file that should get written to the host's file system. The content can either be inlined or +// referenced from a secret in the same namespace. +type File struct { + // Path is the path of the file system where the file should get written to. + Path string `json:"path"` + // Permissions describes with which permissions the file should get written to the file system. + // Should be defaulted to octal 0644. + // +optional + Permissions *int32 `json:"permissions,omitempty"` + // Content describes the file's content. + Content FileContent `json:"content"` +} + +// FileContent can either reference a secret or contain inline configuration. +type FileContent struct { + // SecretRef is a struct that contains information about the referenced secret. + // +optional + SecretRef *FileContentSecretRef `json:"secretRef,omitempty"` + // Inline is a struct that contains information about the inlined data. + // +optional + Inline *FileContentInline `json:"inline,omitempty"` +} + +// FileContentSecretRef contains keys for referencing a file content's data from a secret in the same namespace. +type FileContentSecretRef struct { + // Name is the name of the secret. + Name string `json:"name"` + // DataKey is the key in the secret's `.data` field that should be read. + DataKey string `json:"dataKey"` +} + +// FileContentInline contains keys for inlining a file content's data and encoding. +type FileContentInline struct { + // Encoding is the file's encoding (e.g. base64). + Encoding string `json:"encoding"` + // Data is the file's data. + Data string `json:"data"` +} + +// OperatingSystemConfigStatus is the status for an OperatingSystemConfig resource. +type OperatingSystemConfigStatus struct { + // DefaultStatus is a structure containing common fields used by all extension resources. + DefaultStatus `json:",inline"` + // CloudConfig is a structure for containing the generated output for the given operating system + // config spec. It contains a reference to a secret as the result may contain confidential data. + // +optional + CloudConfig *CloudConfig `json:"cloudConfig,omitempty"` + // Command is the command whose execution renews/reloads the cloud config on an existing VM, e.g. + // "/usr/bin/reload-cloud-config -from-file=<path>". The <path> is optionally provided by Gardener + // in the .spec.reloadConfigFilePath field. + // +optional + Command *string `json:"command,omitempty"` + // Units is a list of systemd unit names that are part of the generated Cloud Config and shall be + // restarted when a new version has been downloaded. + // +optional + Units []string `json:"units,omitempty"` +} + +// CloudConfig is a structure for containing the generated output for the given operating system +// config spec. It contains a reference to a secret as the result may contain confidential data. +type CloudConfig struct { + // SecretRef is a reference to a secret that contains the actual result of the generated cloud config. + SecretRef corev1.SecretReference `json:"secretRef"` +} + +// OperatingSystemConfigPurpose is a string alias. +type OperatingSystemConfigPurpose string + +const ( + // OperatingSystemConfigPurposeProvision describes that the operating system configuration is used to bootstrap a + // new VM.
+ OperatingSystemConfigPurposeProvision OperatingSystemConfigPurpose = "provision" + // OperatingSystemConfigPurposeReconcile describes that the operating system configuration is executed on an already + // provisioned VM by the cloud-config-downloader script. + OperatingSystemConfigPurposeReconcile OperatingSystemConfigPurpose = "reconcile" + + // OperatingSystemConfigDefaultFilePermission is the default value for a permission of a file. + OperatingSystemConfigDefaultFilePermission int32 = 0644 + // OperatingSystemConfigSecretDataKey is a constant for the key in a secret's `.data` field containing the + // results of a computed cloud config. + OperatingSystemConfigSecretDataKey = "cloud_config" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_worker.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_worker.go new file mode 100644 index 000000000..235b4d2f8 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_worker.go @@ -0,0 +1,177 @@ +// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +var _ Object = (*Worker)(nil) + +// WorkerResource is a constant for the name of the Worker resource. +const WorkerResource = "Worker" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Worker is a specification for a Worker resource. +type Worker struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec WorkerSpec `json:"spec"` + Status WorkerStatus `json:"status"` +} + +// GetExtensionSpec implements Object. +func (i *Worker) GetExtensionSpec() Spec { + return &i.Spec +} + +// GetExtensionStatus implements Object. +func (i *Worker) GetExtensionStatus() Status { + return &i.Status +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WorkerList is a list of Worker resources. +type WorkerList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is the list of Worker. + Items []Worker `json:"items"` +} + +// WorkerSpec is the spec for a Worker resource. +type WorkerSpec struct { + // DefaultSpec is a structure containing common fields used by all extension resources. + DefaultSpec `json:",inline"` + + // InfrastructureProviderStatus is a raw extension field that contains the provider status that has + // been generated by the controller responsible for the `Infrastructure` resource. 
+	// +optional
+	InfrastructureProviderStatus *runtime.RawExtension `json:"infrastructureProviderStatus,omitempty"`
+	// Region is the name of the region where the worker pool should be deployed to.
+	Region string `json:"region"`
+	// SecretRef is a reference to a secret that contains the cloud provider specific credentials.
+	SecretRef corev1.SecretReference `json:"secretRef"`
+	// SSHPublicKey is the public SSH key that should be used with these workers.
+	// +optional
+	SSHPublicKey []byte `json:"sshPublicKey,omitempty"`
+	// Pools is a list of worker pools.
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	Pools []WorkerPool `json:"pools" patchStrategy:"merge" patchMergeKey:"name"`
+}
+
+// WorkerPool is the definition of a specific worker pool.
+type WorkerPool struct {
+	// MachineType contains information about the machine type that should be used for this worker pool.
+	MachineType string `json:"machineType"`
+	// Maximum is the maximum size of the worker pool.
+	Maximum int `json:"maximum"`
+	// MaxSurge is the maximum number of VMs that are created during an update.
+	MaxSurge intstr.IntOrString `json:"maxSurge"`
+	// MaxUnavailable is the maximum number of VMs that can be unavailable during an update.
+	MaxUnavailable intstr.IntOrString `json:"maxUnavailable"`
+	// Annotations is a map of key/value pairs for annotations for all the `Node` objects in this worker pool.
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty"`
+	// Labels is a map of key/value pairs for labels for all the `Node` objects in this worker pool.
+	// +optional
+	Labels map[string]string `json:"labels,omitempty"`
+	// Taints is a list of taints for all the `Node` objects in this worker pool.
+	// +optional
+	Taints []corev1.Taint `json:"taints,omitempty"`
+	// MachineImage contains logical information about the name and the version of the machine image that
+	// should be used. The logical information must be mapped to the provider-specific information (e.g.,
+	// AMIs, ...) by the provider itself.
+	MachineImage MachineImage `json:"machineImage,omitempty"`
+	// Minimum is the minimum size of the worker pool.
+	Minimum int `json:"minimum"`
+	// Name is the name of this worker pool.
+	Name string `json:"name"`
+	// ProviderConfig is a provider specific configuration for the worker pool.
+	// +optional
+	ProviderConfig *runtime.RawExtension `json:"providerConfig,omitempty"`
+	// UserData is a base64-encoded string that contains the data that is sent to the provider's APIs
+	// when a new machine/VM that is part of this worker pool shall be spawned.
+	UserData []byte `json:"userData"`
+	// Volume contains information about the root disks that should be used for this worker pool.
+	// +optional
+	Volume *Volume `json:"volume,omitempty"`
+	// DataVolumes contains a list of additional worker volumes.
+	// +optional
+	DataVolumes []Volume `json:"dataVolumes,omitempty"`
+	// KubeletDataVolumeName contains the name of a dataVolume that should be used for storing kubelet state.
+	// +optional
+	KubeletDataVolumeName *string `json:"kubeletDataVolumeName,omitempty"`
+	// Zones contains information about availability zones for this worker pool.
+	// +optional
+	Zones []string `json:"zones,omitempty"`
+}
+
+// MachineImage contains logical information about the name and the version of the machine image that
+// should be used. The logical information must be mapped to the provider-specific information (e.g.,
+// AMIs, ...) by the provider itself.
+type MachineImage struct {
+	// Name is the logical name of the machine image.
+	Name string `json:"name"`
+	// Version is the version of the machine image.
+	Version string `json:"version"`
+}
+
+// Volume contains information about the root disks that should be used for worker pools.
+type Volume struct {
+	// Name of the volume to make it referenceable.
+	// +optional
+	Name *string `json:"name,omitempty"`
+	// Type is the type of the volume.
+	// +optional
+	Type *string `json:"type,omitempty"`
+	// Size is the size of the root volume.
+	Size string `json:"size"`
+	// Encrypted determines if the volume should be encrypted.
+	// +optional
+	Encrypted *bool `json:"encrypted,omitempty"`
+}
+
+// WorkerStatus is the status for a Worker resource.
+type WorkerStatus struct {
+	// DefaultStatus is a structure containing common fields used by all extension resources.
+	DefaultStatus `json:",inline"`
+	// MachineDeployments is a list of created machine deployments. It will be used to e.g. configure
+	// the cluster-autoscaler properly.
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	MachineDeployments []MachineDeployment `json:"machineDeployments,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
+}
+
+// MachineDeployment is a created machine deployment.
+type MachineDeployment struct {
+	// Name is the name of the `MachineDeployment` resource.
+	Name string `json:"name"`
+	// Minimum is the minimum number for this machine deployment.
+	Minimum int `json:"minimum"`
+	// Maximum is the maximum number for this machine deployment.
+	Maximum int `json:"maximum"`
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..57b4b1660
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,1305 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	v1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
+	v1 "k8s.io/api/core/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupBucket) DeepCopyInto(out *BackupBucket) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucket.
+func (in *BackupBucket) DeepCopy() *BackupBucket { + if in == nil { + return nil + } + out := new(BackupBucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupBucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketList) DeepCopyInto(out *BackupBucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupBucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketList. +func (in *BackupBucketList) DeepCopy() *BackupBucketList { + if in == nil { + return nil + } + out := new(BackupBucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupBucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketSpec) DeepCopyInto(out *BackupBucketSpec) { + *out = *in + in.DefaultSpec.DeepCopyInto(&out.DefaultSpec) + out.SecretRef = in.SecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketSpec. +func (in *BackupBucketSpec) DeepCopy() *BackupBucketSpec { + if in == nil { + return nil + } + out := new(BackupBucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupBucketStatus) DeepCopyInto(out *BackupBucketStatus) { + *out = *in + in.DefaultStatus.DeepCopyInto(&out.DefaultStatus) + if in.GeneratedSecretRef != nil { + in, out := &in.GeneratedSecretRef, &out.GeneratedSecretRef + *out = new(v1.SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupBucketStatus. +func (in *BackupBucketStatus) DeepCopy() *BackupBucketStatus { + if in == nil { + return nil + } + out := new(BackupBucketStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntry) DeepCopyInto(out *BackupEntry) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntry. +func (in *BackupEntry) DeepCopy() *BackupEntry { + if in == nil { + return nil + } + out := new(BackupEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupEntry) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *BackupEntryList) DeepCopyInto(out *BackupEntryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryList. +func (in *BackupEntryList) DeepCopy() *BackupEntryList { + if in == nil { + return nil + } + out := new(BackupEntryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupEntryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntrySpec) DeepCopyInto(out *BackupEntrySpec) { + *out = *in + in.DefaultSpec.DeepCopyInto(&out.DefaultSpec) + if in.BackupBucketProviderStatus != nil { + in, out := &in.BackupBucketProviderStatus, &out.BackupBucketProviderStatus + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + out.SecretRef = in.SecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntrySpec. +func (in *BackupEntrySpec) DeepCopy() *BackupEntrySpec { + if in == nil { + return nil + } + out := new(BackupEntrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupEntryStatus) DeepCopyInto(out *BackupEntryStatus) { + *out = *in + in.DefaultStatus.DeepCopyInto(&out.DefaultStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupEntryStatus. +func (in *BackupEntryStatus) DeepCopy() *BackupEntryStatus { + if in == nil { + return nil + } + out := new(BackupEntryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudConfig) DeepCopyInto(out *CloudConfig) { + *out = *in + out.SecretRef = in.SecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudConfig. +func (in *CloudConfig) DeepCopy() *CloudConfig { + if in == nil { + return nil + } + out := new(CloudConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.CloudProfile.DeepCopyInto(&out.CloudProfile) + in.Seed.DeepCopyInto(&out.Seed) + in.Shoot.DeepCopyInto(&out.Shoot) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlane) DeepCopyInto(out *ControlPlane) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlane. +func (in *ControlPlane) DeepCopy() *ControlPlane { + if in == nil { + return nil + } + out := new(ControlPlane) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControlPlane) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneList) DeepCopyInto(out *ControlPlaneList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControlPlane, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneList. +func (in *ControlPlaneList) DeepCopy() *ControlPlaneList { + if in == nil { + return nil + } + out := new(ControlPlaneList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControlPlaneList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ControlPlaneSpec) DeepCopyInto(out *ControlPlaneSpec) { + *out = *in + in.DefaultSpec.DeepCopyInto(&out.DefaultSpec) + if in.Purpose != nil { + in, out := &in.Purpose, &out.Purpose + *out = new(Purpose) + **out = **in + } + if in.InfrastructureProviderStatus != nil { + in, out := &in.InfrastructureProviderStatus, &out.InfrastructureProviderStatus + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + out.SecretRef = in.SecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneSpec. +func (in *ControlPlaneSpec) DeepCopy() *ControlPlaneSpec { + if in == nil { + return nil + } + out := new(ControlPlaneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneStatus) DeepCopyInto(out *ControlPlaneStatus) { + *out = *in + in.DefaultStatus.DeepCopyInto(&out.DefaultStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneStatus. +func (in *ControlPlaneStatus) DeepCopy() *ControlPlaneStatus { + if in == nil { + return nil + } + out := new(ControlPlaneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultSpec) DeepCopyInto(out *DefaultSpec) { + *out = *in + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultSpec. +func (in *DefaultSpec) DeepCopy() *DefaultSpec { + if in == nil { + return nil + } + out := new(DefaultSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultStatus) DeepCopyInto(out *DefaultStatus) { + *out = *in + if in.ProviderStatus != nil { + in, out := &in.ProviderStatus, &out.ProviderStatus + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1beta1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LastError != nil { + in, out := &in.LastError, &out.LastError + *out = new(v1beta1.LastError) + (*in).DeepCopyInto(*out) + } + if in.LastOperation != nil { + in, out := &in.LastOperation, &out.LastOperation + *out = new(v1beta1.LastOperation) + (*in).DeepCopyInto(*out) + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultStatus. +func (in *DefaultStatus) DeepCopy() *DefaultStatus { + if in == nil { + return nil + } + out := new(DefaultStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DropIn) DeepCopyInto(out *DropIn) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DropIn. 
+func (in *DropIn) DeepCopy() *DropIn { + if in == nil { + return nil + } + out := new(DropIn) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Extension) DeepCopyInto(out *Extension) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extension. +func (in *Extension) DeepCopy() *Extension { + if in == nil { + return nil + } + out := new(Extension) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Extension) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionList) DeepCopyInto(out *ExtensionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Extension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionList. +func (in *ExtensionList) DeepCopy() *ExtensionList { + if in == nil { + return nil + } + out := new(ExtensionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExtensionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionSpec) DeepCopyInto(out *ExtensionSpec) { + *out = *in + in.DefaultSpec.DeepCopyInto(&out.DefaultSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionSpec. +func (in *ExtensionSpec) DeepCopy() *ExtensionSpec { + if in == nil { + return nil + } + out := new(ExtensionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionStatus) DeepCopyInto(out *ExtensionStatus) { + *out = *in + in.DefaultStatus.DeepCopyInto(&out.DefaultStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionStatus. +func (in *ExtensionStatus) DeepCopy() *ExtensionStatus { + if in == nil { + return nil + } + out := new(ExtensionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *File) DeepCopyInto(out *File) { + *out = *in + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = new(int32) + **out = **in + } + in.Content.DeepCopyInto(&out.Content) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new File. 
+func (in *File) DeepCopy() *File { + if in == nil { + return nil + } + out := new(File) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileContent) DeepCopyInto(out *FileContent) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(FileContentSecretRef) + **out = **in + } + if in.Inline != nil { + in, out := &in.Inline, &out.Inline + *out = new(FileContentInline) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileContent. +func (in *FileContent) DeepCopy() *FileContent { + if in == nil { + return nil + } + out := new(FileContent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileContentInline) DeepCopyInto(out *FileContentInline) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileContentInline. +func (in *FileContentInline) DeepCopy() *FileContentInline { + if in == nil { + return nil + } + out := new(FileContentInline) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileContentSecretRef) DeepCopyInto(out *FileContentSecretRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileContentSecretRef. +func (in *FileContentSecretRef) DeepCopy() *FileContentSecretRef { + if in == nil { + return nil + } + out := new(FileContentSecretRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Infrastructure) DeepCopyInto(out *Infrastructure) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure. +func (in *Infrastructure) DeepCopy() *Infrastructure { + if in == nil { + return nil + } + out := new(Infrastructure) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Infrastructure) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Infrastructure, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureList. +func (in *InfrastructureList) DeepCopy() *InfrastructureList { + if in == nil { + return nil + } + out := new(InfrastructureList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *InfrastructureList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) { + *out = *in + in.DefaultSpec.DeepCopyInto(&out.DefaultSpec) + out.SecretRef = in.SecretRef + if in.SSHPublicKey != nil { + in, out := &in.SSHPublicKey, &out.SSHPublicKey + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec. +func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec { + if in == nil { + return nil + } + out := new(InfrastructureSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) { + *out = *in + in.DefaultStatus.DeepCopyInto(&out.DefaultStatus) + if in.NodesCIDR != nil { + in, out := &in.NodesCIDR, &out.NodesCIDR + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureStatus. +func (in *InfrastructureStatus) DeepCopy() *InfrastructureStatus { + if in == nil { + return nil + } + out := new(InfrastructureStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeployment) DeepCopyInto(out *MachineDeployment) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeployment. +func (in *MachineDeployment) DeepCopy() *MachineDeployment { + if in == nil { + return nil + } + out := new(MachineDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineImage) DeepCopyInto(out *MachineImage) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineImage. +func (in *MachineImage) DeepCopy() *MachineImage { + if in == nil { + return nil + } + out := new(MachineImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. +func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Network) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkList) DeepCopyInto(out *NetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. +func (in *NetworkList) DeepCopy() *NetworkList { + if in == nil { + return nil + } + out := new(NetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + in.DefaultSpec.DeepCopyInto(&out.DefaultSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + in.DefaultStatus.DeepCopyInto(&out.DefaultStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. +func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatingSystemConfig) DeepCopyInto(out *OperatingSystemConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatingSystemConfig. +func (in *OperatingSystemConfig) DeepCopy() *OperatingSystemConfig { + if in == nil { + return nil + } + out := new(OperatingSystemConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatingSystemConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatingSystemConfigList) DeepCopyInto(out *OperatingSystemConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OperatingSystemConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatingSystemConfigList. 
+func (in *OperatingSystemConfigList) DeepCopy() *OperatingSystemConfigList { + if in == nil { + return nil + } + out := new(OperatingSystemConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatingSystemConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatingSystemConfigSpec) DeepCopyInto(out *OperatingSystemConfigSpec) { + *out = *in + in.DefaultSpec.DeepCopyInto(&out.DefaultSpec) + if in.ReloadConfigFilePath != nil { + in, out := &in.ReloadConfigFilePath, &out.ReloadConfigFilePath + *out = new(string) + **out = **in + } + if in.Units != nil { + in, out := &in.Units, &out.Units + *out = make([]Unit, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Files != nil { + in, out := &in.Files, &out.Files + *out = make([]File, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatingSystemConfigSpec. +func (in *OperatingSystemConfigSpec) DeepCopy() *OperatingSystemConfigSpec { + if in == nil { + return nil + } + out := new(OperatingSystemConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatingSystemConfigStatus) DeepCopyInto(out *OperatingSystemConfigStatus) { + *out = *in + in.DefaultStatus.DeepCopyInto(&out.DefaultStatus) + if in.CloudConfig != nil { + in, out := &in.CloudConfig, &out.CloudConfig + *out = new(CloudConfig) + **out = **in + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = new(string) + **out = **in + } + if in.Units != nil { + in, out := &in.Units, &out.Units + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatingSystemConfigStatus. +func (in *OperatingSystemConfigStatus) DeepCopy() *OperatingSystemConfigStatus { + if in == nil { + return nil + } + out := new(OperatingSystemConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Unit) DeepCopyInto(out *Unit) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = new(string) + **out = **in + } + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = new(string) + **out = **in + } + if in.DropIns != nil { + in, out := &in.DropIns, &out.DropIns + *out = make([]DropIn, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Unit. +func (in *Unit) DeepCopy() *Unit { + if in == nil { + return nil + } + out := new(Unit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Worker) DeepCopyInto(out *Worker) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Worker. +func (in *Worker) DeepCopy() *Worker { + if in == nil { + return nil + } + out := new(Worker) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Worker) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerList) DeepCopyInto(out *WorkerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Worker, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerList. +func (in *WorkerList) DeepCopy() *WorkerList { + if in == nil { + return nil + } + out := new(WorkerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkerPool) DeepCopyInto(out *WorkerPool) { + *out = *in + out.MaxSurge = in.MaxSurge + out.MaxUnavailable = in.MaxUnavailable + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]v1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.MachineImage = in.MachineImage + if in.ProviderConfig != nil { + in, out := &in.ProviderConfig, &out.ProviderConfig + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = new(Volume) + (*in).DeepCopyInto(*out) + } + if in.DataVolumes != nil { + in, out := &in.DataVolumes, &out.DataVolumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KubeletDataVolumeName != nil { + in, out := &in.KubeletDataVolumeName, &out.KubeletDataVolumeName + *out = new(string) + **out = **in + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerPool. +func (in *WorkerPool) DeepCopy() *WorkerPool { + if in == nil { + return nil + } + out := new(WorkerPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerSpec) DeepCopyInto(out *WorkerSpec) { + *out = *in + in.DefaultSpec.DeepCopyInto(&out.DefaultSpec) + if in.InfrastructureProviderStatus != nil { + in, out := &in.InfrastructureProviderStatus, &out.InfrastructureProviderStatus + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + out.SecretRef = in.SecretRef + if in.SSHPublicKey != nil { + in, out := &in.SSHPublicKey, &out.SSHPublicKey + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Pools != nil { + in, out := &in.Pools, &out.Pools + *out = make([]WorkerPool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerSpec. +func (in *WorkerSpec) DeepCopy() *WorkerSpec { + if in == nil { + return nil + } + out := new(WorkerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerStatus) DeepCopyInto(out *WorkerStatus) { + *out = *in + in.DefaultStatus.DeepCopyInto(&out.DefaultStatus) + if in.MachineDeployments != nil { + in, out := &in.MachineDeployments, &out.MachineDeployments + *out = make([]MachineDeployment, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerStatus. 
+func (in *WorkerStatus) DeepCopy() *WorkerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(WorkerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/logger/logger.go b/vendor/github.com/gardener/gardener/pkg/logger/logger.go
new file mode 100644
index 000000000..00cdd4ecf
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/logger/logger.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logger
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/sirupsen/logrus"
+)
+
+// Logger is the standard logger for the Gardener which is used for all messages which are not Shoot
+// cluster specific.
+var Logger *logrus.Logger
+
+// NewLogger creates a new logrus logger.
+// It uses STDERR as output channel and evaluates the value of the --log-level command line argument in order
+// to set the log level.
+// Example output: time="2017-06-08T13:00:28+02:00" level=info msg="gardener started successfully".
+func NewLogger(logLevel string) *logrus.Logger {
+	var level logrus.Level
+
+	switch logLevel {
+	case "debug":
+		level = logrus.DebugLevel
+	case "", "info":
+		level = logrus.InfoLevel
+	case "error":
+		level = logrus.ErrorLevel
+	default:
+		panic("The specified log level is not supported.")
+	}
+
+	logger := &logrus.Logger{
+		Out:   os.Stderr,
+		Level: level,
+		Formatter: &logrus.TextFormatter{
+			DisableColors: true,
+		},
+	}
+	Logger = logger
+	return logger
+}
+
+// NewNopLogger instantiates a new logger that logs to ioutil.Discard.
+func NewNopLogger() *logrus.Logger {
+	logger := logrus.New()
+	logger.Out = ioutil.Discard
+	return logger
+}
+
+// AddWriter returns a logger that uses the test's writer (e.g., GinkgoWriter) as output channel.
+func AddWriter(logger *logrus.Logger, writer io.Writer) *logrus.Logger {
+	logger.Out = writer
+	return logger
+}
+
+// NewShootLogger extends an existing logrus logger and adds an additional field containing the Shoot cluster name
+// and the project in the Garden cluster to the output. If an <operationID> is provided it will be printed for every
+// log message.
+// Example output: time="2017-06-08T13:00:49+02:00" level=info msg="Creating namespace in seed cluster" shoot=core/crazy-botany.
+func NewShootLogger(logger *logrus.Logger, shoot, project string) *logrus.Entry {
+	return logger.WithField("shoot", fmt.Sprintf("%s/%s", project, shoot))
+}
+
+// NewFieldLogger extends an existing logrus logger and adds the provided additional field.
+// Example output: time="2017-06-08T13:00:49+02:00" level=info msg="something" <fieldKey>=<fieldValue>.
+func NewFieldLogger(logger *logrus.Logger, fieldKey, fieldValue string) *logrus.Entry { + return logger.WithField(fieldKey, fieldValue) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/encoding.go b/vendor/github.com/gardener/gardener/pkg/utils/encoding.go new file mode 100644 index 000000000..04f2bfb43 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/encoding.go @@ -0,0 +1,166 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "sort" + "strconv" +) + +// EncodeBase64 takes a byte slice and returns the Base64-encoded string. +func EncodeBase64(in []byte) string { + encodedLength := base64.StdEncoding.EncodedLen(len(in)) + buffer := make([]byte, encodedLength) + out := buffer[0:encodedLength] + base64.StdEncoding.Encode(out, in) + return string(out) +} + +// DecodeBase64 takes a Base64-encoded string and returns the decoded byte slice. +func DecodeBase64(in string) ([]byte, error) { + return base64.StdEncoding.DecodeString(in) +} + +// EncodePrivateKey takes a RSA private key object, encodes it to the PEM format, and returns it as +// a byte slice. +func EncodePrivateKey(key *rsa.PrivateKey) []byte { + return pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(key), + }) +} + +// EncodePrivateKeyInPKCS8 takes a RSA private key object, encodes it to the PKCS8 format, and returns it as +// a byte slice. +func EncodePrivateKeyInPKCS8(key *rsa.PrivateKey) ([]byte, error) { + bytes, err := x509.MarshalPKCS8PrivateKey(key) + if err != nil { + return nil, err + } + return pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: bytes, + }), nil +} + +// DecodePrivateKey takes a byte slice, decodes it from the PEM format, converts it to an rsa.PrivateKey +// object, and returns it. In case an error occurs, it returns the error. +func DecodePrivateKey(bytes []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(bytes) + if block == nil || block.Type != "RSA PRIVATE KEY" { + return nil, errors.New("could not decode the PEM-encoded RSA private key") + } + return x509.ParsePKCS1PrivateKey(block.Bytes) +} + +// EncodeCertificate takes a certificate as a byte slice, encodes it to the PEM format, and returns +// it as byte slice. +func EncodeCertificate(certificate []byte) []byte { + return pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: certificate, + }) +} + +// DecodeCertificate takes a byte slice, decodes it from the PEM format, converts it to an x509.Certificate +// object, and returns it. In case an error occurs, it returns the error. 
+func DecodeCertificate(bytes []byte) (*x509.Certificate, error) { + block, _ := pem.Decode(bytes) + if block == nil || block.Type != "CERTIFICATE" { + return nil, errors.New("could not decode the PEM-encoded certificate") + } + return x509.ParseCertificate(block.Bytes) +} + +// SHA1 takes a byte slice and returns the sha1-hashed byte slice. +func SHA1(in []byte) []byte { + s := sha1.New() + s.Write(in) + return s.Sum(nil) +} + +// SHA256 takes a byte slice and returns the sha256-hashed byte slice. +func SHA256(in []byte) []byte { + h := sha256.Sum256(in) + return h[:] +} + +// EncodeSHA1 takes a byte slice and returns the sha1-hashed string (base64-encoded). +func EncodeSHA1(in []byte) string { + return EncodeBase64(SHA1(in)) +} + +// CreateSHA1Secret takes a username and a password and returns a sha1-schemed credentials pair as string. +func CreateSHA1Secret(username, password []byte) string { + credentials := append([]byte(username), ":{SHA}"...) + credentials = append(credentials, EncodeSHA1(password)...) + return EncodeBase64(credentials) +} + +// ComputeSHA1Hex computes the hexadecimal representation of the SHA1 hash of the given input byte +// slice , converts it to a string and returns it (length of returned string is 40 characters). +func ComputeSHA1Hex(in []byte) string { + return hex.EncodeToString(SHA1(in)) +} + +// ComputeSHA256Hex computes the hexadecimal representation of the SHA256 hash of the given input byte +// slice , converts it to a string and returns it. +func ComputeSHA256Hex(in []byte) string { + return hex.EncodeToString(SHA256(in)) +} + +// HashForMap creates a hash value for a map of type map[string]interface{} and returns it. +func HashForMap(m map[string]interface{}) string { + var ( + hash string + keys []string + ) + + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + switch v := m[k].(type) { + case string: + hash += ComputeSHA256Hex([]byte(v)) + case int: + hash += ComputeSHA256Hex([]byte(strconv.Itoa(v))) + case bool: + hash += ComputeSHA256Hex([]byte(strconv.FormatBool(v))) + case []string: + for _, val := range v { + hash += ComputeSHA256Hex([]byte(val)) + } + case map[string]interface{}: + hash += HashForMap(v) + case []map[string]interface{}: + for _, val := range v { + hash += HashForMap(val) + } + } + } + + return ComputeSHA256Hex([]byte(hash)) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/errors/errors.go b/vendor/github.com/gardener/gardener/pkg/utils/errors/errors.go new file mode 100644 index 000000000..79409cdc4 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/errors/errors.go @@ -0,0 +1,262 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errors + +import ( + "fmt" + "io" + + "github.com/hashicorp/go-multierror" +) + +type withSuppressed struct { + cause error + suppressed error +} + +func (w *withSuppressed) Error() string { + return fmt.Sprintf("%s, suppressed: %s", w.cause.Error(), w.suppressed.Error()) +} + +func (w *withSuppressed) Cause() error { + return w.cause +} + +func (w *withSuppressed) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + _, _ = fmt.Fprintf(s, "%+v\nsuppressed: %+v", w.Cause(), w.suppressed) + return + } + fallthrough + case 's', 'q': + _, _ = io.WriteString(s, w.Error()) + } +} + +func (w *withSuppressed) Suppressed() error { + return w.suppressed +} + +// Suppressed retrieves the suppressed error of the given error, if any. +// An error has a suppressed error if it implements the following interface: +// +// type suppressor interface { +// Suppressed() error +// } +// If the error does not implement the interface, nil is returned. +func Suppressed(err error) error { + type suppressor interface { + Suppressed() error + } + if w, ok := err.(suppressor); ok { + return w.Suppressed() + } + return nil +} + +// WithSuppressed annotates err with a suppressed error. +// If err is nil, WithSuppressed returns nil. +// If suppressed is nil, WithSuppressed returns err. +func WithSuppressed(err, suppressed error) error { + if err == nil || suppressed == nil { + return err + } + + return &withSuppressed{ + cause: err, + suppressed: suppressed, + } +} + +// reconciliationError implements ErrorIDer and Causer +type reconciliationError struct { + error + errorID string +} + +// WithID annotates the error with the given errorID which can afterwards be retrieved by ErrorID() +func WithID(id string, err error) error { + return &reconciliationError{err, id} +} + +// ErrorID implements the errorIDer interface and returns the id of the reconciliationError +func (t *reconciliationError) ErrorID() string { + return t.errorID +} + +// Cause implements the causer interface and returns the underlying error +func (t *reconciliationError) Cause() error { + return t.error +} + +// GetID returns the ID of the error if possible. +// If err does not implement ErrorID or is nill an empty string will be returned. 
+func GetID(err error) string { + type errorIDer interface { + ErrorID() string + } + + var id string + if err != nil { + if errWithID, ok := err.(errorIDer); ok { + id = errWithID.ErrorID() + } + } + return id +} + +// The ErrorContext holds the lastError IDs from the previous reconciliaton and the IDs of the errors that are processed in this context during the current reconciliation +type ErrorContext struct { + name string + lastErrorIDs []string + errorIDs map[string]struct{} +} + +// NewErrorContext creates a new error context with the given name and lastErrors from the previous reconciliation +func NewErrorContext(name string, lastErrorIDs []string) *ErrorContext { + return &ErrorContext{ + name: name, + lastErrorIDs: lastErrorIDs, + errorIDs: map[string]struct{}{}, + } +} + +// AddErrorID adds an error ID which will be tracked by the context and panics if more than one error have the same ID +func (e *ErrorContext) AddErrorID(errorID string) { + if e.HasErrorWithID(errorID) { + panic(fmt.Sprintf("Error with id %q already exists in error context %q", errorID, e.name)) + } + e.errorIDs[errorID] = struct{}{} +} + +// HasErrorWithID checks if the ErrorContext already contains an error with id errorID +func (e *ErrorContext) HasErrorWithID(errorID string) bool { + _, ok := e.errorIDs[errorID] + return ok +} + +// HasLastErrorWithID checks if the previous reconciliation had encountered an error with id errorID +func (e *ErrorContext) HasLastErrorWithID(errorID string) bool { + for _, lastErrorID := range e.lastErrorIDs { + if errorID == lastErrorID { + return true + } + } + return false +} + +type cancelError struct{} + +func (*cancelError) Error() string { + return "Canceled" +} + +// Cancel returns an error which will cause the HandleErrors function to stop executing tasks without triggering its FailureHandler. +func Cancel() error { + return &cancelError{} +} + +// WasCanceled checks to see if the HandleErrors function was canceled manually. It can be used to check if execution after HandleErrors should be stopped without returning an error +func WasCanceled(err error) bool { + _, ok := err.(*cancelError) + return ok +} + +// FailureHandler is a function which is called when an error occurs +type FailureHandler func(string, error) error + +// SuccessHandler is called when a task completes successfully +type SuccessHandler func(string) error + +// TaskFunc is an interface for a task which should belong to an ErrorContext and can trigger OnSuccess and OnFailure callbacks depending on whether it completes successfully or not +type TaskFunc interface { + Do(errorContext *ErrorContext) (string, error) +} + +// taskFunc implements TaskFunc +type taskFunc func(*ErrorContext) (string, error) + +func (f taskFunc) Do(errorContext *ErrorContext) (string, error) { + return f(errorContext) +} + +func defaultFailureHandler(errorID string, err error) error { + err = fmt.Errorf("%s failed (%v)", errorID, err) + return WithID(errorID, err) +} + +//ToExecute takes an errorID and a function and creates a TaskFunc from them. +func ToExecute(errorID string, task func() error) TaskFunc { + return taskFunc(func(errorContext *ErrorContext) (string, error) { + errorContext.AddErrorID(errorID) + err := task() + if err != nil { + return errorID, err + } + return errorID, nil + }) +} + +// HandleErrors takes a reference to an ErrorContext, onSuccess and onFailure callback functions and a variadic list of taskFuncs. +// It sequentially adds the Tasks' errorIDs to the provided ErrorContext and executes them. 
+// If the ErrorContext has errors from the previous reconciliation and the tasks which caused those errors complete successfully, OnSuccess is called. +// If a task fails, OnFailure is called. +func HandleErrors(errorContext *ErrorContext, onSuccess SuccessHandler, onFailure FailureHandler, tasks ...TaskFunc) error { + for _, task := range tasks { + errorID, err := task.Do(errorContext) + if err != nil && !WasCanceled(err) { + return handleFailure(onFailure, errorID, err) + } + if handlerErr := handleSuccess(errorContext, onSuccess, errorID); handlerErr != nil { + return handlerErr + } + if WasCanceled(err) { + return err + } + } + return nil +} + +func handleFailure(onFailure FailureHandler, errorID string, err error) error { + if onFailure != nil { + return onFailure(errorID, err) + } + return defaultFailureHandler(errorID, err) +} + +func handleSuccess(errorContext *ErrorContext, onSuccess SuccessHandler, errorID string) error { + if onSuccess != nil && errorContext.HasLastErrorWithID(errorID) { + if err := onSuccess(errorID); err != nil { + return err + } + } + return nil +} + +// Errors returns a list of all nested errors of the given error. +// If the error is nil, nil is returned. +// If the error is a multierror, it returns all its errors. +// Otherwise, it returns a slice containing the error as a single element. +func Errors(err error) []error { + if err == nil { + return nil + } + if errs, ok := err.(*multierror.Error); ok { + return errs.Errors + } + return []error{err} +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/errors/multierror.go b/vendor/github.com/gardener/gardener/pkg/utils/errors/multierror.go new file mode 100644 index 000000000..c0739b277 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/errors/multierror.go @@ -0,0 +1,42 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "fmt" + "github.com/hashicorp/go-multierror" +) + +// NewErrorFormatFuncWithPrefix creates a new multierror.ErrorFormatFunc which can be used as an ErrorFormat on +// multierror.Error instances. The error string is prefixed with the given prefix; all errors are concatenated at the end. +// This is similar to multierror.ListFormatFunc but does not use any escape sequences, which will look weird in +// the status of Kubernetes objects or controller logs.
+func NewErrorFormatFuncWithPrefix(prefix string) multierror.ErrorFormatFunc { + return func(es []error) string { + if len(es) == 1 { + return fmt.Sprintf("%s: 1 error occurred: %s", prefix, es[0]) + } + + combinedMsg := "" + for i, err := range es { + if i > 0 { + combinedMsg += ", " + } + combinedMsg += err.Error() + } + + return fmt.Sprintf("%s: %d errors occurred: [%s]", prefix, len(es), combinedMsg) + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/health.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/health.go new file mode 100644 index 000000000..9cbb27e5c --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/health.go @@ -0,0 +1,352 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package health + +import ( + "fmt" + "net/http" + "time" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + gardencorev1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper" + extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/rest" +) + +func requiredConditionMissing(conditionType string) error { + return fmt.Errorf("condition %q is missing", conditionType) +} + +func checkConditionState(conditionType string, expected, actual, reason, message string) error { + if expected != actual { + return fmt.Errorf("condition %q has invalid status %s (expected %s) due to %s: %s", + conditionType, actual, expected, reason, message) + } + return nil +} + +func getDeploymentCondition(conditions []appsv1.DeploymentCondition, conditionType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition { + for _, condition := range conditions { + if condition.Type == conditionType { + return &condition + } + } + return nil +} + +func getNodeCondition(conditions []corev1.NodeCondition, conditionType corev1.NodeConditionType) *corev1.NodeCondition { + for _, condition := range conditions { + if condition.Type == conditionType { + return &condition + } + } + return nil +} + +var ( + trueDeploymentConditionTypes = []appsv1.DeploymentConditionType{ + appsv1.DeploymentAvailable, + } + + trueOptionalDeploymentConditionTypes = []appsv1.DeploymentConditionType{ + appsv1.DeploymentProgressing, + } + + falseOptionalDeploymentConditionTypes = []appsv1.DeploymentConditionType{ + appsv1.DeploymentReplicaFailure, + } +) + +// CheckDeployment checks whether the given Deployment is healthy. 
+// A deployment is considered healthy if the controller observed its current revision and +// if the number of updated replicas is equal to the number of replicas. +func CheckDeployment(deployment *appsv1.Deployment) error { + if deployment.Status.ObservedGeneration < deployment.Generation { + return fmt.Errorf("observed generation outdated (%d/%d)", deployment.Status.ObservedGeneration, deployment.Generation) + } + + for _, trueConditionType := range trueDeploymentConditionTypes { + conditionType := string(trueConditionType) + condition := getDeploymentCondition(deployment.Status.Conditions, trueConditionType) + if condition == nil { + return requiredConditionMissing(conditionType) + } + if err := checkConditionState(conditionType, string(corev1.ConditionTrue), string(condition.Status), condition.Reason, condition.Message); err != nil { + return err + } + } + + for _, trueOptionalConditionType := range trueOptionalDeploymentConditionTypes { + conditionType := string(trueOptionalConditionType) + condition := getDeploymentCondition(deployment.Status.Conditions, trueOptionalConditionType) + if condition == nil { + continue + } + if err := checkConditionState(conditionType, string(corev1.ConditionTrue), string(condition.Status), condition.Reason, condition.Message); err != nil { + return err + } + } + + for _, falseOptionalConditionType := range falseOptionalDeploymentConditionTypes { + conditionType := string(falseOptionalConditionType) + condition := getDeploymentCondition(deployment.Status.Conditions, falseOptionalConditionType) + if condition == nil { + continue + } + if err := checkConditionState(conditionType, string(corev1.ConditionFalse), string(condition.Status), condition.Reason, condition.Message); err != nil { + return err + } + } + + return nil +} + +// CheckStatefulSet checks whether the given StatefulSet is healthy. +// A StatefulSet is considered healthy if its controller observed its current revision, +// it is not in an update (i.e. UpdateRevision is empty) and if its current replicas are equal to +// its desired replicas. +func CheckStatefulSet(statefulSet *appsv1.StatefulSet) error { + if statefulSet.Status.ObservedGeneration < statefulSet.Generation { + return fmt.Errorf("observed generation outdated (%d/%d)", statefulSet.Status.ObservedGeneration, statefulSet.Generation) + } + + replicas := int32(1) + if statefulSet.Spec.Replicas != nil { + replicas = *statefulSet.Spec.Replicas + } + + if statefulSet.Status.ReadyReplicas < replicas { + return fmt.Errorf("not enough ready replicas (%d/%d)", statefulSet.Status.ReadyReplicas, replicas) + } + return nil +} + +func daemonSetMaxUnavailable(daemonSet *appsv1.DaemonSet) int32 { + if daemonSet.Status.DesiredNumberScheduled == 0 || daemonSet.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType { + return 0 + } + + rollingUpdate := daemonSet.Spec.UpdateStrategy.RollingUpdate + if rollingUpdate == nil { + return 0 + } + + maxUnavailable, err := intstr.GetValueFromIntOrPercent(rollingUpdate.MaxUnavailable, int(daemonSet.Status.DesiredNumberScheduled), false) + if err != nil { + return 0 + } + + return int32(maxUnavailable) +} + +// CheckDaemonSet checks whether the given DaemonSet is healthy. +// A DaemonSet is considered healthy if its controller observed its current revision and if +// its desired number of scheduled pods is equal to its updated number of scheduled pods. 
+func CheckDaemonSet(daemonSet *appsv1.DaemonSet) error { + if daemonSet.Status.ObservedGeneration < daemonSet.Generation { + return fmt.Errorf("observed generation outdated (%d/%d)", daemonSet.Status.ObservedGeneration, daemonSet.Generation) + } + + maxUnavailable := daemonSetMaxUnavailable(daemonSet) + + if requiredAvailable := daemonSet.Status.DesiredNumberScheduled - maxUnavailable; daemonSet.Status.CurrentNumberScheduled < requiredAvailable { + return fmt.Errorf("not enough available replicas (%d/%d)", daemonSet.Status.CurrentNumberScheduled, requiredAvailable) + } + return nil +} + +// NodeOutOfDisk is deprecated NodeConditionType. +// It is no longer reported by kubelet >= 1.13. See https://github.com/kubernetes/kubernetes/pull/70111. +// +deprecated +const NodeOutOfDisk = "OutOfDisk" + +var ( + trueNodeConditionTypes = []corev1.NodeConditionType{ + corev1.NodeReady, + } + + falseNodeConditionTypes = []corev1.NodeConditionType{ + corev1.NodeDiskPressure, + corev1.NodeMemoryPressure, + corev1.NodeNetworkUnavailable, + corev1.NodePIDPressure, + NodeOutOfDisk, + } +) + +// CheckNode checks whether the given Node is healthy. +// A node is considered healthy if it has a `corev1.NodeReady` condition and this condition reports +// `corev1.ConditionTrue`. +func CheckNode(node *corev1.Node) error { + for _, trueConditionType := range trueNodeConditionTypes { + conditionType := string(trueConditionType) + condition := getNodeCondition(node.Status.Conditions, trueConditionType) + if condition == nil { + return requiredConditionMissing(conditionType) + } + if err := checkConditionState(conditionType, string(corev1.ConditionTrue), string(condition.Status), condition.Reason, condition.Message); err != nil { + return err + } + } + + for _, falseConditionType := range falseNodeConditionTypes { + conditionType := string(falseConditionType) + condition := getNodeCondition(node.Status.Conditions, falseConditionType) + if condition == nil { + continue + } + if err := checkConditionState(conditionType, string(corev1.ConditionFalse), string(condition.Status), condition.Reason, condition.Message); err != nil { + return err + } + } + + return nil +} + +var ( + trueSeedConditionTypes = []gardencorev1beta1.ConditionType{ + gardencorev1beta1.SeedGardenletReady, + gardencorev1beta1.SeedBootstrapped, + } +) + +// CheckSeed checks if the Seed is up-to-date and if its extensions have been successfully bootstrapped. +func CheckSeed(seed *gardencorev1beta1.Seed, identity *gardencorev1beta1.Gardener) error { + if seed.Status.ObservedGeneration < seed.Generation { + return fmt.Errorf("observed generation outdated (%d/%d)", seed.Status.ObservedGeneration, seed.Generation) + } + if !apiequality.Semantic.DeepEqual(seed.Status.Gardener, identity) { + return fmt.Errorf("observing Gardener version not up to date (%v/%v)", seed.Status.Gardener, identity) + } + + for _, trueConditionType := range trueSeedConditionTypes { + conditionType := string(trueConditionType) + condition := gardencorev1beta1helper.GetCondition(seed.Status.Conditions, trueConditionType) + if condition == nil { + return requiredConditionMissing(conditionType) + } + if err := checkConditionState(conditionType, string(gardencorev1beta1.ConditionTrue), string(condition.Status), condition.Reason, condition.Message); err != nil { + return err + } + } + + return nil +} + +// CheckExtensionObject checks if an extension Object is healthy or not. 
+// An extension object is healthy if +// * Its observed generation is up-to-date +// * No gardener.cloud/operation is set +// * No lastError is in the status +// * A last operation in state Succeeded is present +func CheckExtensionObject(obj extensionsv1alpha1.Object) error { + status := obj.GetExtensionStatus() + if status.GetObservedGeneration() != obj.GetGeneration() { + return fmt.Errorf("observed generation outdated (%d/%d)", status.GetObservedGeneration(), obj.GetGeneration()) + } + + op, ok := obj.GetAnnotations()[v1beta1constants.GardenerOperation] + if ok { + return fmt.Errorf("gardener operation %q is not yet picked up by extension controller", op) + } + + if lastErr := status.GetLastError(); lastErr != nil { + return fmt.Errorf("extension encountered error during reconciliation: %s", lastErr.GetDescription()) + } + + lastOp := status.GetLastOperation() + if lastOp == nil { + return fmt.Errorf("extension did not record a last operation yet") + } + + if lastOp.GetState() != gardencorev1beta1.LastOperationStateSucceeded { + return fmt.Errorf("extension state is not succeeded but %v", lastOp.GetState()) + } + return nil +} + +// CheckBackupBucket checks if a backup bucket object is healthy or not. +// A backup bucket object is healthy if +// * Its observed generation is up-to-date +// * No gardener.cloud/operation is set +// * No lastError is in the status +// * A last operation in state Succeeded is present +func CheckBackupBucket(obj *gardencorev1beta1.BackupBucket) error { + status := obj.Status + if status.ObservedGeneration != obj.Generation { + return fmt.Errorf("observed generation outdated (%d/%d)", status.ObservedGeneration, obj.Generation) + } + + op, ok := obj.GetAnnotations()[v1beta1constants.GardenerOperation] + if ok { + return fmt.Errorf("gardener operation %q is not yet picked up by controller", op) + } + + if lastErr := status.LastError; lastErr != nil { + return fmt.Errorf("backup bucket encountered error during reconciliation: %s", lastErr.GetDescription()) + } + + lastOp := status.LastOperation + if lastOp == nil { + return fmt.Errorf("backup bucket did not record a last operation yet") + } + + if lastOp.GetState() != gardencorev1beta1.LastOperationStateSucceeded { + return fmt.Errorf("backup bucket state is not succeeded but %v", lastOp.GetState()) + } + return nil +} + +// Now determines the current time. +var Now = time.Now + +// conditionerFunc updates a condition with the given type and message. +type conditionerFunc func(conditionType string, message string) gardencorev1beta1.Condition + +// CheckAPIServerAvailability checks if the API server of a cluster is reachable and measures the response time. +func CheckAPIServerAvailability(condition gardencorev1beta1.Condition, restClient rest.Interface, conditioner conditionerFunc) gardencorev1beta1.Condition { + now := Now() + response := restClient.Get().AbsPath("/healthz").Do() + responseDurationText := fmt.Sprintf("[response_time:%dms]", Now().Sub(now).Nanoseconds()/time.Millisecond.Nanoseconds()) + if response.Error() != nil { + message := fmt.Sprintf("Request to API server /healthz endpoint failed. %s (%s)", responseDurationText, response.Error().Error()) + return conditioner("HealthzRequestFailed", message) + } + + // Determine the status code of the response.
+ var statusCode int + response.StatusCode(&statusCode) + + if statusCode != http.StatusOK { + var body string + bodyRaw, err := response.Raw() + if err != nil { + body = fmt.Sprintf("Could not parse response body: %s", err.Error()) + } else { + body = string(bodyRaw) + } + message := fmt.Sprintf("API server /healthz endpoint check returned a non-OK status code %d. %s (%s)", statusCode, responseDurationText, body) + return conditioner("HealthzRequestError", message) + } + + message := fmt.Sprintf("API server /healthz endpoint responded with success status code. %s", responseDurationText) + return gardencorev1beta1helper.UpdatedCondition(condition, gardencorev1beta1.ConditionTrue, "HealthzRequestSucceeded", message) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/pod_health.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/pod_health.go new file mode 100644 index 000000000..dbafa0b83 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/pod_health.go @@ -0,0 +1,53 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copied from https://github.com/kubernetes/kubernetes/blob/a93f803f8e400f1d42dc812bc51932ff3b31798a/pkg/api/pod/util.go#L181-L211 + +package health + +import ( + corev1 "k8s.io/api/core/v1" +) + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *corev1.Pod) bool { + return IsPodReadyConditionTrue(pod.Status) +} + +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. +func IsPodReadyConditionTrue(status corev1.PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == corev1.ConditionTrue +} + +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetPodReadyCondition(status corev1.PodStatus) *corev1.PodCondition { + _, condition := GetPodCondition(&status, corev1.PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns the index and the located condition, or -1 and nil if the condition is not present.
+func GetPodCondition(status *corev1.PodStatus, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/miscellaneous.go b/vendor/github.com/gardener/gardener/pkg/utils/miscellaneous.go new file mode 100644 index 000000000..4b07cf182 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/miscellaneous.go @@ -0,0 +1,124 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "net" + "regexp" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ValueExists returns true or false, depending on whether the given string +// is part of the given []string list. +func ValueExists(value string, list []string) bool { + for _, v := range list { + if v == value { + return true + } + } + return false } + +// MergeMaps takes two maps a and b and merges them. If b defines a value with a key +// already existing in a, the value for that key will be overwritten. +func MergeMaps(a, b map[string]interface{}) map[string]interface{} { + var values = map[string]interface{}{} + + for i, v := range b { + existing, ok := a[i] + values[i] = v + + switch elem := v.(type) { + case map[string]interface{}: + if ok { + if extMap, ok := existing.(map[string]interface{}); ok { + values[i] = MergeMaps(extMap, elem) + } + } + default: + values[i] = v + } + } + + for i, v := range a { + if _, ok := values[i]; !ok { + values[i] = v + } + } + + return values +} + +// MergeStringMaps merges the content of the newMaps with the oldMap. If a key already exists then +// it gets overwritten by the last value with the same key. +func MergeStringMaps(oldMap map[string]string, newMaps ...map[string]string) map[string]string { + var out map[string]string + + if oldMap != nil { + out = make(map[string]string) + } + for k, v := range oldMap { + out[k] = v + } + + for _, newMap := range newMaps { + if newMap != nil && out == nil { + out = make(map[string]string) + } + + for k, v := range newMap { + out[k] = v + } + } + + return out +} + +// TimeElapsed takes a timestamp and a duration and checks whether at least the given duration has elapsed since the timestamp. +// If yes (or if the timestamp is nil), it returns true, otherwise it returns false. +func TimeElapsed(timestamp *metav1.Time, duration time.Duration) bool { + if timestamp == nil { + return true + } + + var ( + end = metav1.NewTime(timestamp.Time.UTC().Add(duration)) + now = metav1.NewTime(time.Now().UTC()) + ) + return !now.Before(&end) +} + +// FindFreePort finds a free port on the host machine and returns it.
+func FindFreePort() (int, error) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return 0, err + } + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil +} + +// TestEmail validates the provided email address against a regular expression and returns whether it matches. +func TestEmail(email string) bool { + match, _ := regexp.MatchString(`^[^@]+@(?:[a-zA-Z-0-9]+\.)+[a-zA-Z]{2,}$`, email) + return match +} + +// IsTrue returns true if the passed bool pointer is not nil and true. +func IsTrue(value *bool) bool { + return value != nil && *value +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/random.go b/vendor/github.com/gardener/gardener/pkg/utils/random.go new file mode 100644 index 000000000..cf44a155c --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/random.go @@ -0,0 +1,43 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "crypto/rand" + "math/big" +) + +// GenerateRandomString uses crypto/rand to generate a random string of the specified length n. +// The set of allowed characters is [0-9a-zA-Z], thus no special characters are included in the output. +// Returns error if there was a problem during the random generation. +func GenerateRandomString(n int) (string, error) { + allowedCharacters := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + return GenerateRandomStringFromCharset(n, allowedCharacters) +} + +// GenerateRandomStringFromCharset generates a cryptographically secure random string of the specified length n. +// The set of allowed characters can be specified. Returns error if there was a problem during the random generation. +func GenerateRandomStringFromCharset(n int, allowedCharacters string) (string, error) { + output := make([]byte, n) + max := new(big.Int).SetInt64(int64(len(allowedCharacters))) + for i := range output { + randomCharacter, err := rand.Int(rand.Reader, max) + if err != nil { + return "", err + } + output[i] = allowedCharacters[randomCharacter.Int64()] + } + return string(output), nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/template_engine.go b/vendor/github.com/gardener/gardener/pkg/utils/template_engine.go new file mode 100644 index 000000000..efe169c2a --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/template_engine.go @@ -0,0 +1,100 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "bytes" + "path/filepath" + "strings" + "text/template" +) + +const templateDir = "templates" + +var standardFunctions = template.FuncMap{ + "indent": func(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) + }, +} + +// RenderTemplate reads the given template file in the templates directory and renders it. It injects a bunch +// of standard functions which can be used in the template file. +func RenderTemplate(filename string, values interface{}) ([]byte, error) { + return RenderTemplateWithFuncs(filename, standardFunctions, values) +} + +// RenderTemplateWithFuncs reads the given template file in the templates directory and renders it. It allows +// providing a user-defined template.FuncMap to the template which will be merged with the standard +// functions and provided to the template file. The user-defined functions always take precedence in the +// merge process. +func RenderTemplateWithFuncs(filename string, funcs template.FuncMap, values interface{}) ([]byte, error) { + return RenderTemplatesWithFuncs([]string{filename}, funcs, values) +} + +// RenderTemplatesWithFuncs does the same as RenderTemplateWithFuncs except that it allows providing multiple +// template files instead of only exactly one. +func RenderTemplatesWithFuncs(filenames []string, funcs template.FuncMap, values interface{}) ([]byte, error) { + var paths []string + for _, filename := range filenames { + paths = append(paths, filepath.Join(templateDir, filename)) + } + + templateObj, err := template. + New(filenames[0][strings.LastIndex(filenames[0], "/")+1:]). + Funcs(mergeFunctions(funcs)). + ParseFiles(paths...) + if err != nil { + return nil, err + } + return render(templateObj, values) +} + +// RenderLocalTemplate uses a template given as a string and renders it. Thus, the template does not +// necessarily need to be stored as a file. +func RenderLocalTemplate(tpl string, values interface{}) ([]byte, error) { + templateObj, err := template. + New("tpl"). + Parse(tpl) + if err != nil { + return nil, err + } + return render(templateObj, values) +} + +// render takes a text/template.Template object and a values interface which are used to render the +// template. It returns the rendered result as byte slice, or an error if something went wrong. +func render(tpl *template.Template, values interface{}) ([]byte, error) { + var result bytes.Buffer + err := tpl.Execute(&result, values) + if err != nil { + return nil, err + } + return result.Bytes(), nil +} + +// mergeFunctions takes a template.FuncMap funcs and merges it with the standard functions. If funcs +// defines a function with a name already existing in the standard functions map, the standard function will +// be overwritten.
+func mergeFunctions(funcs template.FuncMap) template.FuncMap { + var functions = template.FuncMap{} + for i, function := range standardFunctions { + functions[i] = function + } + for i, function := range funcs { + functions[i] = function + } + return functions +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/timewindow.go b/vendor/github.com/gardener/gardener/pkg/utils/timewindow.go new file mode 100644 index 000000000..08ccef8d4 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/timewindow.go @@ -0,0 +1,237 @@ +// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "fmt" + "time" + + "k8s.io/apimachinery/pkg/util/rand" +) + +const maintenanceTimeLayout = "150405-0700" + +// MaintenanceTime is a structure holding a maintenance time. +type MaintenanceTime struct { + hour int + minute int + second int +} + +// NewMaintenanceTime returns a maintenance time structure based on the given hour, minute, and second. +func NewMaintenanceTime(hour, minute, second int) *MaintenanceTime { + if hour >= 24 { + panic(fmt.Sprintf("invalid hour %d", hour)) + } + if minute >= 60 { + panic(fmt.Sprintf("invalid minute %d", minute)) + } + if second >= 60 { + panic(fmt.Sprintf("invalid second %d", second)) + } + return &MaintenanceTime{hour, minute, second} +} + +// ParseMaintenanceTime parses the given value and returns it as MaintenanceTime object. In case the parsing fails, an +// error is returned. The time object is converted to UTC zone. +func ParseMaintenanceTime(value string) (*MaintenanceTime, error) { + t, err := time.Parse(maintenanceTimeLayout, value) + if err != nil { + return nil, fmt.Errorf("could not parse the value into the maintenanceTime format: %s", err.Error()) + } + return timeToMaintenanceTime(t), nil +} + +func timeToMaintenanceTime(t time.Time) *MaintenanceTime { + t = t.UTC() + return NewMaintenanceTime(t.Hour(), t.Minute(), t.Second()) +} + +// RandomMaintenanceTimeWindow computes a random time window and returns both in the format HHMMSS+ZONE. +func RandomMaintenanceTimeWindow() *MaintenanceTimeWindow { + var ( + hour = rand.IntnRange(0, 23) + begin = NewMaintenanceTime(hour, 0, 0) + end = NewMaintenanceTime(hour+1, 0, 0) + ) + return NewMaintenanceTimeWindow(begin, end) +} + +// String returns the string representation of the maintenance time. +func (m *MaintenanceTime) String() string { + return fmt.Sprintf("%.02d:%.02d:%.02d", m.hour, m.minute, m.second) +} + +// Formatted formats the maintenance time object to the maintenance time format. +func (m *MaintenanceTime) Formatted() string { + return m.zeroTime().Format(maintenanceTimeLayout) +} + +func (m *MaintenanceTime) zeroTime() time.Time { + return time.Date(1, time.January, 1, m.hour, m.minute, m.second, 0, time.UTC) +} + +// Hour returns the hour of the maintenance time. 
+func (m *MaintenanceTime) Hour() int { + return m.hour +} + +// Minute returns the minute of the maintenance time. +func (m *MaintenanceTime) Minute() int { + return m.minute +} + +// Second returns the second of the maintenance time. +func (m *MaintenanceTime) Second() int { + return m.second +} + +// Add adds the given hour, minute and second to the maintenance time and returns a new maintenance time. +func (m *MaintenanceTime) Add(hour, minute, second int) *MaintenanceTime { + t := m.zeroTime().Add(time.Duration(hour)*time.Hour + time.Duration(minute)*time.Minute + time.Duration(second)*time.Second) + return timeToMaintenanceTime(t) +} + +// Compare compares the two times m and other. It returns +// * i < 0 if m is before other +// * i = 0 if m is equal to other +// * i > 0 if m is after other +func (m *MaintenanceTime) Compare(other *MaintenanceTime) int { + if hourDiff := m.hour - other.hour; hourDiff != 0 { + return hourDiff + } + if minuteDiff := m.minute - other.minute; minuteDiff != 0 { + return minuteDiff + } + return m.second - other.second +} + +func (m *MaintenanceTime) adjust(t time.Time) time.Time { + t = t.UTC() + return time.Date(t.Year(), t.Month(), t.Day(), m.hour, m.minute, m.second, 0, t.Location()) +} + +// MaintenanceTimeWindow contains the beginning and the end of a time window in which maintenance operations can be performed. +type MaintenanceTimeWindow struct { + begin *MaintenanceTime + end *MaintenanceTime +} + +// AlwaysTimeWindow is a MaintenanceTimeWindow that contains all durations. +var AlwaysTimeWindow = NewMaintenanceTimeWindow(NewMaintenanceTime(0, 0, 0), NewMaintenanceTime(23, 59, 59)) + +// NewMaintenanceTimeWindow takes a begin and an end of a time window and returns a pointer to a MaintenanceTimeWindow structure. +func NewMaintenanceTimeWindow(begin, end *MaintenanceTime) *MaintenanceTimeWindow { + return &MaintenanceTimeWindow{begin, end} +} + +// ParseMaintenanceTimeWindow takes a begin and an end of a time window in the maintenance format and returns a pointer +// to a MaintenanceTimeWindow structure. +func ParseMaintenanceTimeWindow(begin, end string) (*MaintenanceTimeWindow, error) { + maintenanceWindowBegin, err := ParseMaintenanceTime(begin) + if err != nil { + return nil, fmt.Errorf("could not parse begin time: %s", err.Error()) + } + maintenanceWindowEnd, err := ParseMaintenanceTime(end) + if err != nil { + return nil, fmt.Errorf("could not parse end time: %s", err.Error()) + } + return NewMaintenanceTimeWindow(maintenanceWindowBegin, maintenanceWindowEnd), nil +} + +// String returns the string representation of the time window. +func (m *MaintenanceTimeWindow) String() string { + return fmt.Sprintf("begin=%s, end=%s", m.begin, m.end) +} + +// Begin returns the begin of the time window. +func (m *MaintenanceTimeWindow) Begin() *MaintenanceTime { + return m.begin +} + +// End returns the end of the time window. +func (m *MaintenanceTimeWindow) End() *MaintenanceTime { + return m.end +} + +// WithBegin returns a new maintenance time window with the given begin time (the end will be kept). +func (m *MaintenanceTimeWindow) WithBegin(begin *MaintenanceTime) *MaintenanceTimeWindow { + return NewMaintenanceTimeWindow(begin, m.end) +} + +// WithEnd returns a new maintenance time window with the given end time (the begin will be kept). +func (m *MaintenanceTimeWindow) WithEnd(end *MaintenanceTime) *MaintenanceTimeWindow { + return NewMaintenanceTimeWindow(m.begin, end) +} + +// Contains returns true in case the given time is within the time window.
+func (m *MaintenanceTimeWindow) Contains(tTime time.Time) bool { + t := timeToMaintenanceTime(tTime) + + if m.spansDifferentDays() { + return !(t.Compare(m.end) > 0 && t.Compare(m.begin) < 0) + } + return t.Compare(m.begin) >= 0 && t.Compare(m.end) <= 0 +} + +var ( + // RandomFunc is a function that computes a random number. + RandomFunc = rand.Int63nRange +) + +// RandomDurationUntilNext computes the duration until a random time within the time window for the next maintenance +// execution. +func (m *MaintenanceTimeWindow) RandomDurationUntilNext(from time.Time) time.Duration { + from = from.UTC() + + var ( + begin = m.adjustedBegin(from) + end = m.adjustedEnd(from) + ) + + if begin.Sub(from) < 0 && (m.Contains(from) || from.After(end)) { + begin = begin.AddDate(0, 0, 1) + end = end.AddDate(0, 0, 1) + } + + delta := end.Sub(begin) + return time.Duration(int64(begin.Sub(from)) + RandomFunc(0, delta.Nanoseconds())) +} + +// Duration returns the duration of the maintenance time window. +func (m *MaintenanceTimeWindow) Duration() time.Duration { + var ( + from = time.Date(0, time.January, 1, 0, 0, 0, 0, time.UTC) + begin = m.adjustedBegin(from) + end = m.adjustedEnd(from) + ) + return end.Sub(begin) +} + +func (m *MaintenanceTimeWindow) adjustedBegin(t time.Time) time.Time { + return m.begin.adjust(t) +} + +func (m *MaintenanceTimeWindow) adjustedEnd(t time.Time) time.Time { + end := m.end.adjust(t) + if m.end.Compare(m.begin) <= 0 { + return end.AddDate(0, 0, 1) + } + return end +} + +func (m *MaintenanceTimeWindow) spansDifferentDays() bool { + return m.end.Compare(m.begin) < 0 +} diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. 
any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md new file mode 100644 index 000000000..444df08f8 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -0,0 +1,89 @@ +# errwrap + +`errwrap` is a package for Go that formalizes the pattern of wrapping errors +and checking if an error contains another error. + +There is a common pattern in Go of taking a returned `error` value and +then wrapping it (such as with `fmt.Errorf`) before returning it. The problem +with this pattern is that you completely lose the original `error` structure. + +Arguably the _correct_ approach is that you should make a custom structure +implementing the `error` interface, and have the original error as a field +on that structure, such [as this example](http://golang.org/pkg/os/#PathError). +This is a good approach, but you have to know the entire chain of possible +rewrapping that happens, when you might just care about one. + +`errwrap` formalizes this pattern (it doesn't matter what approach you use +above) by giving a single interface for wrapping errors, checking if a specific +error is wrapped, and extracting that error. + +## Installation and Docs + +Install using `go get github.com/hashicorp/errwrap`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/errwrap + +## Usage + +#### Basic Usage + +Below is a very basic example of its usage: + +```go +// A function that always returns an error, but wraps it, like a real +// function might. +func tryOpen() error { + _, err := os.Open("/i/dont/exist") + if err != nil { + return errwrap.Wrapf("Doesn't exist: {{err}}", err) + } + + return nil +} + +func main() { + err := tryOpen() + + // We can use the Contains helpers to check if an error contains + // another error. It is safe to do this with a nil error, or with + // an error that doesn't even use the errwrap package. + if errwrap.Contains(err, "does not exist") { + // Do something + } + if errwrap.ContainsType(err, new(os.PathError)) { + // Do something + } + + // Or we can use the associated `Get` functions to just extract + // a specific error. This would return nil if that specific error doesn't + // exist. + perr := errwrap.GetType(err, new(os.PathError)) +} +``` + +#### Custom Types + +If you're already making custom types that properly wrap errors, then +you can get all the functionality of `errwraps.Contains` and such by +implementing the `Wrapper` interface with just one function. Example: + +```go +type AppError { + Code ErrorCode + Err error +} + +func (e *AppError) WrappedErrors() []error { + return []error{e.Err} +} +``` + +Now this works: + +```go +err := &AppError{Err: fmt.Errorf("an error")} +if errwrap.ContainsType(err, fmt.Errorf("")) { + // This will work! +} +``` diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go new file mode 100644 index 000000000..a733bef18 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -0,0 +1,169 @@ +// Package errwrap implements methods to formalize error wrapping in Go. +// +// All of the top-level functions that take an `error` are built to be able +// to take any error, not just wrapped errors. 
This allows you to use errwrap +// without having to type-check and type-cast everywhere. +package errwrap + +import ( + "errors" + "reflect" + "strings" +) + +// WalkFunc is the callback called for Walk. +type WalkFunc func(error) + +// Wrapper is an interface that can be implemented by custom types to +// have all the Contains, Get, etc. functions in errwrap work. +// +// When Walk reaches a Wrapper, it will call the callback for every +// wrapped error in addition to the wrapper itself. Since all the top-level +// functions in errwrap use Walk, this means that all those functions work +// with your custom type. +type Wrapper interface { + WrappedErrors() []error +} + +// Wrap defines that outer wraps inner, returning an error type that +// can be cleanly used with the other methods in this package, such as +// Contains, GetAll, etc. +// +// This function won't modify the error message at all (the outer message +// will be used). +func Wrap(outer, inner error) error { + return &wrappedError{ + Outer: outer, + Inner: inner, + } +} + +// Wrapf wraps an error with a formatting message. This is similar to using +// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap +// errors, you should replace it with this. +// +// format is the format of the error message. The string '{{err}}' will +// be replaced with the original error message. +func Wrapf(format string, err error) error { + outerMsg := "" + if err != nil { + outerMsg = err.Error() + } + + outer := errors.New(strings.Replace( + format, "{{err}}", outerMsg, -1)) + + return Wrap(outer, err) +} + +// Contains checks if the given error contains an error with the +// message msg. If err is not a wrapped error, this will always return +// false unless the error itself happens to match this msg. +func Contains(err error, msg string) bool { + return len(GetAll(err, msg)) > 0 +} + +// ContainsType checks if the given error contains an error with +// the same concrete type as v. If err is not a wrapped error, this will +// check the err itself. +func ContainsType(err error, v interface{}) bool { + return len(GetAllType(err, v)) > 0 +} + +// Get is the same as GetAll but returns the deepest matching error. +func Get(err error, msg string) error { + es := GetAll(err, msg) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetType is the same as GetAllType but returns the deepest matching error. +func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. 
If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} diff --git a/vendor/github.com/hashicorp/errwrap/go.mod b/vendor/github.com/hashicorp/errwrap/go.mod new file mode 100644 index 000000000..c9b84022c --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/errwrap diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml new file mode 100644 index 000000000..304a83595 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.x + +branches: + only: + - master + +script: make test testrace diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 000000000..82b4de97c --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile new file mode 100644 index 000000000..b97cd6ed0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/Makefile @@ -0,0 +1,31 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code. +test: generate + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} + +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. +updatedeps: + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" + +# generate runs `go generate` to build the dynamically generated source files. +generate: + @echo "==> Generating..." + @find . -type f -name '.DS_Store' -delete + @go list ./... \ + | grep -v "/vendor/" \ + | xargs -n1 go generate + +.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 000000000..ead5830f7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,97 @@ +# go-multierror + +[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: https://travis-ci.org/hashicorp/go-multierror +[godocs]: https://godoc.org/github.com/hashicorp/go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. + +`go-multierror` implements the +[errwrap](https://github.com/hashicorp/errwrap) interface so that it can +be used with that library, as well. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-multierror + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. + +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... 
accumulate errors here, maybe using Append + +if result != nil { + result.ErrorFormat = func([]error) string { + return "errors!" + } +} +``` + +**Accessing the list of errors** + +`multierror.Error` implements `error` so if the caller doesn't know about +multierror, it will work just fine. But if you're aware a multierror might +be returned, you can use type switches to access the list of errors: + +```go +if err := something(); err != nil { + if merr, ok := err.(*multierror.Error); ok { + // Use merr.Errors + } +} +``` + +**Returning a multierror only if there are errors** + +If you build a `multierror.Error`, you can use the `ErrorOrNil` function +to return an `error` implementation only if there are errors to return: + +```go +var result *multierror.Error + +// ... accumulate errors here + +// Return the `error` only if errors were added to the multierror, otherwise +// return nil since there are no errors. +return result.ErrorOrNil() +``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go new file mode 100644 index 000000000..775b6e753 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -0,0 +1,41 @@ +package multierror + +// Append is a helper function that will append more errors +// onto an Error in order to create a larger multi-error. +// +// If err is not a multierror.Error, then it will be turned into +// one. If any of the errs are multierr.Error, they will be flattened +// one level into err. +func Append(err error, errs ...error) *Error { + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Go through each error and flatten + for _, e := range errs { + switch e := e.(type) { + case *Error: + if e != nil { + err.Errors = append(err.Errors, e.Errors...) + } + default: + if e != nil { + err.Errors = append(err.Errors, e) + } + } + } + + return err + default: + newErrs := make([]error, 0, len(errs)+1) + if err != nil { + newErrs = append(newErrs, err) + } + newErrs = append(newErrs, errs...) + + return Append(&Error{}, newErrs...) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go new file mode 100644 index 000000000..aab8e9abe --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/flatten.go @@ -0,0 +1,26 @@ +package multierror + +// Flatten flattens the given error, merging any *Errors together into +// a single *Error. +func Flatten(err error) error { + // If it isn't an *Error, just return the error as-is + if _, ok := err.(*Error); !ok { + return err + } + + // Otherwise, make the result and flatten away! + flatErr := new(Error) + flatten(err, flatErr) + return flatErr +} + +func flatten(err error, flatErr *Error) { + switch err := err.(type) { + case *Error: + for _, e := range err.Errors { + flatten(e, flatErr) + } + default: + flatErr.Errors = append(flatErr.Errors, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go new file mode 100644 index 000000000..47f13c49a --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/format.go @@ -0,0 +1,27 @@ +package multierror + +import ( + "fmt" + "strings" +) + +// ErrorFormatFunc is a function callback that is called by Error to +// turn the list of errors into a string. 
+type ErrorFormatFunc func([]error) string + +// ListFormatFunc is a basic formatter that outputs the number of errors +// that occurred along with a bullet point list of the errors. +func ListFormatFunc(es []error) string { + if len(es) == 1 { + return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0]) + } + + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) +} diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod new file mode 100644 index 000000000..2534331d5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.mod @@ -0,0 +1,3 @@ +module github.com/hashicorp/go-multierror + +require github.com/hashicorp/errwrap v1.0.0 diff --git a/vendor/github.com/hashicorp/go-multierror/go.sum b/vendor/github.com/hashicorp/go-multierror/go.sum new file mode 100644 index 000000000..85b1f8ff3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.sum @@ -0,0 +1,4 @@ +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go new file mode 100644 index 000000000..89b1422d1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -0,0 +1,51 @@ +package multierror + +import ( + "fmt" +) + +// Error is an error type to track multiple errors. This is used to +// accumulate errors in cases and return them as a single "error". +type Error struct { + Errors []error + ErrorFormat ErrorFormatFunc +} + +func (e *Error) Error() string { + fn := e.ErrorFormat + if fn == nil { + fn = ListFormatFunc + } + + return fn(e.Errors) +} + +// ErrorOrNil returns an error interface if this Error represents +// a list of errors, or returns nil if the list of errors is empty. This +// function is useful at the end of accumulation to make sure that the value +// returned represents the existence of errors. +func (e *Error) ErrorOrNil() error { + if e == nil { + return nil + } + if len(e.Errors) == 0 { + return nil + } + + return e +} + +func (e *Error) GoString() string { + return fmt.Sprintf("*%#v", *e) +} + +// WrappedErrors returns the list of errors that this Error is wrapping. +// It is an implementation of the errwrap.Wrapper interface so that +// multierror.Error can be used with that library. +// +// This method is not safe to be called concurrently and is no different +// than accessing the Errors field directly. It is implemented only to +// satisfy the errwrap.Wrapper interface. +func (e *Error) WrappedErrors() []error { + return e.Errors +} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go new file mode 100644 index 000000000..5c477abe4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/prefix.go @@ -0,0 +1,37 @@ +package multierror + +import ( + "fmt" + + "github.com/hashicorp/errwrap" +) + +// Prefix is a helper function that will prefix some text +// to the given error. 
If the error is a multierror.Error, then +// it will be prefixed to each wrapped error. +// +// This is useful to use when appending multiple multierrors +// together in order to give better scoping. +func Prefix(err error, prefix string) error { + if err == nil { + return nil + } + + format := fmt.Sprintf("%s {{err}}", prefix) + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Wrap each of the errors + for i, e := range err.Errors { + err.Errors[i] = errwrap.Wrapf(format, e) + } + + return err + default: + return errwrap.Wrapf(format, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go new file mode 100644 index 000000000..fecb14e81 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/sort.go @@ -0,0 +1,16 @@ +package multierror + +// Len implements sort.Interface function for length +func (err Error) Len() int { + return len(err.Errors) +} + +// Swap implements sort.Interface function for swapping elements +func (err Error) Swap(i, j int) { + err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] +} + +// Less implements sort.Interface function for determining order +func (err Error) Less(i, j int) bool { + return err.Errors[i].Error() < err.Errors[j].Error() +} diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md index 4920406ae..aeadb66e0 100644 --- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md @@ -1,3 +1,29 @@ +## 1.10.1 + +## Fixes +- stack backtrace: fix skipping (#600) [2a4c0bd] + +## 1.10.0 + +## Fixes +- stack backtrace: fix alignment and skipping [66915d6] +- fix typo in documentation [8f97b93] + +## 1.9.0 + +## Features +- Option to print output into report, when tests have passed [0545415] + +## Fixes +- Fixed typos in comments [0ecbc58] +- gofmt code [a7f8bfb] +- Simplify code [7454d00] +- Simplify concatenation, incrementation and function assignment [4825557] +- Avoid unnecessary conversions [9d9403c] +- JUnit: include more detailed information about panic [19cca4b] +- Print help to stdout when the user asks for help [4cb7441] + + ## 1.8.0 ### New Features diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go index dab2a2470..ac55a5ad2 100644 --- a/vendor/github.com/onsi/ginkgo/config/config.go +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -20,7 +20,7 @@ import ( "fmt" ) -const VERSION = "1.8.0" +const VERSION = "1.10.1" type GinkgoConfigType struct { RandomSeed int64 @@ -52,13 +52,14 @@ type DefaultReporterConfigType struct { Succinct bool Verbose bool FullTrace bool + ReportPassed bool } var DefaultReporterConfig = DefaultReporterConfigType{} func processPrefix(prefix string) string { if prefix != "" { - prefix = prefix + "." + prefix += "." 
} return prefix } @@ -98,6 +99,7 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.") flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report") flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs") + flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.") } func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string { @@ -196,5 +198,9 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor result = append(result, fmt.Sprintf("--%strace", prefix)) } + if reporter.ReportPassed { + result = append(result, fmt.Sprintf("--%sreportPassed", prefix)) + } + return result } diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go index a6b96d88f..8734c061d 100644 --- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go +++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -283,7 +283,7 @@ func GinkgoRecover() { //BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. // //In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally -//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object +//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object //or method and, within that Describe, outline a number of Contexts and Whens. func Describe(text string, body func()) bool { globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) @@ -499,7 +499,7 @@ func AfterSuite(body interface{}, timeout ...float64) bool { //until that node is done before running. // //SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is -//run on all nodes, but *only* after the first function completes succesfully. Ginkgo also makes it possible to send data from the first function (on Node 1) +//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1) //to the second function (on all the other nodes). // //The functions have the following signatures. 
The first function (which only runs on node 1) has the signature: diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go index fa2f0bf73..aa89d6cba 100644 --- a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go +++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go @@ -11,19 +11,35 @@ import ( func New(skip int) types.CodeLocation { _, file, line, _ := runtime.Caller(skip + 1) - stackTrace := PruneStack(string(debug.Stack()), skip) + stackTrace := PruneStack(string(debug.Stack()), skip+1) return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} } +// PruneStack removes references to functions that are internal to Ginkgo +// and the Go runtime from a stack string and a certain number of stack entries +// at the beginning of the stack. The stack string has the format +// as returned by runtime/debug.Stack. The leading goroutine information is +// optional and always removed if present. Beware that runtime/debug.Stack +// adds itself as first entry, so typically skip must be >= 1 to remove that +// entry. func PruneStack(fullStackTrace string, skip int) string { stack := strings.Split(fullStackTrace, "\n") + // Ensure that the even entries are the method names and the + // the odd entries the source code information. + if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") { + // Ignore "goroutine 29 [running]:" line. + stack = stack[1:] + } + // The "+1" is for skipping over the initial entry, which is + // runtime/debug.Stack() itself. if len(stack) > 2*(skip+1) { stack = stack[2*(skip+1):] } prunedStack := []string{} re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) for i := 0; i < len(stack)/2; i++ { - if !re.Match([]byte(stack[i*2])) { + // We filter out based on the source code file name. 
+ if !re.Match([]byte(stack[i*2+1])) { prunedStack = append(prunedStack, stack[i*2]) prunedStack = append(prunedStack, stack[i*2+1]) } diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go index d6d54234c..393901e11 100644 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go @@ -17,7 +17,7 @@ type benchmarker struct { func newBenchmarker() *benchmarker { return &benchmarker{ - measurements: make(map[string]*types.SpecMeasurement, 0), + measurements: make(map[string]*types.SpecMeasurement), } } diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go index 6b54afe01..f9ab30067 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go @@ -54,11 +54,11 @@ func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporte config: config, stenographer: stenographer, - suiteBeginnings: make(chan configAndSuite, 0), - beforeSuites: make(chan *types.SetupSummary, 0), - afterSuites: make(chan *types.SetupSummary, 0), - specCompletions: make(chan *types.SpecSummary, 0), - suiteEndings: make(chan *types.SuiteSummary, 0), + suiteBeginnings: make(chan configAndSuite), + beforeSuites: make(chan *types.SetupSummary), + afterSuites: make(chan *types.SetupSummary), + specCompletions: make(chan *types.SpecSummary), + suiteEndings: make(chan *types.SuiteSummary), } go aggregator.mux() @@ -227,7 +227,7 @@ func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (fi aggregatedSuiteSummary.SuiteSucceeded = true for _, suiteSummary := range aggregator.aggregatedSuiteEndings { - if suiteSummary.SuiteSucceeded == false { + if !suiteSummary.SuiteSucceeded { aggregatedSuiteSummary.SuiteSucceeded = false } diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go index 367c54daf..93e9dac05 100644 --- a/vendor/github.com/onsi/ginkgo/internal/remote/server.go +++ b/vendor/github.com/onsi/ginkgo/internal/remote/server.go @@ -213,7 +213,7 @@ func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Re c := spec_iterator.Counter{} server.lock.Lock() c.Index = server.counter - server.counter = server.counter + 1 + server.counter++ server.lock.Unlock() json.NewEncoder(writer).Encode(c) diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go index 7fd68ee8e..6eef40a0e 100644 --- a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go +++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go @@ -107,11 +107,11 @@ func (spec *Spec) Summary(suiteID string) *types.SpecSummary { NumberOfSamples: spec.subject.Samples(), ComponentTexts: componentTexts, ComponentCodeLocations: componentCodeLocations, - State: spec.getState(), - RunTime: runTime, - Failure: spec.failure, - Measurements: spec.measurementsReport(), - SuiteID: suiteID, + State: spec.getState(), + RunTime: runTime, + Failure: spec.failure, + Measurements: spec.measurementsReport(), + SuiteID: suiteID, } } diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go index 27c0d1d6c..8a2007137 100644 --- a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go +++ 
b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go @@ -107,11 +107,11 @@ func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, toMatch := e.toMatch(description, i) if focusFilter != nil { - matchesFocus = focusFilter.Match([]byte(toMatch)) + matchesFocus = focusFilter.Match(toMatch) } if skipFilter != nil { - matchesSkip = skipFilter.Match([]byte(toMatch)) + matchesSkip = skipFilter.Match(toMatch) } if !matchesFocus || matchesSkip { diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go index 2c683cb8b..c9a0a60d8 100644 --- a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go @@ -300,7 +300,7 @@ func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) { } func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) { - if failed && len(summary.CapturedOutput) == 0 { + if len(summary.CapturedOutput) == 0 { summary.CapturedOutput = string(runner.writer.Bytes()) } for i := len(runner.reporters) - 1; i >= 1; i-- { diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go index ac58dd5f7..c76283b46 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go @@ -62,6 +62,9 @@ func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct) } else { reporter.stenographer.AnnounceSuccesfulSpec(specSummary) + if reporter.config.ReportPassed { + reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput) + } } case types.SpecStatePending: reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct) diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go index 2c9f3c792..89a7c8465 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go @@ -32,12 +32,17 @@ type JUnitTestSuite struct { type JUnitTestCase struct { Name string `xml:"name,attr"` ClassName string `xml:"classname,attr"` + PassedMessage *JUnitPassedMessage `xml:"passed,omitempty"` FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"` Skipped *JUnitSkipped `xml:"skipped,omitempty"` Time float64 `xml:"time,attr"` SystemOut string `xml:"system-out,omitempty"` } +type JUnitPassedMessage struct { + Message string `xml:",chardata"` +} + type JUnitFailureMessage struct { Type string `xml:"type,attr"` Message string `xml:",chardata"` @@ -48,9 +53,10 @@ type JUnitSkipped struct { } type JUnitReporter struct { - suite JUnitTestSuite - filename string - testSuiteName string + suite JUnitTestSuite + filename string + testSuiteName string + ReporterConfig config.DefaultReporterConfigType } //NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename. 
@@ -60,12 +66,13 @@ func NewJUnitReporter(filename string) *JUnitReporter { } } -func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { +func (reporter *JUnitReporter) SpecSuiteWillBegin(ginkgoConfig config.GinkgoConfigType, summary *types.SuiteSummary) { reporter.suite = JUnitTestSuite{ Name: summary.SuiteDescription, TestCases: []JUnitTestCase{}, } reporter.testSuiteName = summary.SuiteDescription + reporter.ReporterConfig = config.DefaultReporterConfig } func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) { @@ -105,11 +112,21 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { Name: strings.Join(specSummary.ComponentTexts[1:], " "), ClassName: reporter.testSuiteName, } + if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed { + testCase.PassedMessage = &JUnitPassedMessage{ + Message: specSummary.CapturedOutput, + } + } if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { testCase.FailureMessage = &JUnitFailureMessage{ Type: reporter.failureTypeForState(specSummary.State), Message: failureMessage(specSummary.Failure), } + if specSummary.State == types.SpecStatePanicked { + testCase.FailureMessage.Message += fmt.Sprintf("\n\nPanic: %s\n\nFull stack:\n%s", + specSummary.Failure.ForwardedPanic, + specSummary.Failure.Location.FullStackTrace) + } testCase.SystemOut = specSummary.CapturedOutput } if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go index 36ee2a600..c8e27b2a7 100644 --- a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go +++ b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go @@ -22,8 +22,9 @@ const ( ) type TeamCityReporter struct { - writer io.Writer - testSuiteName string + writer io.Writer + testSuiteName string + ReporterConfig config.DefaultReporterConfigType } func NewTeamCityReporter(writer io.Writer) *TeamCityReporter { @@ -65,6 +66,10 @@ func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) { func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) { testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) + if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed { + details := escape(specSummary.CapturedOutput) + fmt.Fprintf(reporter.writer, "%s[testPassed name='%s' details='%s']", messageId, testName, details) + } if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { message := escape(specSummary.Failure.ComponentCodeLocation.String()) details := escape(specSummary.Failure.Message) diff --git a/vendor/github.com/onsi/ginkgo/types/types.go b/vendor/github.com/onsi/ginkgo/types/types.go index 0e89521be..e4e32b761 100644 --- a/vendor/github.com/onsi/ginkgo/types/types.go +++ b/vendor/github.com/onsi/ginkgo/types/types.go @@ -17,7 +17,7 @@ each node does not deterministically know how many specs it will end up running. Unfortunately making such a change would break backward compatibility. 
-Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unkown fields +Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unknown fields with -1. */ type SuiteSummary struct { diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml index 2420a5d07..d147e451d 100644 --- a/vendor/github.com/onsi/gomega/.travis.yml +++ b/vendor/github.com/onsi/gomega/.travis.yml @@ -4,6 +4,7 @@ go: - 1.10.x - 1.11.x - 1.12.x + - gotip env: - GO111MODULE=on diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 5d1eda837..f67074016 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,35 @@ +## 1.7.0 + +### Features +- export format property variables (#347) [642e5ba] + +### Fixes +- minor fix in the documentation of ExpectWithOffset (#358) [beea727] + +## 1.6.0 + +### Features + +- Display special chars on error [41e1b26] +- Add BeElementOf matcher [6a48b48] + +### Fixes + +- Remove duplication in XML matcher tests [cc1a6cb] +- Remove unnecessary conversions (#357) [7bf756a] +- Fixed import order (#353) [2e3b965] +- Added missing error handling in test (#355) [c98d3eb] +- Simplify code (#356) [0001ed9] +- Simplify code (#354) [0d9100e] +- Fixed typos (#352) [3f647c4] +- Add failure message tests to BeElementOf matcher [efe19c3] +- Update go-testcov untested sections [37ee382] +- Mark all uncovered files so go-testcov ./... works [53b150e] +- Reenable gotip in travis [5c249dc] +- Fix the typo of comment (#345) [f0e010e] +- Optimize contain_element_matcher [abeb93d] + + ## 1.5.0 ### Features diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index 6559525f1..fae25adce 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -1,6 +1,9 @@ /* Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information. */ + +// untested sections: 4 + package format import ( @@ -33,7 +36,15 @@ var PrintContextObjects = false // TruncatedDiff choose if we should display a truncated pretty diff or not var TruncatedDiff = true -// Ctx interface defined here to keep backwards compatability with go < 1.7 +// TruncateThreshold (default 50) specifies the maximum length string to print in string comparison assertion error +// messages. +var TruncateThreshold uint = 50 + +// CharactersAroundMismatchToInclude (default 5) specifies how many contextual characters should be printed before and +// after the first diff location in a truncated string assertion error message. 
+var CharactersAroundMismatchToInclude uint = 5 + +// Ctx interface defined here to keep backwards compatibility with go < 1.7 // It matches the context.Context interface type Ctx interface { Deadline() (deadline time.Time, ok bool) @@ -58,7 +69,7 @@ Generates a formatted matcher success/failure message of the form: -If expected is omited, then the message looks like: +If expected is omitted, then the message looks like: Expected @@ -85,7 +96,7 @@ to equal | */ func MessageWithDiff(actual, message, expected string) string { - if TruncatedDiff && len(actual) >= truncateThreshold && len(expected) >= truncateThreshold { + if TruncatedDiff && len(actual) >= int(TruncateThreshold) && len(expected) >= int(TruncateThreshold) { diffPoint := findFirstMismatch(actual, expected) formattedActual := truncateAndFormat(actual, diffPoint) formattedExpected := truncateAndFormat(expected, diffPoint) @@ -97,14 +108,23 @@ func MessageWithDiff(actual, message, expected string) string { padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|" return Message(formattedActual, message+padding, formattedExpected) } + + actual = escapedWithGoSyntax(actual) + expected = escapedWithGoSyntax(expected) + return Message(actual, message, expected) } +func escapedWithGoSyntax(str string) string { + withQuotes := fmt.Sprintf("%q", str) + return withQuotes[1 : len(withQuotes)-1] +} + func truncateAndFormat(str string, index int) string { leftPadding := `...` rightPadding := `...` - start := index - charactersAroundMismatchToInclude + start := index - int(CharactersAroundMismatchToInclude) if start < 0 { start = 0 leftPadding = "" @@ -112,7 +132,7 @@ func truncateAndFormat(str string, index int) string { // slice index must include the mis-matched character lengthOfMismatchedCharacter := 1 - end := index + charactersAroundMismatchToInclude + lengthOfMismatchedCharacter + end := index + int(CharactersAroundMismatchToInclude) + lengthOfMismatchedCharacter if end > len(str) { end = len(str) rightPadding = "" @@ -141,11 +161,6 @@ func findFirstMismatch(a, b string) int { return 0 } -const ( - truncateThreshold = 50 - charactersAroundMismatchToInclude = 5 -) - /* Pretty prints the passed in object at the passed in indentation level. @@ -288,7 +303,7 @@ func formatString(object interface{}, indentation uint) string { } } - return fmt.Sprintf("%s", result) + return result } else { return fmt.Sprintf("%q", object) } diff --git a/vendor/github.com/onsi/gomega/gbytes/say_matcher.go b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go index 14317182b..0763f5e2d 100644 --- a/vendor/github.com/onsi/gomega/gbytes/say_matcher.go +++ b/vendor/github.com/onsi/gomega/gbytes/say_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 1 + package gbytes import ( @@ -19,7 +21,7 @@ Say is a Gomega matcher that operates on gbytes.Buffers: will succeed if the unread portion of the buffer matches the regular expression "something". -When Say succeeds, it fast forwards the gbytes.Buffer's read cursor to just after the succesful match. +When Say succeeds, it fast forwards the gbytes.Buffer's read cursor to just after the successful match. Thus, subsequent calls to Say will only match against the unread portion of the buffer Say pairs very well with Eventually. 
To assert that a buffer eventually receives data matching "[123]-star" within 3 seconds you can: diff --git a/vendor/github.com/onsi/gomega/gexec/build.go b/vendor/github.com/onsi/gomega/gexec/build.go index 869c1ead8..741d845f4 100644 --- a/vendor/github.com/onsi/gomega/gexec/build.go +++ b/vendor/github.com/onsi/gomega/gexec/build.go @@ -1,3 +1,5 @@ +// untested sections: 5 + package gexec import ( @@ -66,7 +68,7 @@ func doBuild(gopath, packagePath string, env []string, args ...string) (compiled executable := filepath.Join(tmpDir, path.Base(packagePath)) if runtime.GOOS == "windows" { - executable = executable + ".exe" + executable += ".exe" } cmdArgs := append([]string{"build"}, args...) diff --git a/vendor/github.com/onsi/gomega/gexec/exit_matcher.go b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go index 98a354937..6e70de68d 100644 --- a/vendor/github.com/onsi/gomega/gexec/exit_matcher.go +++ b/vendor/github.com/onsi/gomega/gexec/exit_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package gexec import ( diff --git a/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go index 05e695abc..feb6620c5 100644 --- a/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go +++ b/vendor/github.com/onsi/gomega/gexec/prefixed_writer.go @@ -1,3 +1,5 @@ +// untested sections: 1 + package gexec import ( @@ -6,7 +8,7 @@ import ( ) /* -PrefixedWriter wraps an io.Writer, emiting the passed in prefix at the beginning of each new line. +PrefixedWriter wraps an io.Writer, emitting the passed in prefix at the beginning of each new line. This can be useful when running multiple gexec.Sessions concurrently - you can prefix the log output of each session by passing in a PrefixedWriter: diff --git a/vendor/github.com/onsi/gomega/gexec/session.go b/vendor/github.com/onsi/gomega/gexec/session.go index 5cb00ca65..6a09140fb 100644 --- a/vendor/github.com/onsi/gomega/gexec/session.go +++ b/vendor/github.com/onsi/gomega/gexec/session.go @@ -1,6 +1,9 @@ /* Package gexec provides support for testing external processes. */ + +// untested sections: 1 + package gexec import ( diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 448d595da..b145768cf 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -24,7 +24,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.5.0" +const GOMEGA_VERSION = "1.7.0" const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -155,7 +155,7 @@ func Expect(actual interface{}, extra ...interface{}) Assertion { // ExpectWithOffset(1, "foo").To(Equal("foo")) // // Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument -// this is used to modify the call-stack offset when computing line numbers. +// that is used to modify the call-stack offset when computing line numbers. // // This is most useful in helper functions that make assertions. If you want Gomega's // error message to refer to the calling line in the test (as opposed to the line in the helper function) @@ -242,7 +242,7 @@ func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface // assert that all other values are nil/zero. // This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go. 
// -// Consistently is useful in cases where you want to assert that something *does not happen* over a period of tiem. +// Consistently is useful in cases where you want to assert that something *does not happen* over a period of time. // For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could: // // Consistently(channel).ShouldNot(Receive()) @@ -280,7 +280,7 @@ func SetDefaultEventuallyPollingInterval(t time.Duration) { defaultEventuallyPollingInterval = t } -// SetDefaultConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satsified for this long. +// SetDefaultConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satisfied for this long. func SetDefaultConsistentlyDuration(t time.Duration) { defaultConsistentlyDuration = t } @@ -320,7 +320,7 @@ type GomegaAsyncAssertion = AsyncAssertion // All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf() // and is used to annotate failure messages. // -// All methods return a bool that is true if hte assertion passed and false if it failed. +// All methods return a bool that is true if the assertion passed and false if it failed. // // Example: // diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go index cdab233eb..a233e48c0 100644 --- a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package asyncassertion import ( diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index c3a326dd4..9ec8893cb 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -269,6 +269,22 @@ func ContainElement(element interface{}) types.GomegaMatcher { } } +//BeElementOf succeeds if actual is contained in the passed in elements. +//BeElementOf() always uses Equal() to perform the match. +//When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves +//as the reverse of ContainElement() that operates with Equal() to perform the match. +// Expect(2).Should(BeElementOf([]int{1, 2})) +// Expect(2).Should(BeElementOf([2]int{1, 2})) +//Otherwise, BeElementOf() provides a syntactic sugar for Or(Equal(_), Equal(_), ...): +// Expect(2).Should(BeElementOf(1, 2)) +// +//Actual must be typed. +func BeElementOf(elements ...interface{}) types.GomegaMatcher { + return &matchers.BeElementOfMatcher{ + Elements: elements, + } +} + //ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter. //By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. 
Here are some examples: // diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go index 51f8be6ae..be4839520 100644 --- a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go index 7b6975e41..acffc8570 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go +++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go @@ -1,3 +1,5 @@ +// untested sections: 5 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go index e239131fb..89441c800 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go +++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go @@ -1,3 +1,5 @@ +// untested sections: 5 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go index d42eba223..ec6506b00 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go +++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go @@ -1,3 +1,5 @@ +// untested sections: 3 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go index 80c9c8bb1..f13c24490 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go new file mode 100644 index 000000000..1f9d7a8e6 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go @@ -0,0 +1,57 @@ +// untested sections: 1 + +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeElementOfMatcher struct { + Elements []interface{} +} + +func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err error) { + if reflect.TypeOf(actual) == nil { + return false, fmt.Errorf("BeElement matcher expects actual to be typed") + } + + length := len(matcher.Elements) + valueAt := func(i int) interface{} { + return matcher.Elements[i] + } + // Special handling of a single element of type Array or Slice + if length == 1 && isArrayOrSlice(valueAt(0)) { + element := valueAt(0) + value := reflect.ValueOf(element) + length = value.Len() + valueAt = func(i int) interface{} { + return value.Index(i).Interface() + } + } + + var lastError error + for i := 0; i < length; i++ { + matcher := &EqualMatcher{Expected: valueAt(i)} + success, err := matcher.Match(actual) + if err != nil { + lastError = err + continue + } + if success { + return true, nil + } + } + + return false, lastError +} + +func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be an element of", matcher.Elements) +} + +func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return 
format.Message(actual, "not to be an element of", matcher.Elements) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go index 8b00311b0..527c1a1c1 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go index 97ab20a4e..263627f40 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go index 91d3b779e..e326c0157 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go index fdcda4d1f..631ce11e3 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go +++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go index 7ee84fe1b..551d99d74 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package matchers import "github.com/onsi/gomega/format" diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go index 9f4f77eec..f72591a1a 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 4 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go index 302dd1a0a..cf582a3fc 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 3 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go index cb7c038ef..dec4db024 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 3 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go index ec57c5db4..60bc1e3fa 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go 
b/vendor/github.com/onsi/gomega/matchers/consist_of.go index 7b0e08868..cbbf61802 100644 --- a/vendor/github.com/onsi/gomega/matchers/consist_of.go +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -1,3 +1,5 @@ +// untested sections: 3 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go index 4159335d0..8d6c44c7a 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package matchers import ( @@ -22,19 +24,21 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e } value := reflect.ValueOf(actual) - var keys []reflect.Value + var valueAt func(int) interface{} if isMap(actual) { - keys = value.MapKeys() + keys := value.MapKeys() + valueAt = func(i int) interface{} { + return value.MapIndex(keys[i]).Interface() + } + } else { + valueAt = func(i int) interface{} { + return value.Index(i).Interface() + } } + var lastError error for i := 0; i < value.Len(); i++ { - var success bool - var err error - if isMap(actual) { - success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface()) - } else { - success, err = elemMatcher.Match(value.Index(i).Interface()) - } + success, err := elemMatcher.Match(valueAt(i)) if err != nil { lastError = err continue diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go index f8dc41e74..e725f8c27 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go index 7ace93dc3..9856752f1 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go index ea5b92336..00cffec70 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 6 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go index 06355b1e9..4c5916804 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -1,3 +1,5 @@ +// untested sections:10 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go index bef00ae21..5bcfdd2ad 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 2 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go index 
2018a6128..1936a2ba5 100644 --- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -1,3 +1,5 @@ +// untested sections: 3 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go index 639295684..1369c1e87 100644 --- a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go +++ b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go @@ -1,3 +1,5 @@ +// untested sections: 5 + package matchers import ( diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go index 8aaf8759d..108f28586 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go @@ -1,6 +1,5 @@ package bipartitegraph -import "errors" import "fmt" import . "github.com/onsi/gomega/matchers/support/goraph/node" @@ -28,7 +27,7 @@ func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(in for j, rightValue := range rightValues { neighbours, err := neighbours(leftValue, rightValue) if err != nil { - return nil, errors.New(fmt.Sprintf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error())) + return nil, fmt.Errorf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error()) } if neighbours { diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go index 75afcd844..dced2419e 100644 --- a/vendor/github.com/onsi/gomega/matchers/type_support.go +++ b/vendor/github.com/onsi/gomega/matchers/type_support.go @@ -6,6 +6,9 @@ See the docs for Gomega for documentation on the matchers http://onsi.github.io/gomega/ */ + +// untested sections: 11 + package matchers import ( diff --git a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go new file mode 100644 index 000000000..82a473bb1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go @@ -0,0 +1,127 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rand provides utilities related to randomization. +package rand + +import ( + "math/rand" + "sync" + "time" +) + +var rng = struct { + sync.Mutex + rand *rand.Rand +}{ + rand: rand.New(rand.NewSource(time.Now().UnixNano())), +} + +// Int returns a non-negative pseudo-random int. +func Int() int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Int() +} + +// Intn generates an integer in range [0,max). +// By design this should panic if input is invalid, <= 0. +func Intn(max int) int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Intn(max) +} + +// IntnRange generates an integer in range [min,max). 
+// By design this should panic if input is invalid, <= 0. +func IntnRange(min, max int) int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Intn(max-min) + min +} + +// IntnRange generates an int64 integer in range [min,max). +// By design this should panic if input is invalid, <= 0. +func Int63nRange(min, max int64) int64 { + rng.Lock() + defer rng.Unlock() + return rng.rand.Int63n(max-min) + min +} + +// Seed seeds the rng with the provided seed. +func Seed(seed int64) { + rng.Lock() + defer rng.Unlock() + + rng.rand = rand.New(rand.NewSource(seed)) +} + +// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n) +// from the default Source. +func Perm(n int) []int { + rng.Lock() + defer rng.Unlock() + return rng.rand.Perm(n) +} + +const ( + // We omit vowels from the set of available characters to reduce the chances + // of "bad words" being formed. + alphanums = "bcdfghjklmnpqrstvwxz2456789" + // No. of bits required to index into alphanums string. + alphanumsIdxBits = 5 + // Mask used to extract last alphanumsIdxBits of an int. + alphanumsIdxMask = 1<<alphanumsIdxBits - 1 + // No. of random letters we can extract from a single int63. + maxAlphanumsPerInt = 63 / alphanumsIdxBits +) + +// String generates a random alphanumeric string, without vowels, which is n +// characters long. This will panic if n is less than zero. +// How the random string is created: +// - we generate random int63's +// - from each int63, we are extracting multiple random letters by bit-shifting and masking +// - if some index is out of range of alphanums we neglect it (unlikely to happen multiple times in a row) +func String(n int) string { + b := make([]byte, n) + rng.Lock() + defer rng.Unlock() + + randomInt63 := rng.rand.Int63() + remaining := maxAlphanumsPerInt + for i := 0; i < n; { + if remaining == 0 { + randomInt63, remaining = rng.rand.Int63(), maxAlphanumsPerInt + } + if idx := int(randomInt63 & alphanumsIdxMask); idx < len(alphanums) { + b[i] = alphanums[idx] + i++ + } + randomInt63 >>= alphanumsIdxBits + remaining-- + } + return string(b) +} + +// SafeEncodeString encodes s using the same characters as rand.String. This reduces the chances of bad words and +// ensures that strings generated from hash functions appear consistent throughout the API. +func SafeEncodeString(s string) string { + r := make([]byte, len(s)) + for i, b := range []rune(s) { + r[i] = alphanums[(int(b) % len(alphanums))] + } + return string(r) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index d0aad3387..bc8798938 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -19,7 +19,17 @@ github.com/davecgh/go-spew/spew # github.com/evanphx/json-patch v4.5.0+incompatible github.com/evanphx/json-patch # github.com/gardener/gardener v1.1.2 +github.com/gardener/gardener/pkg/apis/core +github.com/gardener/gardener/pkg/apis/core/v1beta1 +github.com/gardener/gardener/pkg/apis/core/v1beta1/constants +github.com/gardener/gardener/pkg/apis/core/v1beta1/helper +github.com/gardener/gardener/pkg/apis/extensions +github.com/gardener/gardener/pkg/apis/extensions/v1alpha1 +github.com/gardener/gardener/pkg/logger +github.com/gardener/gardener/pkg/utils +github.com/gardener/gardener/pkg/utils/errors github.com/gardener/gardener/pkg/utils/imagevector +github.com/gardener/gardener/pkg/utils/kubernetes/health github.com/gardener/gardener/pkg/utils/version # github.com/ghodss/yaml v1.0.0 github.com/ghodss/yaml @@ -61,6 +71,10 @@ github.com/google/uuid github.com/googleapis/gnostic/OpenAPIv2 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions +# github.com/hashicorp/errwrap v1.0.0 +github.com/hashicorp/errwrap +# github.com/hashicorp/go-multierror v1.0.0 +github.com/hashicorp/go-multierror # github.com/hashicorp/golang-lru v0.5.3 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru @@ -88,7 +102,7 @@ github.com/mitchellh/reflectwalk github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 github.com/modern-go/reflect2 -# github.com/onsi/ginkgo v1.8.0 +# github.com/onsi/ginkgo v1.10.1 github.com/onsi/ginkgo github.com/onsi/ginkgo/config github.com/onsi/ginkgo/internal/codelocation @@ -107,7 +121,7 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types -# github.com/onsi/gomega v1.5.0 +#
github.com/onsi/gomega v1.7.0 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/gbytes @@ -350,6 +364,7 @@ k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/util/rand k8s.io/apimachinery/pkg/util/runtime k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/util/strategicpatch
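
Illustrative only, not part of the applied patch: a minimal Go sketch of the two user-facing Gomega additions this vendor bump (v1.5.0 -> v1.7.0) picks up, the BeElementOf matcher and the newly exported format variables. The package and test names are hypothetical.

package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/format"
)

// TestGomegaOneSevenAdditions exercises the features added in the vendored
// Gomega 1.6.0/1.7.0 releases listed in the CHANGELOG above.
func TestGomegaOneSevenAdditions(t *testing.T) {
	g := NewGomegaWithT(t)

	// BeElementOf: the actual value must equal one of the listed elements...
	g.Expect(2).To(BeElementOf(1, 2, 3))
	// ...or, when given a single slice/array, one of its elements
	// (the reverse of ContainElement).
	g.Expect("b").To(BeElementOf([]string{"a", "b"}))

	// The previously hard-coded truncation constants (50 and 5) are now
	// exported package variables, so long-string diffs in failure messages
	// can be tuned for a whole suite.
	format.TruncateThreshold = 80
	format.CharactersAroundMismatchToInclude = 10
}

Note that the format knobs are package-global, so setting them in one test affects every subsequent assertion in the suite.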